Repository: hadoop
Updated Branches:
  refs/heads/YARN-2915 b91e900cc -> 6452592b8 (forced update)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
index 13dcccf..15a8756 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
@@ -54,13 +55,15 @@ public class TestAddOverReplicatedStripedBlocks {
   private DistributedFileSystem fs;
   private final Path dirPath = new Path("/striped");
   private Path filePath = new Path(dirPath, "file");
-  private final short DATA_BLK_NUM = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private final short PARITY_BLK_NUM = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  private final short GROUP_SIZE = (short) (DATA_BLK_NUM + PARITY_BLK_NUM);
-  private final int CELLSIZE = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private final int NUM_STRIPE_PER_BLOCK = 4;
-  private final int BLOCK_SIZE = NUM_STRIPE_PER_BLOCK * CELLSIZE;
-  private final int numDNs = GROUP_SIZE + 3;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final short groupSize = (short) (dataBlocks + parityBlocks);
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripesPerBlock = 4;
+  private final int blockSize = stripesPerBlock * cellSize;
+  private final int numDNs = groupSize + 3;

   @Rule
   public Timeout globalTimeout = new Timeout(300000);
@@ -68,7 +71,7 @@ public class TestAddOverReplicatedStripedBlocks {
   @Before
   public void setup() throws IOException {
     Configuration conf = new Configuration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     // disable block recovery
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
@@ -92,17 +95,17 @@ public class TestAddOverReplicatedStripedBlocks {
   @Test
   public void testProcessOverReplicatedStripedBlock() throws Exception {
     // create a file which has exactly one block group on the first GROUP_SIZE DNs
-    long fileLen = DATA_BLK_NUM * BLOCK_SIZE;
+    long fileLen = dataBlocks * blockSize;
     DFSTestUtil.createStripedFile(cluster, filePath, null, 1,
-        NUM_STRIPE_PER_BLOCK, false);
+        stripesPerBlock, false);
     LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
         filePath.toString(), 0, fileLen);
     LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
     long gs = bg.getBlock().getGenerationStamp();
     String bpid = bg.getBlock().getBlockPoolId();
     long groupId = bg.getBlock().getBlockId();
-    Block blk = new Block(groupId, BLOCK_SIZE, gs);
-    for (int i = 0; i < GROUP_SIZE; i++) {
+    Block blk = new Block(groupId, blockSize, gs);
+    for (int i = 0; i < groupSize; i++) {
       blk.setBlockId(groupId + i);
       cluster.injectBlocks(i, Arrays.asList(blk), bpid);
     }
@@ -113,7 +116,7 @@ public class TestAddOverReplicatedStripedBlocks {
     cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
     cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
     // let an internal block be over-replicated with 1 redundant block.
-    blk.setBlockId(groupId + DATA_BLK_NUM);
+    blk.setBlockId(groupId + dataBlocks);
     cluster.injectBlocks(numDNs - 1, Arrays.asList(blk), bpid);

     // update blocksMap
@@ -128,14 +131,14 @@ public class TestAddOverReplicatedStripedBlocks {
     // verify that all internal blocks exist
     lbs = cluster.getNameNodeRpc().getBlockLocations(
         filePath.toString(), 0, fileLen);
-    StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, GROUP_SIZE);
+    StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, groupSize);
   }

   @Test
   public void testProcessOverReplicatedSBSmallerThanFullBlocks()
       throws Exception {
     // Create an EC file which doesn't fill full internal blocks.
-    int fileLen = CELLSIZE * (DATA_BLK_NUM - 1);
+    int fileLen = cellSize * (dataBlocks - 1);
     byte[] content = new byte[fileLen];
     DFSTestUtil.writeFile(fs, filePath, new String(content));
     LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
@@ -144,7 +147,7 @@ public class TestAddOverReplicatedStripedBlocks {
     long gs = bg.getBlock().getGenerationStamp();
     String bpid = bg.getBlock().getBlockPoolId();
     long groupId = bg.getBlock().getBlockId();
-    Block blk = new Block(groupId, BLOCK_SIZE, gs);
+    Block blk = new Block(groupId, blockSize, gs);
     cluster.triggerBlockReports();
     List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
@@ -171,25 +174,25 @@ public class TestAddOverReplicatedStripedBlocks {
     // verify that all internal blocks exist
     lbs = cluster.getNameNodeRpc().getBlockLocations(
         filePath.toString(), 0, fileLen);
-    StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, GROUP_SIZE - 1);
+    StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, groupSize - 1);
   }

   @Test
   public void testProcessOverReplicatedAndCorruptStripedBlock()
       throws Exception {
-    long fileLen = DATA_BLK_NUM * BLOCK_SIZE;
+    long fileLen = dataBlocks * blockSize;
     DFSTestUtil.createStripedFile(cluster, filePath, null, 1,
-        NUM_STRIPE_PER_BLOCK, false);
+        stripesPerBlock, false);
     LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
         filePath.toString(), 0, fileLen);
     LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
     long gs = bg.getBlock().getGenerationStamp();
     String bpid = bg.getBlock().getBlockPoolId();
     long groupId = bg.getBlock().getBlockId();
-    Block blk = new Block(groupId, BLOCK_SIZE, gs);
+    Block blk = new Block(groupId, blockSize, gs);
     BlockInfoStriped blockInfo = new BlockInfoStriped(blk,
         ErasureCodingPolicyManager.getSystemDefaultPolicy());
-    for (int i = 0; i < GROUP_SIZE; i++) {
+    for (int i = 0; i < groupSize; i++) {
       blk.setBlockId(groupId + i);
       cluster.injectBlocks(i, Arrays.asList(blk), bpid);
     }
@@ -225,14 +228,14 @@ public class TestAddOverReplicatedStripedBlocks {
     lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0,
         fileLen);
     bg = (LocatedStripedBlock) (lbs.get(0));
-    assertEquals(GROUP_SIZE + 1, bg.getBlockIndices().length);
-    assertEquals(GROUP_SIZE + 1, bg.getLocations().length);
-    BitSet set = new BitSet(GROUP_SIZE);
+    assertEquals(groupSize + 1, bg.getBlockIndices().length);
+    assertEquals(groupSize + 1, bg.getLocations().length);
+    BitSet set = new BitSet(groupSize);
     for (byte index : bg.getBlockIndices()) {
       set.set(index);
     }
     Assert.assertFalse(set.get(0));
-    for (int i = 1; i < GROUP_SIZE; i++) {
+    for (int i = 1; i < groupSize; i++) {
       assertTrue(set.get(i));
     }
   }
@@ -243,18 +246,18 @@ public class TestAddOverReplicatedStripedBlocks {
   @Test
   public void testProcessOverReplicatedAndMissingStripedBlock()
       throws Exception {
-    long fileLen = CELLSIZE * DATA_BLK_NUM;
+    long fileLen = cellSize * dataBlocks;
     DFSTestUtil.createStripedFile(cluster, filePath, null, 1,
-        NUM_STRIPE_PER_BLOCK, false);
+        stripesPerBlock, false);
     LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
         filePath.toString(), 0, fileLen);
     LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
     long gs = bg.getBlock().getGenerationStamp();
     String bpid = bg.getBlock().getBlockPoolId();
     long groupId = bg.getBlock().getBlockId();
-    Block blk = new Block(groupId, BLOCK_SIZE, gs);
+    Block blk = new Block(groupId, blockSize, gs);
     // only inject GROUP_SIZE - 1 blocks, so there is one block missing
-    for (int i = 0; i < GROUP_SIZE - 1; i++) {
+    for (int i = 0; i < groupSize - 1; i++) {
       blk.setBlockId(groupId + i);
       cluster.injectBlocks(i, Arrays.asList(blk), bpid);
     }
@@ -282,14 +285,14 @@ public class TestAddOverReplicatedStripedBlocks {
     lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0,
         fileLen);
     bg = (LocatedStripedBlock) (lbs.get(0));
-    assertEquals(GROUP_SIZE + 1, bg.getBlockIndices().length);
-    assertEquals(GROUP_SIZE + 1, bg.getLocations().length);
-    BitSet set = new BitSet(GROUP_SIZE);
+    assertEquals(groupSize + 1, bg.getBlockIndices().length);
+    assertEquals(groupSize + 1, bg.getLocations().length);
+    BitSet set = new BitSet(groupSize);
     for (byte index : bg.getBlockIndices()) {
       set.set(index);
     }
-    Assert.assertFalse(set.get(GROUP_SIZE - 1));
-    for (int i = 0; i < GROUP_SIZE - 1; i++) {
+    Assert.assertFalse(set.get(groupSize - 1));
+    for (int i = 0; i < groupSize - 1; i++) {
       assertTrue(set.get(i));
     }
   }
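The hunks above are one mechanical change: EC test parameters that used to be hard-coded StripedFileTestUtil constants (DATA_BLK_NUM, PARITY_BLK_NUM, CELLSIZE) are now derived from the system default erasure coding policy. A minimal sketch of that pattern, assuming the HDFS test classpath and that the default policy is RS(6,3) with 64 KB cells; the class name and main() harness here are ours, not part of the patch:

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

public class EcPolicyParamsSketch {
  public static void main(String[] args) {
    // same derivation the tests now use in their field initializers
    ErasureCodingPolicy ecPolicy =
        ErasureCodingPolicyManager.getSystemDefaultPolicy();
    short dataBlocks = (short) ecPolicy.getNumDataUnits();
    short parityBlocks = (short) ecPolicy.getNumParityUnits();
    short groupSize = (short) (dataBlocks + parityBlocks);
    int cellSize = ecPolicy.getCellSize();
    // expected 6 / 3 / 9 / 65536 for a default RS(6,3), 64 KB-cell policy
    System.out.println(dataBlocks + " / " + parityBlocks + " / "
        + groupSize + " / " + cellSize);
  }
}

Reading the values from the policy keeps the tests correct if the default policy ever changes, which is the point of this cleanup.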
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
index 37b334f..ab24a25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
@@ -41,12 +42,13 @@ import org.mockito.internal.util.reflection.Whitebox;

 import java.io.IOException;

-import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
-
 public class TestAddStripedBlockInFBR {
-  private final short GROUP_SIZE = (short) (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS);
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final short groupSize = (short) (dataBlocks + parityBlocks);

   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
@@ -57,7 +59,7 @@ public class TestAddStripedBlockInFBR {
   @Before
   public void setup() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
     cluster.waitActive();
     dfs = cluster.getFileSystem();
   }
@@ -87,14 +89,14 @@ public class TestAddStripedBlockInFBR {
     dfs.getClient().setErasureCodingPolicy(ecDir.toString(), null);

     // create several non-EC files and one EC file
-    final Path[] repFiles = new Path[GROUP_SIZE];
-    for (int i = 0; i < GROUP_SIZE; i++) {
+    final Path[] repFiles = new Path[groupSize];
+    for (int i = 0; i < groupSize; i++) {
       repFiles[i] = new Path(repDir, "f" + i);
       DFSTestUtil.createFile(dfs, repFiles[i], 1L, (short) 3, 0L);
     }
     final Path ecFile = new Path(ecDir, "f");
     DFSTestUtil.createFile(dfs, ecFile,
-        BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS, (short) 1, 0L);
+        cellSize * dataBlocks, (short) 1, 0L);

     // trigger dn's FBR. The FBR will add block-dn mapping.
     DataNodeTestUtils.triggerBlockReport(dn);
@@ -103,7 +105,7 @@ public class TestAddStripedBlockInFBR {
     BlockInfoStriped blockInfo = (BlockInfoStriped) cluster.getNamesystem()
         .getFSDirectory().getINode(ecFile.toString()).asFile().getLastBlock();
     NumberReplicas nr = spy.countNodes(blockInfo);
-    Assert.assertEquals(GROUP_SIZE, nr.liveReplicas());
+    Assert.assertEquals(groupSize, nr.liveReplicas());
     Assert.assertEquals(0, nr.excessReplicas());
   }
 }
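TestAddStripedBlockInFBR sizes its MiniDFSCluster from the policy-derived groupSize: one datanode per internal block of a block group. A self-contained sketch of that setup, mirroring the @Before method above (the harness class and try/finally shutdown are our own additions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

public class StripedMiniClusterSketch {
  public static void main(String[] args) throws Exception {
    ErasureCodingPolicy ecPolicy =
        ErasureCodingPolicyManager.getSystemDefaultPolicy();
    int groupSize = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
    Configuration conf = new HdfsConfiguration();
    // one datanode per internal block of a block group
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
    try {
      cluster.waitActive();
      DistributedFileSystem dfs = cluster.getFileSystem();
      // a null policy selects the default, as the tests in this patch do
      dfs.getClient().setErasureCodingPolicy("/", null);
    } finally {
      cluster.shutdown();
    }
  }
}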
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index 768533a..ffd3fa7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -24,11 +24,11 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -63,13 +63,16 @@ import java.util.List;
 import java.util.UUID;

 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
 import static org.junit.Assert.assertEquals;

 public class TestAddStripedBlocks {
-  private final short GROUP_SIZE = (short) (StripedFileTestUtil.NUM_DATA_BLOCKS +
-      StripedFileTestUtil.NUM_PARITY_BLOCKS);
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final short groupSize = (short) (ecPolicy.getNumDataUnits() +
+      ecPolicy.getNumParityUnits());

   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
@@ -80,7 +83,7 @@ public class TestAddStripedBlocks {
   @Before
   public void setup() throws IOException {
     cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
-        .numDataNodes(GROUP_SIZE).build();
+        .numDataNodes(groupSize).build();
     cluster.waitActive();
     dfs = cluster.getFileSystem();
     dfs.getClient().setErasureCodingPolicy("/", null);
@@ -206,16 +209,15 @@ public class TestAddStripedBlocks {
       boolean checkReplica) {
     assertEquals(0, block.numNodes());
     Assert.assertFalse(block.isComplete());
-    Assert.assertEquals(StripedFileTestUtil.NUM_DATA_BLOCKS, block.getDataBlockNum());
-    Assert.assertEquals(StripedFileTestUtil.NUM_PARITY_BLOCKS,
-        block.getParityBlockNum());
+    Assert.assertEquals(dataBlocks, block.getDataBlockNum());
+    Assert.assertEquals(parityBlocks, block.getParityBlockNum());
     Assert.assertEquals(0,
         block.getBlockId() & HdfsServerConstants.BLOCK_GROUP_INDEX_MASK);
     Assert.assertEquals(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
         block.getBlockUCState());
     if (checkReplica) {
-      Assert.assertEquals(GROUP_SIZE,
+      Assert.assertEquals(groupSize,
           block.getUnderConstructionFeature().getNumExpectedLocations());
       DatanodeStorageInfo[] storages = block.getUnderConstructionFeature()
           .getExpectedStorageLocations();
@@ -259,8 +261,8 @@ public class TestAddStripedBlocks {
       Assert.assertTrue(lblk instanceof LocatedStripedBlock);
       DatanodeInfo[] datanodes = lblk.getLocations();
       byte[] blockIndices = ((LocatedStripedBlock) lblk).getBlockIndices();
-      Assert.assertEquals(GROUP_SIZE, datanodes.length);
-      Assert.assertEquals(GROUP_SIZE, blockIndices.length);
+      Assert.assertEquals(groupSize, datanodes.length);
+      Assert.assertEquals(groupSize, blockIndices.length);
       Assert.assertArrayEquals(indices, blockIndices);
       Assert.assertArrayEquals(expectedDNs, datanodes);
     } finally {
@@ -291,8 +293,8 @@ public class TestAddStripedBlocks {
       DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature()
           .getExpectedStorageLocations();
       byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
-      Assert.assertEquals(GROUP_SIZE, locs.length);
-      Assert.assertEquals(GROUP_SIZE, indices.length);
+      Assert.assertEquals(groupSize, locs.length);
+      Assert.assertEquals(groupSize, indices.length);

       // 2. mimic incremental block reports and make sure the uc-replica list in
       // the BlockInfoUCStriped is correct
@@ -314,8 +316,8 @@ public class TestAddStripedBlocks {
       // make sure lastBlock is correct and the storages have been updated
       locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
       indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
-      Assert.assertEquals(GROUP_SIZE, locs.length);
-      Assert.assertEquals(GROUP_SIZE, indices.length);
+      Assert.assertEquals(groupSize, locs.length);
+      Assert.assertEquals(groupSize, indices.length);
       for (DatanodeStorageInfo newstorage : locs) {
         Assert.assertTrue(storageIDs.contains(newstorage.getStorageID()));
       }
@@ -330,7 +332,7 @@ public class TestAddStripedBlocks {
     INodeFile fileNode = cluster.getNamesystem().getFSDirectory()
         .getINode4Write(file.toString()).asFile();
     BlockInfo lastBlock = fileNode.getLastBlock();
-    int i = GROUP_SIZE - 1;
+    int i = groupSize - 1;
     for (DataNode dn : cluster.getDataNodes()) {
       String storageID = storageIDs.get(i);
       final Block block = new Block(lastBlock.getBlockId() + i--,
@@ -351,12 +353,12 @@ public class TestAddStripedBlocks {
     DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature()
         .getExpectedStorageLocations();
     byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
-    Assert.assertEquals(GROUP_SIZE, locs.length);
-    Assert.assertEquals(GROUP_SIZE, indices.length);
-    for (i = 0; i < GROUP_SIZE; i++) {
+    Assert.assertEquals(groupSize, locs.length);
+    Assert.assertEquals(groupSize, indices.length);
+    for (i = 0; i < groupSize; i++) {
       Assert.assertEquals(storageIDs.get(i),
-          locs[GROUP_SIZE - 1 - i].getStorageID());
-      Assert.assertEquals(GROUP_SIZE - i - 1, indices[i]);
+          locs[groupSize - 1 - i].getStorageID());
+      Assert.assertEquals(groupSize - i - 1, indices[i]);
     }
   }
@@ -380,7 +382,7 @@ public class TestAddStripedBlocks {
     // Now send a block report with correct size
     DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
     final Block reported = new Block(stored);
-    reported.setNumBytes(numStripes * BLOCK_STRIPED_CELL_SIZE);
+    reported.setNumBytes(numStripes * cellSize);
     StorageReceivedDeletedBlocks[] reports = DFSTestUtil
         .makeReportForReceivedBlock(reported,
             ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
@@ -391,7 +393,7 @@ public class TestAddStripedBlocks {
     // Now send a block report with wrong size
     reported.setBlockId(stored.getBlockId() + 1);
-    reported.setNumBytes(numStripes * BLOCK_STRIPED_CELL_SIZE - 1);
+    reported.setNumBytes(numStripes * cellSize - 1);
     reports = DFSTestUtil.makeReportForReceivedBlock(reported,
         ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
     ns.processIncrementalBlockReport(
@@ -400,8 +402,8 @@ public class TestAddStripedBlocks {
     Assert.assertEquals(1, ns.getCorruptReplicaBlocks());

     // Now send a parity block report with correct size
-    reported.setBlockId(stored.getBlockId() + NUM_DATA_BLOCKS);
-    reported.setNumBytes(numStripes * BLOCK_STRIPED_CELL_SIZE);
+    reported.setBlockId(stored.getBlockId() + dataBlocks);
+    reported.setNumBytes(numStripes * cellSize);
     reports = DFSTestUtil.makeReportForReceivedBlock(reported,
         ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
     ns.processIncrementalBlockReport(
@@ -410,8 +412,8 @@ public class TestAddStripedBlocks {
     Assert.assertEquals(1, ns.getCorruptReplicaBlocks());

     // Now send a parity block report with wrong size
-    reported.setBlockId(stored.getBlockId() + NUM_DATA_BLOCKS);
-    reported.setNumBytes(numStripes * BLOCK_STRIPED_CELL_SIZE + 1);
+    reported.setBlockId(stored.getBlockId() + dataBlocks);
+    reported.setNumBytes(numStripes * cellSize + 1);
     reports = DFSTestUtil.makeReportForReceivedBlock(reported,
         ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
     ns.processIncrementalBlockReport(
@@ -425,8 +427,8 @@ public class TestAddStripedBlocks {
     // Now change the size of stored block, and test verifying the last
     // block size
     stored.setNumBytes(stored.getNumBytes() + 10);
-    reported.setBlockId(stored.getBlockId() + NUM_DATA_BLOCKS + 2);
-    reported.setNumBytes(numStripes * BLOCK_STRIPED_CELL_SIZE);
+    reported.setBlockId(stored.getBlockId() + dataBlocks + 2);
+    reported.setNumBytes(numStripes * cellSize);
     reports = DFSTestUtil.makeReportForReceivedBlock(reported,
         ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
     ns.processIncrementalBlockReport(
@@ -438,9 +440,9 @@ public class TestAddStripedBlocks {
     // Now send a parity block report with correct size based on adjusted
     // size of stored block
     /** Now stored block has {@link numStripes} full stripes + a cell + 10 */
-    stored.setNumBytes(stored.getNumBytes() + BLOCK_STRIPED_CELL_SIZE);
+    stored.setNumBytes(stored.getNumBytes() + cellSize);
     reported.setBlockId(stored.getBlockId());
-    reported.setNumBytes((numStripes + 1) * BLOCK_STRIPED_CELL_SIZE);
+    reported.setNumBytes((numStripes + 1) * cellSize);
     reports = DFSTestUtil.makeReportForReceivedBlock(reported,
         ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
     ns.processIncrementalBlockReport(
@@ -450,7 +452,7 @@ public class TestAddStripedBlocks {
     Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());

     reported.setBlockId(stored.getBlockId() + 1);
-    reported.setNumBytes(numStripes * BLOCK_STRIPED_CELL_SIZE + 10);
+    reported.setNumBytes(numStripes * cellSize + 10);
     reports = DFSTestUtil.makeReportForReceivedBlock(reported,
         ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
     ns.processIncrementalBlockReport(
@@ -459,8 +461,8 @@ public class TestAddStripedBlocks {
     Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
     Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());

-    reported.setBlockId(stored.getBlockId() + NUM_DATA_BLOCKS);
-    reported.setNumBytes((numStripes + 1) * BLOCK_STRIPED_CELL_SIZE);
+    reported.setBlockId(stored.getBlockId() + dataBlocks);
+    reported.setNumBytes((numStripes + 1) * cellSize);
     reports = DFSTestUtil.makeReportForReceivedBlock(reported,
         ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
     ns.processIncrementalBlockReport(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index fe29e1c..f6e99ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -99,8 +98,8 @@ public class TestFSEditLogLoader {

   private static final int NUM_DATA_NODES = 0;

-  private static final ErasureCodingPolicy testECPolicy
-      = StripedFileTestUtil.TEST_EC_POLICY;
+  private final ErasureCodingPolicy testECPolicy
+      = ErasureCodingPolicyManager.getSystemDefaultPolicy();

   @Test
   public void testDisplayRecentEditLogOpCodes() throws IOException {
@@ -474,8 +473,8 @@ public class TestFSEditLogLoader {
     long blkId = 1;
     long blkNumBytes = 1024;
     long timestamp = 1426222918;
-    short blockNum = StripedFileTestUtil.NUM_DATA_BLOCKS;
-    short parityNum = StripedFileTestUtil.NUM_PARITY_BLOCKS;
+    short blockNum = (short) testECPolicy.getNumDataUnits();
+    short parityNum = (short) testECPolicy.getNumParityUnits();

     // set the storage policy of the directory
     fs.mkdir(new Path(testDir), new FsPermission("755"));
@@ -547,8 +546,8 @@ public class TestFSEditLogLoader {
     long blkId = 1;
     long blkNumBytes = 1024;
     long timestamp = 1426222918;
-    short blockNum = StripedFileTestUtil.NUM_DATA_BLOCKS;
-    short parityNum = StripedFileTestUtil.NUM_PARITY_BLOCKS;
+    short blockNum = (short) testECPolicy.getNumDataUnits();
+    short parityNum = (short) testECPolicy.getNumParityUnits();

     // set the storage policy of the directory
     fs.mkdir(new Path(testDir), new FsPermission("755"));
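TestFSEditLogLoader now builds its striped blocks from the same policy object. A hedged sketch of the BlockInfoStriped construction it exercises, with the literal values copied from the test; the harness class and the assertions are ours:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

public class StripedBlockInfoSketch {
  public static void main(String[] args) {
    ErasureCodingPolicy testECPolicy =
        ErasureCodingPolicyManager.getSystemDefaultPolicy();
    short blockNum = (short) testECPolicy.getNumDataUnits();
    short parityNum = (short) testECPolicy.getNumParityUnits();
    long blkId = 1;
    long blkNumBytes = 1024;
    long timestamp = 1426222918;
    // a block group carries blockNum + parityNum internal blocks
    BlockInfoStriped striped = new BlockInfoStriped(
        new Block(blkId, blkNumBytes, timestamp), testECPolicy);
    // the data/parity unit counts round-trip through the block group
    assert striped.getDataBlockNum() == blockNum;
    assert striped.getParityBlockNum() == parityNum;
  }
}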
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
index b153652..9ec34d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -46,11 +45,12 @@ import java.io.IOException;

 public class TestQuotaWithStripedBlocks {
   private static final int BLOCK_SIZE = 1024 * 1024;
   private static final long DISK_QUOTA = BLOCK_SIZE * 10;
-  private static final ErasureCodingPolicy ecPolicy =
+  private final ErasureCodingPolicy ecPolicy =
       ErasureCodingPolicyManager.getSystemDefaultPolicy();
-  private static final int NUM_DATA_BLOCKS = ecPolicy.getNumDataUnits();
-  private static final int NUM_PARITY_BLOCKS = ecPolicy.getNumParityUnits();
-  private static final int GROUP_SIZE = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
+  private final int dataBlocks = ecPolicy.getNumDataUnits();
+  private final int parityBlocks = ecPolicy.getNumParityUnits();
+  private final int groupSize = dataBlocks + parityBlocks;
+  private final int cellSize = ecPolicy.getCellSize();

   private static final Path ecDir = new Path("/ec");
   private MiniDFSCluster cluster;
@@ -64,7 +64,7 @@ public class TestQuotaWithStripedBlocks {
   public void setUp() throws IOException {
     final Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
     cluster.waitActive();

     dir = cluster.getNamesystem().getFSDirectory();
@@ -109,8 +109,8 @@ public class TestQuotaWithStripedBlocks {
       final long diskUsed = dirNode.getDirectoryWithQuotaFeature()
           .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
       // When we add a new block we update the quota using the full block size.
-      Assert.assertEquals(BLOCK_SIZE * GROUP_SIZE, spaceUsed);
-      Assert.assertEquals(BLOCK_SIZE * GROUP_SIZE, diskUsed);
+      Assert.assertEquals(BLOCK_SIZE * groupSize, spaceUsed);
+      Assert.assertEquals(BLOCK_SIZE * groupSize, diskUsed);

       dfs.getClient().getNamenode().complete(file.toString(),
           dfs.getClient().getClientName(), previous, fileNode.getId());
@@ -120,9 +120,9 @@ public class TestQuotaWithStripedBlocks {
       final long actualDiskUsed = dirNode.getDirectoryWithQuotaFeature()
           .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
       // In this case the file's real size is cell size * block group size.
-      Assert.assertEquals(StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE * GROUP_SIZE,
+      Assert.assertEquals(cellSize * groupSize,
           actualSpaceUsed);
-      Assert.assertEquals(StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE * GROUP_SIZE,
+      Assert.assertEquals(cellSize * groupSize,
           actualDiskUsed);
     } finally {
       IOUtils.cleanup(null, out);
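The two quota assertions in TestQuotaWithStripedBlocks encode a simple rule: while a block group is under construction, quota is charged as if every internal block were full-size; once the file is completed, usage drops to the bytes actually written (one cell per internal block in this test). Back-of-the-envelope arithmetic, assuming an RS(6,3) policy with 64 KB cells and the test's 1 MB BLOCK_SIZE:

public class StripedQuotaMath {
  public static void main(String[] args) {
    final long blockSize = 1024 * 1024; // BLOCK_SIZE in the test
    final long cellSize = 64 * 1024;    // assumed default cell size
    final int groupSize = 6 + 3;        // dataBlocks + parityBlocks
    // under construction: a full internal block charged for all 9 DNs
    long provisional = blockSize * groupSize; // 9 MB
    // after complete(): one flushed cell per internal block
    long actual = cellSize * groupSize;       // 576 KB
    System.out.println(provisional + " -> " + actual);
  }
}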
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
index c38b6b4..d43804a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
@@ -51,9 +52,6 @@ import org.slf4j.LoggerFactory;

 import java.util.BitSet;
 import java.util.List;

-import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -61,9 +59,14 @@ import static org.junit.Assert.assertTrue;
 public class TestReconstructStripedBlocks {
   public static final Logger LOG = LoggerFactory.getLogger(
       TestReconstructStripedBlocks.class);
-  private static final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private final short GROUP_SIZE =
-      (short) (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS);
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final short groupSize = (short) (dataBlocks + parityBlocks);
+  private final int blockSize = 4 * cellSize;
+
   private MiniDFSCluster cluster;
   private final Path dirPath = new Path("/dir");
@@ -88,7 +91,7 @@ public class TestReconstructStripedBlocks {
   @Test
   public void testMissingStripedBlockWithBusyNode() throws Exception {
-    for (int i = 1; i <= NUM_PARITY_BLOCKS; i++) {
+    for (int i = 1; i <= parityBlocks; i++) {
       doTestMissingStripedBlock(i, 1);
     }
   }
@@ -105,7 +108,7 @@ public class TestReconstructStripedBlocks {
       throws Exception {
     Configuration conf = new HdfsConfiguration();
     initConf(conf);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE + 1)
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 1)
         .build();

     try {
@@ -126,10 +129,10 @@ public class TestReconstructStripedBlocks {
       for (BlockInfo blk : blocks) {
         assertTrue(blk.isStriped());
         assertTrue(blk.isComplete());
-        assertEquals(BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS,
+        assertEquals(cellSize * dataBlocks,
             blk.getNumBytes());
         final BlockInfoStriped sb = (BlockInfoStriped) blk;
-        assertEquals(GROUP_SIZE, sb.numNodes());
+        assertEquals(groupSize, sb.numNodes());
       }

       final BlockManager bm = cluster.getNamesystem().getBlockManager();
@@ -156,7 +159,7 @@ public class TestReconstructStripedBlocks {
       BlockManagerTestUtil.getComputedDatanodeWork(bm);

       // all the reconstruction work will be scheduled on the last DN
-      DataNode lastDn = cluster.getDataNodes().get(GROUP_SIZE);
+      DataNode lastDn = cluster.getDataNodes().get(groupSize);
       DatanodeDescriptor last =
           bm.getDatanodeManager().getDatanode(lastDn.getDatanodeId());
       assertEquals("Counting the number of outstanding EC tasks", numBlocks,
@@ -168,15 +171,15 @@ public class TestReconstructStripedBlocks {
         assertEquals(last, info.getTargetDnInfos()[0]);
         assertEquals(info.getSourceDnInfos().length,
             info.getLiveBlockIndices().length);
-        if (GROUP_SIZE - numOfMissed == NUM_DATA_BLOCKS) {
+        if (groupSize - numOfMissed == dataBlocks) {
           // It's a QUEUE_HIGHEST_PRIORITY block, so the busy DNs will be chosen
           // to make sure we have NUM_DATA_BLOCKS DNs to do reconstruction
           // work.
-          assertEquals(NUM_DATA_BLOCKS, info.getSourceDnInfos().length);
+          assertEquals(dataBlocks, info.getSourceDnInfos().length);
         } else {
           // The block has no highest priority, so we don't use the busy DNs as
           // sources
-          assertEquals(GROUP_SIZE - numOfMissed - numOfBusy,
+          assertEquals(groupSize - numOfMissed - numOfBusy,
               info.getSourceDnInfos().length);
         }
       }
@@ -190,15 +193,15 @@ public class TestReconstructStripedBlocks {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, StripedFileTestUtil.blockSize);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE + 2)
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2)
         .build();
     try {
       cluster.waitActive();
       DistributedFileSystem fs = cluster.getFileSystem();
       BlockManager bm = cluster.getNamesystem().getBlockManager();
       fs.getClient().setErasureCodingPolicy("/", null);
-      int fileLen = NUM_DATA_BLOCKS * StripedFileTestUtil.blockSize;
+      int fileLen = dataBlocks * blockSize;
       Path p = new Path("/test2RecoveryTasksForSameBlockGroup");
       final byte[] data = new byte[fileLen];
       DFSTestUtil.writeFile(fs, p, data);
@@ -206,7 +209,7 @@ public class TestReconstructStripedBlocks {
       LocatedStripedBlock lb = (LocatedStripedBlock)fs.getClient()
           .getLocatedBlocks(p.toString(), 0).get(0);
       LocatedBlock[] lbs = StripedBlockUtil.parseStripedBlockGroup(lb,
-          cellSize, NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS);
+          cellSize, dataBlocks, parityBlocks);

       assertEquals(0, getNumberOfBlocksToBeErasureCoded(cluster));
       assertEquals(0, bm.getPendingReconstructionBlocksCount());
@@ -255,7 +258,7 @@ public class TestReconstructStripedBlocks {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
         false);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE + 2)
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2)
         .build();
     cluster.waitActive();
     DistributedFileSystem fs = cluster.getFileSystem();
@@ -264,7 +267,7 @@ public class TestReconstructStripedBlocks {
     fs.mkdirs(dirPath);
     fs.setErasureCodingPolicy(dirPath, null);
     DFSTestUtil.createFile(fs, filePath,
-        BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2, (short) 1, 0L);
+        cellSize * dataBlocks * 2, (short) 1, 0L);

     // stop a dn
     LocatedBlocks blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
@@ -275,7 +278,7 @@ public class TestReconstructStripedBlocks {
     cluster.setDataNodeDead(dnToStop);

     // wait for reconstruction to happen
-    DFSTestUtil.waitForReplication(fs, filePath, GROUP_SIZE, 15 * 1000);
+    DFSTestUtil.waitForReplication(fs, filePath, groupSize, 15 * 1000);

     // bring the dn back: 10 internal blocks now
     cluster.restartDataNode(dnProp);
@@ -304,7 +307,7 @@ public class TestReconstructStripedBlocks {
     // check if NN can detect the missing internal block and finish the
     // reconstruction
     StripedFileTestUtil.waitForReconstructionFinished(filePath, fs,
-        GROUP_SIZE);
+        groupSize);
     boolean reconstructed = false;
     for (int i = 0; i < 5; i++) {
       NumberReplicas num = null;
@@ -316,7 +319,7 @@ public class TestReconstructStripedBlocks {
       } finally {
         fsn.readUnlock();
       }
-      if (num.liveReplicas() >= GROUP_SIZE) {
+      if (num.liveReplicas() >= groupSize) {
         reconstructed = true;
         break;
       } else {
@@ -327,11 +330,11 @@ public class TestReconstructStripedBlocks {

     blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
     block = (LocatedStripedBlock) blks.getLastLocatedBlock();
-    BitSet bitSet = new BitSet(GROUP_SIZE);
+    BitSet bitSet = new BitSet(groupSize);
     for (byte index : block.getBlockIndices()) {
       bitSet.set(index);
     }
-    for (int i = 0; i < GROUP_SIZE; i++) {
+    for (int i = 0; i < groupSize; i++) {
       Assert.assertTrue(bitSet.get(i));
     }
   } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
index 3703501..6c4d73c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -305,8 +304,7 @@ public class TestStripedINodeFile {
   public void testUnsuitableStoragePoliciesWithECStripedMode()
       throws Exception {
     final Configuration conf = new HdfsConfiguration();
-    int defaultStripedBlockSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE
-        * 4;
+    int defaultStripedBlockSize = testECPolicy.getCellSize() * 4;
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
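Several of the reconstruction tests above share one verification idiom: collect the reported block indices of the group into a BitSet and check that every index 0..groupSize-1 is present. Extracted as a standalone sketch; the sample indices are made up, standing in for LocatedStripedBlock#getBlockIndices():

import java.util.BitSet;

public class BlockIndexCheckSketch {
  public static void main(String[] args) {
    final int groupSize = 9; // dataBlocks + parityBlocks for RS(6,3)
    // e.g. the indices reported for a block group after reconstruction
    byte[] reportedIndices = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    BitSet bitSet = new BitSet(groupSize);
    for (byte index : reportedIndices) {
      bitSet.set(index);
    }
    for (int i = 0; i < groupSize; i++) {
      if (!bitSet.get(i)) {
        throw new AssertionError("internal block " + i + " is missing");
      }
    }
    System.out.println("all " + groupSize + " internal blocks present");
  }
}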
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
index 6d4d797..6fd0a1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
@@ -31,30 +31,33 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;

 public class TestOfflineImageViewerWithStripedBlocks {
-  private static int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private static int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private int dataBlocks = ecPolicy.getNumDataUnits();
+  private int parityBlocks = ecPolicy.getNumParityUnits();

   private static MiniDFSCluster cluster;
   private static DistributedFileSystem fs;
-  private static final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private static final int stripesPerBlock = 3;
-  private static final int blockSize = cellSize * stripesPerBlock;
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripesPerBlock = 3;
+  private final int blockSize = cellSize * stripesPerBlock;

-  @BeforeClass
-  public static void setup() throws IOException {
+  @Before
+  public void setup() throws IOException {
     int numDNs = dataBlocks + parityBlocks + 2;
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
@@ -66,8 +69,8 @@ public class TestOfflineImageViewerWithStripedBlocks {
     fs.mkdirs(eczone);
   }

-  @AfterClass
-  public static void tearDown() {
+  @After
+  public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
     }
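The TestStripedBlockUtil changes below keep asserting the internal-block-length rule. An illustrative re-implementation of that rule as we read it from the expected values (this is our own sketch, not the StripedBlockUtil code): the i-th data block holds one cell per full stripe plus its share of the last partial stripe, and a parity block is as long as data block 0.

public class InternalBlockLengthSketch {
  static long internalBlockLength(long bgSize, int cellSize, int dataBlocks,
      int idxInGroup) {
    // parity blocks (idx >= dataBlocks) are as long as data block 0
    int idx = idxInGroup < dataBlocks ? idxInGroup : 0;
    long stripeSize = (long) cellSize * dataBlocks;
    long len = (bgSize / stripeSize) * cellSize; // one cell per full stripe
    long lastStripe = bgSize % stripeSize;       // bytes in the partial stripe
    long cellStart = (long) idx * cellSize;
    if (lastStripe > cellStart) {
      len += Math.min(cellSize, lastStripe - cellStart);
    }
    return len;
  }

  public static void main(String[] args) {
    // reproduces the "multiple stripes, ends at cell boundary" case below:
    // 2 full stripes + 1 cell with RS(6,3) gives 3C 2C 2C 2C 2C 2C 3C 3C 3C
    int c = 64 * 1024; // assumed cell size
    for (int i = 0; i < 9; i++) {
      System.out.print(internalBlockLength(2 * 6 * c + c, c, 6, i) / c + "C ");
    }
  }
}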
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
index 999eb1f..c1728ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.util;

 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -81,15 +80,15 @@
  */
 public class TestStripedBlockUtil {
   // use hard-coded policy - see HDFS-9816
-  private final ErasureCodingPolicy EC_POLICY =
+  private final ErasureCodingPolicy ecPolicy =
       ErasureCodingPolicyManager.getSystemPolicies()[0];
-  private final short DATA_BLK_NUM = (short) EC_POLICY.getNumDataUnits();
-  private final short PARITY_BLK_NUM = (short) EC_POLICY.getNumParityUnits();
-  private final short BLK_GROUP_WIDTH = (short) (DATA_BLK_NUM + PARITY_BLK_NUM);
-  private final int CELLSIZE = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private final int FULL_STRIPE_SIZE = DATA_BLK_NUM * CELLSIZE;
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final short groupSize = (short) (dataBlocks + parityBlocks);
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripeSize = dataBlocks * cellSize;
   /** number of full stripes in a full block group */
-  private final int BLK_GROUP_STRIPE_NUM = 16;
+  private final int stripesPerBlock = 16;
   private final Random random = new Random();

   private int[] blockGroupSizes;
@@ -101,23 +100,23 @@ public class TestStripedBlockUtil {

   @Before
   public void setup(){
-    blockGroupSizes = new int[]{1, getDelta(CELLSIZE), CELLSIZE,
-        getDelta(DATA_BLK_NUM) * CELLSIZE,
-        getDelta(DATA_BLK_NUM) * CELLSIZE + getDelta(CELLSIZE),
-        FULL_STRIPE_SIZE, FULL_STRIPE_SIZE + getDelta(CELLSIZE),
-        FULL_STRIPE_SIZE + getDelta(DATA_BLK_NUM) * CELLSIZE,
-        FULL_STRIPE_SIZE + getDelta(DATA_BLK_NUM) * CELLSIZE + getDelta(CELLSIZE),
-        getDelta(BLK_GROUP_STRIPE_NUM) * FULL_STRIPE_SIZE,
-        BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE};
-    byteRangeStartOffsets = new int[] {0, getDelta(CELLSIZE), CELLSIZE - 1};
-    byteRangeSizes = new int[]{1, getDelta(CELLSIZE), CELLSIZE,
-        getDelta(DATA_BLK_NUM) * CELLSIZE,
-        getDelta(DATA_BLK_NUM) * CELLSIZE + getDelta(CELLSIZE),
-        FULL_STRIPE_SIZE, FULL_STRIPE_SIZE + getDelta(CELLSIZE),
-        FULL_STRIPE_SIZE + getDelta(DATA_BLK_NUM) * CELLSIZE,
-        FULL_STRIPE_SIZE + getDelta(DATA_BLK_NUM) * CELLSIZE + getDelta(CELLSIZE),
-        getDelta(BLK_GROUP_STRIPE_NUM) * FULL_STRIPE_SIZE,
-        BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE};
+    blockGroupSizes = new int[]{1, getDelta(cellSize), cellSize,
+        getDelta(dataBlocks) * cellSize,
+        getDelta(dataBlocks) * cellSize + getDelta(cellSize),
+        stripeSize, stripeSize + getDelta(cellSize),
+        stripeSize + getDelta(dataBlocks) * cellSize,
+        stripeSize + getDelta(dataBlocks) * cellSize + getDelta(cellSize),
+        getDelta(stripesPerBlock) * stripeSize,
+        stripesPerBlock * stripeSize};
+    byteRangeStartOffsets = new int[] {0, getDelta(cellSize), cellSize - 1};
+    byteRangeSizes = new int[]{1, getDelta(cellSize), cellSize,
+        getDelta(dataBlocks) * cellSize,
+        getDelta(dataBlocks) * cellSize + getDelta(cellSize),
+        stripeSize, stripeSize + getDelta(cellSize),
+        stripeSize + getDelta(dataBlocks) * cellSize,
+        stripeSize + getDelta(dataBlocks) * cellSize + getDelta(cellSize),
+        getDelta(stripesPerBlock) * stripeSize,
+        stripesPerBlock * stripeSize};
   }

   private int getDelta(int size) {
@@ -130,12 +129,12 @@ public class TestStripedBlockUtil {

   private LocatedStripedBlock createDummyLocatedBlock(int bgSize) {
     final long blockGroupID = -1048576;
-    DatanodeInfo[] locs = new DatanodeInfo[BLK_GROUP_WIDTH];
-    String[] storageIDs = new String[BLK_GROUP_WIDTH];
-    StorageType[] storageTypes = new StorageType[BLK_GROUP_WIDTH];
-    byte[] indices = new byte[BLK_GROUP_WIDTH];
-    for (int i = 0; i < BLK_GROUP_WIDTH; i++) {
-      indices[i] = (byte) ((i + 2) % DATA_BLK_NUM);
+    DatanodeInfo[] locs = new DatanodeInfo[groupSize];
+    String[] storageIDs = new String[groupSize];
+    StorageType[] storageTypes = new StorageType[groupSize];
+    byte[] indices = new byte[groupSize];
+    for (int i = 0; i < groupSize; i++) {
+      indices[i] = (byte) ((i + 2) % dataBlocks);
       // Location port always equals the logical index of a block,
       // for easier verification
       locs[i] = DFSTestUtil.getLocalDatanodeInfo(indices[i]);
@@ -148,20 +147,21 @@ public class TestStripedBlockUtil {
   }

   private byte[][] createInternalBlkBuffers(int bgSize) {
-    byte[][] bufs = new byte[DATA_BLK_NUM + PARITY_BLK_NUM][];
-    int[] pos = new int[DATA_BLK_NUM + PARITY_BLK_NUM];
-    for (int i = 0; i < DATA_BLK_NUM + PARITY_BLK_NUM; i++) {
+    byte[][] bufs = new byte[dataBlocks + parityBlocks][];
+    int[] pos = new int[dataBlocks + parityBlocks];
+    for (int i = 0; i < dataBlocks + parityBlocks; i++) {
       int bufSize = (int) getInternalBlockLength(
-          bgSize, CELLSIZE, DATA_BLK_NUM, i);
+          bgSize, cellSize, dataBlocks, i);
       bufs[i] = new byte[bufSize];
       pos[i] = 0;
     }
     int done = 0;
     while (done < bgSize) {
-      Preconditions.checkState(done % CELLSIZE == 0);
-      StripingCell cell = new StripingCell(EC_POLICY, CELLSIZE, done / CELLSIZE, 0);
+      Preconditions.checkState(done % cellSize == 0);
+      StripingCell cell =
+          new StripingCell(ecPolicy, cellSize, done / cellSize, 0);
       int idxInStripe = cell.idxInStripe;
-      int size = Math.min(CELLSIZE, bgSize - done);
+      int size = Math.min(cellSize, bgSize - done);
       for (int i = 0; i < size; i++) {
         bufs[idxInStripe][pos[idxInStripe] + i] = hashIntToByte(done + i);
       }
@@ -175,11 +175,11 @@ public class TestStripedBlockUtil {
   @Test
   public void testParseDummyStripedBlock() {
     LocatedStripedBlock lsb = createDummyLocatedBlock(
-        BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE);
+        stripeSize * stripesPerBlock);
     LocatedBlock[] blocks = parseStripedBlockGroup(
-        lsb, CELLSIZE, DATA_BLK_NUM, PARITY_BLK_NUM);
-    assertEquals(DATA_BLK_NUM + PARITY_BLK_NUM, blocks.length);
-    for (int i = 0; i < DATA_BLK_NUM; i++) {
+        lsb, cellSize, dataBlocks, parityBlocks);
+    assertEquals(dataBlocks + parityBlocks, blocks.length);
+    for (int i = 0; i < dataBlocks; i++) {
       assertFalse(blocks[i].isStriped());
       assertEquals(i,
           BlockIdManager.getBlockIndex(blocks[i].getBlock().getLocalBlock()));
@@ -191,9 +191,9 @@ public class TestStripedBlockUtil {
   }

   private void verifyInternalBlocks(int numBytesInGroup, int[] expected) {
-    for (int i = 1; i < BLK_GROUP_WIDTH; i++) {
+    for (int i = 1; i < groupSize; i++) {
       assertEquals(expected[i],
-          getInternalBlockLength(numBytesInGroup, CELLSIZE, DATA_BLK_NUM, i));
+          getInternalBlockLength(numBytesInGroup, cellSize, dataBlocks, i));
     }
   }
@@ -203,38 +203,38 @@ public class TestStripedBlockUtil {
     final int delta = 10;

     // Block group is smaller than a cell
-    verifyInternalBlocks(CELLSIZE - delta,
-        new int[] {CELLSIZE - delta, 0, 0, 0, 0, 0,
-            CELLSIZE - delta, CELLSIZE - delta, CELLSIZE - delta});
+    verifyInternalBlocks(cellSize - delta,
+        new int[] {cellSize - delta, 0, 0, 0, 0, 0,
+            cellSize - delta, cellSize - delta, cellSize - delta});

     // Block group is exactly as large as a cell
-    verifyInternalBlocks(CELLSIZE,
-        new int[] {CELLSIZE, 0, 0, 0, 0, 0,
-            CELLSIZE, CELLSIZE, CELLSIZE});
+    verifyInternalBlocks(cellSize,
+        new int[] {cellSize, 0, 0, 0, 0, 0,
+            cellSize, cellSize, cellSize});

     // Block group is a little larger than a cell
-    verifyInternalBlocks(CELLSIZE + delta,
-        new int[] {CELLSIZE, delta, 0, 0, 0, 0,
-            CELLSIZE, CELLSIZE, CELLSIZE});
+    verifyInternalBlocks(cellSize + delta,
+        new int[] {cellSize, delta, 0, 0, 0, 0,
+            cellSize, cellSize, cellSize});

     // Block group contains multiple stripes and ends at stripe boundary
-    verifyInternalBlocks(2 * DATA_BLK_NUM * CELLSIZE,
-        new int[] {2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
-            2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
-            2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE});
+    verifyInternalBlocks(2 * dataBlocks * cellSize,
+        new int[] {2 * cellSize, 2 * cellSize, 2 * cellSize,
+            2 * cellSize, 2 * cellSize, 2 * cellSize,
+            2 * cellSize, 2 * cellSize, 2 * cellSize});

     // Block group contains multiple stripes and ends at cell boundary
     // (not ending at stripe boundary)
-    verifyInternalBlocks(2 * DATA_BLK_NUM * CELLSIZE + CELLSIZE,
-        new int[] {3 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
-            2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
-            3 * CELLSIZE, 3 * CELLSIZE, 3 * CELLSIZE});
+    verifyInternalBlocks(2 * dataBlocks * cellSize + cellSize,
+        new int[] {3 * cellSize, 2 * cellSize, 2 * cellSize,
+            2 * cellSize, 2 * cellSize, 2 * cellSize,
+            3 * cellSize, 3 * cellSize, 3 * cellSize});

     // Block group contains multiple stripes and doesn't end at cell boundary
-    verifyInternalBlocks(2 * DATA_BLK_NUM * CELLSIZE - delta,
-        new int[] {2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
-            2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE - delta,
-            2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE});
+    verifyInternalBlocks(2 * dataBlocks * cellSize - delta,
+        new int[] {2 * cellSize, 2 * cellSize, 2 * cellSize,
+            2 * cellSize, 2 * cellSize, 2 * cellSize - delta,
+            2 * cellSize, 2 * cellSize, 2 * cellSize});
   }
@@ -244,7 +244,7 @@ public class TestStripedBlockUtil {
   @Test
   public void testDivideByteRangeIntoStripes() {
     ByteBuffer assembled =
-        ByteBuffer.allocate(BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE);
+        ByteBuffer.allocate(stripesPerBlock * stripeSize);
     for (int bgSize : blockGroupSizes) {
       LocatedStripedBlock blockGroup = createDummyLocatedBlock(bgSize);
       byte[][] internalBlkBufs = createInternalBlkBuffers(bgSize);
@@ -253,11 +253,11 @@ public class TestStripedBlockUtil {
         if (brStart + brSize > bgSize) {
           continue;
         }
-        AlignedStripe[] stripes = divideByteRangeIntoStripes(EC_POLICY,
-            CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled);
+        AlignedStripe[] stripes = divideByteRangeIntoStripes(ecPolicy,
+            cellSize, blockGroup, brStart, brStart + brSize - 1, assembled);

         for (AlignedStripe stripe : stripes) {
-          for (int i = 0; i < DATA_BLK_NUM; i++) {
+          for (int i = 0; i < dataBlocks; i++) {
             StripingChunk chunk = stripe.chunks[i];
             if (chunk == null || chunk.state != StripingChunk.REQUESTED) {
               continue;
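For testDivideByteRangeIntoStripes, the underlying arithmetic maps a logical byte range onto (stripe, data-block) coordinates, cell by cell. A rough standalone sketch of that mapping under an assumed 64 KB cell size and 6 data blocks; this is our illustration of the cell geometry, not the divideByteRangeIntoStripes implementation:

public class ByteRangeToCellsSketch {
  public static void main(String[] args) {
    final int cellSize = 64 * 1024; // assumed default cell size
    final int dataBlocks = 6;
    long start = 100_000, endInclusive = 300_000; // arbitrary byte range
    for (long off = start; off <= endInclusive; ) {
      long cellIdx = off / cellSize;           // global cell index in the group
      long stripe = cellIdx / dataBlocks;      // which stripe
      long idxInStripe = cellIdx % dataBlocks; // which data block serves it
      long chunkEnd = Math.min((cellIdx + 1) * cellSize - 1, endInclusive);
      System.out.printf("bytes [%d, %d] -> stripe %d, data block %d%n",
          off, chunkEnd, stripe, idxInStripe);
      off = chunkEnd + 1;
    }
  }
}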
