http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
index b532443..1574a03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
@@ -39,23 +36,18 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -90,10 +82,14 @@ public class TestDecommissionWithStriped {
   private Configuration conf;
   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
   private int numDNs;
-  private final int blockSize = StripedFileTestUtil.blockSize;
-  private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int dataBlocks = ecPolicy.getNumDataUnits();
+  private final int parityBlocks = ecPolicy.getNumParityUnits();
+  private final int blockSize = cellSize * 4;
+  private final int blockGroupSize = blockSize * dataBlocks;
   private final Path ecDir = new Path("/" + this.getClass().getSimpleName());
 
   private FSNamesystem fsn;
@@ -132,12 +128,12 @@ public class TestDecommissionWithStriped {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(
         DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY,
-        StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE - 1);
+        cellSize - 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
         false);
 
-    numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2;
+    numDNs = dataBlocks + parityBlocks + 2;
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.waitActive();
     dfs = cluster.getFileSystem(0);
@@ -194,7 +190,7 @@ public class TestDecommissionWithStriped {
     LOG.info("Starting test testDecommissionWithURBlocksForSameBlockGroup");
 
     final Path ecFile = new Path(ecDir, "testDecommissionWithCorruptBlocks");
-    int writeBytes = BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2;
+    int writeBytes = cellSize * dataBlocks * 2;
     writeStripedFile(dfs, ecFile, writeBytes);
     Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
 
@@ -202,8 +198,8 @@ public class TestDecommissionWithStriped {
     LocatedBlock lb = dfs.getClient().getLocatedBlocks(ecFile.toString(), 0)
         .get(0);
     DatanodeInfo[] dnLocs = lb.getLocations();
-    assertEquals(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS, dnLocs.length);
-    int decommNodeIndex = NUM_DATA_BLOCKS - 1;
+    assertEquals(dataBlocks + parityBlocks, dnLocs.length);
+    int decommNodeIndex = dataBlocks - 1;
     int stopNodeIndex = 1;
 
     // add the nodes which will be decommissioning
@@ -273,7 +269,7 @@ public class TestDecommissionWithStriped {
 
     assertNull(checkFile(dfs, ecFile, 9, decommisionNodes, numDNs));
     StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
-        null);
+        null, blockGroupSize);
     cleanupFile(dfs, ecFile);
   }
 
@@ -294,7 +290,7 @@ public class TestDecommissionWithStriped {
     LOG.info("Starting test testFileChecksumAfterDecommission");
 
     final Path ecFile = new Path(ecDir, "testFileChecksumAfterDecommission");
-    int writeBytes = BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS;
+    int writeBytes = cellSize * dataBlocks;
     writeStripedFile(dfs, ecFile, writeBytes);
     Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
     FileChecksum fileChecksum1 = dfs.getFileChecksum(ecFile, writeBytes);
@@ -303,7 +299,7 @@ public class TestDecommissionWithStriped {
     LocatedBlock lb = dfs.getClient().getLocatedBlocks(ecFile.toString(), 0)
         .get(0);
     DatanodeInfo[] dnLocs = lb.getLocations();
-    assertEquals(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS, dnLocs.length);
+    assertEquals(dataBlocks + parityBlocks, dnLocs.length);
     int decommNodeIndex = 1;
 
     // add the node which will be decommissioning
@@ -312,7 +308,7 @@ public class TestDecommissionWithStriped {
     assertEquals(decommisionNodes.size(), fsn.getNumDecomLiveDataNodes());
     assertNull(checkFile(dfs, ecFile, 9, decommisionNodes, numDNs));
     StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
-        null);
+        null, blockGroupSize);
 
     // verify checksum
     FileChecksum fileChecksum2 = dfs.getFileChecksum(ecFile, writeBytes);
@@ -355,7 +351,7 @@ public class TestDecommissionWithStriped {
 
     assertNull(checkFile(dfs, ecFile, storageCount, decommisionNodes, numDNs));
     StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
-        null);
+        null, blockGroupSize);
 
     assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
 
@@ -437,7 +433,7 @@ public class TestDecommissionWithStriped {
     StripedFileTestUtil.waitBlockGroupsReported(dfs, ecFile.toString());
 
     StripedFileTestUtil.checkData(dfs, ecFile, writeBytes,
-        new ArrayList<DatanodeInfo>(), null);
+        new ArrayList<DatanodeInfo>(), null, blockGroupSize);
   }
 
   private void writeConfigFile(Path name, List<String> nodes)
@@ -528,7 +524,7 @@ public class TestDecommissionWithStriped {
    * decommissioned nodes, verify their replication is equal to what is
    * specified.
    *
-   * @param downnode
+   * @param decommissionedNodes
    *          - if null, there is no decommissioned node for this file.
    * @return - null if no failure found, else an error message string.
    */
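
The pattern in this file repeats across the rest of the patch: instead of reading shared static constants from StripedFileTestUtil, each test derives its striping geometry from the system default erasure coding policy. A minimal sketch of that pattern, assuming field names like those above (the concrete data/parity/cell values depend on whichever policy getSystemDefaultPolicy() returns):

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

public class EcTestGeometry {
  // Resolve the geometry from the system default policy instead of
  // hard-coded StripedFileTestUtil constants.
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private final int dataBlocks = ecPolicy.getNumDataUnits();
  private final int parityBlocks = ecPolicy.getNumParityUnits();
  private final int cellSize = ecPolicy.getCellSize();
  // Per-test choice: how many full stripes make up one internal block.
  private final int stripesPerBlock = 4;
  private final int blockSize = cellSize * stripesPerBlock;
  // A block group spans one internal block on each of the data units.
  private final int blockGroupSize = blockSize * dataBlocks;
  // Enough datanodes for one full group plus two spares for decommissioning.
  private final int numDNs = dataBlocks + parityBlocks + 2;
}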

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
index 5b63cc4..e02c0bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
@@ -39,16 +39,17 @@ public class TestErasureCodingPolicyWithSnapshot {
   private DistributedFileSystem fs;
   private Configuration conf;
 
-  private final static short GROUP_SIZE = (short) (StripedFileTestUtil.
-      NUM_DATA_BLOCKS + StripedFileTestUtil.NUM_PARITY_BLOCKS);
   private final static int SUCCESS = 0;
   private final ErasureCodingPolicy sysDefaultPolicy =
-      StripedFileTestUtil.TEST_EC_POLICY;
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final short groupSize = (short) (
+      sysDefaultPolicy.getNumDataUnits() +
+          sysDefaultPolicy.getNumParityUnits());
 
   @Before
   public void setupCluster() throws IOException {
     conf = new HdfsConfiguration();
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
index 8f83ba5..1c75f90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
@@ -22,9 +22,11 @@ import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -44,16 +46,17 @@ import java.io.IOException;
 public class TestFileChecksum {
   private static final Logger LOG = LoggerFactory
       .getLogger(TestFileChecksum.class);
-
-  private int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private int dataBlocks = ecPolicy.getNumDataUnits();
+  private int parityBlocks = ecPolicy.getNumParityUnits();
 
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private Configuration conf;
   private DFSClient client;
 
-  private int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
+  private int cellSize = ecPolicy.getCellSize();
   private int stripesPerBlock = 6;
   private int blockSize = cellSize * stripesPerBlock;
   private int numBlockGroups = 10;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index f7bac28..ca8f6db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -55,22 +56,19 @@ public class TestLeaseRecoveryStriped {
   public static final Log LOG = LogFactory
       .getLog(TestLeaseRecoveryStriped.class);
 
-  private static final ErasureCodingPolicy ecPolicy =
-      StripedFileTestUtil.TEST_EC_POLICY;
-  private static final int NUM_DATA_BLOCKS = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private static final int NUM_PARITY_BLOCKS = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  private static final int CELL_SIZE = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private static final int STRIPE_SIZE = NUM_DATA_BLOCKS * CELL_SIZE;
-  private static final int STRIPES_PER_BLOCK = 15;
-  private static final int BLOCK_SIZE = CELL_SIZE * STRIPES_PER_BLOCK;
-  private static final int BLOCK_GROUP_SIZE = BLOCK_SIZE * NUM_DATA_BLOCKS;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int dataBlocks = ecPolicy.getNumDataUnits();
+  private final int parityBlocks = ecPolicy.getNumParityUnits();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripSize = dataBlocks * cellSize;
+  private final int stripesPerBlock = 15;
+  private final int blockSize = cellSize * stripesPerBlock;
+  private final int blockGroupSize = blockSize * dataBlocks;
   private static final int bytesPerChecksum = 512;
 
   static {
     GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
-    StripedFileTestUtil.stripesPerBlock = STRIPES_PER_BLOCK;
-    StripedFileTestUtil.blockSize = BLOCK_SIZE;
-    StripedFileTestUtil.BLOCK_GROUP_SIZE = BLOCK_GROUP_SIZE;
   }
 
   static private final String fakeUsername = "fakeUser1";
@@ -85,13 +83,13 @@ public class TestLeaseRecoveryStriped {
   @Before
   public void setup() throws IOException {
     conf = new HdfsConfiguration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 6000L);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
         false);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
-    final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
+    final int numDNs = dataBlocks + parityBlocks;
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.waitActive();
     dfs = cluster.getFileSystem();
@@ -106,7 +104,7 @@ public class TestLeaseRecoveryStriped {
     }
   }
 
-  private static int[][][] getBlockLengthsSuite() {
+  private int[][][] getBlockLengthsSuite() {
     final int groups = 4;
     final int minNumCell = 3;
     final int maxNumCell = 11;
@@ -120,13 +118,13 @@ public class TestLeaseRecoveryStriped {
         delta = bytesPerChecksum;
       }
       int[][] suite = new int[2][];
-      int[] lens = new int[NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS];
+      int[] lens = new int[dataBlocks + parityBlocks];
       long[] lenInLong = new long[lens.length];
       for (int j = 0; j < lens.length; j++) {
         int numCell = random.nextInt(maxNumCell - minNumCell + 1) + minNumCell;
-        int numDelta = j < NUM_DATA_BLOCKS ?
+        int numDelta = j < dataBlocks ?
             random.nextInt(maxNumDelta - minNumDelta + 1) + minNumDelta : 0;
-        lens[j] = CELL_SIZE * numCell + delta * numDelta;
+        lens[j] = cellSize * numCell + delta * numDelta;
         lenInLong[j] = lens[j];
       }
       suite[0] = lens;
@@ -137,13 +135,13 @@ public class TestLeaseRecoveryStriped {
     return blkLenSuite;
   }
 
-  private static final int[][][] BLOCK_LENGTHS_SUITE = getBlockLengthsSuite();
+  private final int[][][] blockLengthsSuite = getBlockLengthsSuite();
 
   @Test
   public void testLeaseRecovery() throws Exception {
-    for (int i = 0; i < BLOCK_LENGTHS_SUITE.length; i++) {
-      int[] blockLengths = BLOCK_LENGTHS_SUITE[i][0];
-      int safeLength = BLOCK_LENGTHS_SUITE[i][1][0];
+    for (int i = 0; i < blockLengthsSuite.length; i++) {
+      int[] blockLengths = blockLengthsSuite[i][0];
+      int safeLength = blockLengthsSuite[i][1][0];
       try {
         runTest(blockLengths, safeLength);
       } catch (Throwable e) {
@@ -162,20 +160,20 @@ public class TestLeaseRecoveryStriped {
     List<Long> oldGS = new ArrayList<>();
     oldGS.add(1001L);
     StripedFileTestUtil.checkData(dfs, p, safeLength,
-        new ArrayList<DatanodeInfo>(), oldGS);
+        new ArrayList<DatanodeInfo>(), oldGS, blockGroupSize);
     // After recovery, storages are reported by primary DN. we should verify
     // storages reported by blockReport.
     cluster.restartNameNode(true);
     cluster.waitFirstBRCompleted(0, 10000);
     StripedFileTestUtil.checkData(dfs, p, safeLength,
-        new ArrayList<DatanodeInfo>(), oldGS);
+        new ArrayList<DatanodeInfo>(), oldGS, blockGroupSize);
   }
 
   private void writePartialBlocks(int[] blockLengths) throws Exception {
     final FSDataOutputStream out = dfs.create(p);
     final DFSStripedOutputStream stripedOut = (DFSStripedOutputStream) out
         .getWrappedStream();
-    int length = (STRIPES_PER_BLOCK - 1) * STRIPE_SIZE;
+    int length = (stripesPerBlock - 1) * stripSize;
     int[] posToKill = getPosToKill(blockLengths);
     int checkingPos = nextCheckingPos(posToKill, 0);
     try {
@@ -209,20 +207,20 @@ public class TestLeaseRecoveryStriped {
   }
 
   private int[] getPosToKill(int[] blockLengths) {
-    int[] posToKill = new int[NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS];
-    for (int i = 0; i < NUM_DATA_BLOCKS; i++) {
-      int numStripe = (blockLengths[i] - 1) / CELL_SIZE;
-      posToKill[i] = numStripe * STRIPE_SIZE + i * CELL_SIZE
-          + blockLengths[i] % CELL_SIZE;
-      if (blockLengths[i] % CELL_SIZE == 0) {
-        posToKill[i] += CELL_SIZE;
+    int[] posToKill = new int[dataBlocks + parityBlocks];
+    for (int i = 0; i < dataBlocks; i++) {
+      int numStripe = (blockLengths[i] - 1) / cellSize;
+      posToKill[i] = numStripe * stripSize + i * cellSize
+          + blockLengths[i] % cellSize;
+      if (blockLengths[i] % cellSize == 0) {
+        posToKill[i] += cellSize;
       }
     }
-    for (int i = NUM_DATA_BLOCKS; i < NUM_DATA_BLOCKS
-        + NUM_PARITY_BLOCKS; i++) {
-      Preconditions.checkArgument(blockLengths[i] % CELL_SIZE == 0);
-      int numStripe = (blockLengths[i]) / CELL_SIZE;
-      posToKill[i] = numStripe * STRIPE_SIZE;
+    for (int i = dataBlocks; i < dataBlocks
+        + parityBlocks; i++) {
+      Preconditions.checkArgument(blockLengths[i] % cellSize == 0);
+      int numStripe = (blockLengths[i]) / cellSize;
+      posToKill[i] = numStripe * stripSize;
     }
     return posToKill;
   }
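
The index arithmetic in getPosToKill maps a target length for each internal block back to an offset in the logical (pre-striping) output stream at which the writer is interrupted. A hedged, standalone restatement of that mapping for the data blocks (names and the example geometry are illustrative, not part of the patch):

public final class StripePositionSketch {
  /**
   * Logical stream offset at which internal data block {@code blkIndex}
   * reaches {@code targetLen} bytes, given that each full stripe contributes
   * {@code cellSize} bytes to every data block.
   */
  static long posToKill(int blkIndex, int targetLen, int cellSize,
      int dataBlocks) {
    int stripSize = cellSize * dataBlocks;       // user bytes per stripe
    int numStripe = (targetLen - 1) / cellSize;  // stripe holding the last byte
    int withinCell = targetLen % cellSize;       // bytes written into that cell
    if (withinCell == 0) {
      withinCell = cellSize;                     // the last cell is exactly full
    }
    return (long) numStripe * stripSize + (long) blkIndex * cellSize + withinCell;
  }

  public static void main(String[] args) {
    // Illustrative geometry only: 6 data blocks, 64 KiB cells.
    final int cellSize = 64 * 1024;
    // Stop internal block 2 after three and a half cells worth of data.
    System.out.println(posToKill(2, 3 * cellSize + cellSize / 2, cellSize, 6));
  }
}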

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
index 6d2227f..6014332 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -52,9 +54,6 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs;
-
 public class TestReadStripedFileWithDecoding {
  static final Log LOG = LogFactory.getLog(TestReadStripedFileWithDecoding.class);
 
@@ -68,15 +67,22 @@ public class TestReadStripedFileWithDecoding {
 
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
-  private static final short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private static final short parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private final int smallFileLength = blockSize * dataBlocks - 123;
-  private final int largeFileLength = blockSize * dataBlocks + 123;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks =
+      (short) ecPolicy.getNumParityUnits();
+  private final int numDNs = dataBlocks + parityBlocks;
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripPerBlock = 4;
+  private final int blockSize = cellSize * stripPerBlock;
+  private final int blockGroupSize = blockSize * dataBlocks;
+  private final int smallFileLength = blockGroupSize - 123;
+  private final int largeFileLength = blockGroupSize + 123;
   private final int[] fileLengths = {smallFileLength, largeFileLength};
-  private static final int[] dnFailureNums = getDnFailureNums();
+  private final int[] dnFailureNums = getDnFailureNums();
 
-  private static int[] getDnFailureNums() {
+  private int[] getDnFailureNums() {
     int[] dnFailureNums = new int[parityBlocks];
     for (int i = 0; i < dnFailureNums.length; i++) {
       dnFailureNums[i] = i + 1;
@@ -191,7 +197,8 @@ public class TestReadStripedFileWithDecoding {
     StripedFileTestUtil.verifyStatefulRead(fs, testPath, length, expected, buffer);
     StripedFileTestUtil.verifyStatefulRead(fs, testPath, length, expected,
         ByteBuffer.allocate(length + 100));
-    StripedFileTestUtil.verifySeek(fs, testPath, length);
+    StripedFileTestUtil.verifySeek(fs, testPath, length, ecPolicy,
+        blockGroupSize);
   }
 
   private void testReadWithDNFailure(int fileLength, int dnFailureNum)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
index fe89401..2ee8663 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
@@ -22,7 +22,9 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.Rule;
@@ -30,11 +32,6 @@ import org.junit.rules.Timeout;
 
 import java.io.IOException;
 
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.TEST_EC_POLICY;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs;
-
 /**
 * Test reading a striped file when some of its blocks are missing (not included
  * in the block locations returned by the NameNode).
@@ -45,8 +42,15 @@ public class TestReadStripedFileWithMissingBlocks {
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private Configuration conf = new HdfsConfiguration();
-  private final short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripPerBlock = 4;
+  private final int blockSize = stripPerBlock * cellSize;
+  private final int blockGroupSize = blockSize * dataBlocks;
+  private final int numDNs = dataBlocks + parityBlocks;
   private final int fileLength = blockSize * dataBlocks + 123;
 
   @Rule
@@ -57,7 +61,7 @@ public class TestReadStripedFileWithMissingBlocks {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.getFileSystem().getClient().setErasureCodingPolicy(
-        "/", TEST_EC_POLICY);
+        "/", ecPolicy);
     fs = cluster.getFileSystem();
   }
 
@@ -70,9 +74,9 @@ public class TestReadStripedFileWithMissingBlocks {
 
   @Test
   public void testReadFileWithMissingBlocks() throws Exception {
-    for (int missingData = 1; missingData <= NUM_PARITY_BLOCKS; missingData++) {
+    for (int missingData = 1; missingData <= dataBlocks; missingData++) {
       for (int missingParity = 0; missingParity <=
-          NUM_PARITY_BLOCKS - missingData; missingParity++) {
+          parityBlocks - missingData; missingParity++) {
         try {
           setup();
           readFileWithMissingBlocks(new Path("/foo"), fileLength,
@@ -102,7 +106,7 @@ public class TestReadStripedFileWithMissingBlocks {
     }
     for (int i = 0; i < missingParityNum; i++) {
       missingDataNodes[i + missingDataNum] = i +
-          Math.min(StripedFileTestUtil.NUM_DATA_BLOCKS, dataBlocks);
+          Math.min(ecPolicy.getNumDataUnits(), dataBlocks);
     }
     stopDataNodes(locs, missingDataNodes);
 
@@ -112,7 +116,8 @@ public class TestReadStripedFileWithMissingBlocks {
 
     byte[] smallBuf = new byte[1024];
     byte[] largeBuf = new byte[fileLength + 100];
-    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength);
+    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength, ecPolicy,
+        blockGroupSize);
     StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
         smallBuf);
     StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 10fc43e..a3d9ef6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
@@ -60,12 +61,14 @@ import org.junit.Test;
 public class TestReconstructStripedFile {
  public static final Log LOG = LogFactory.getLog(TestReconstructStripedFile.class);
 
-  private static final int dataBlkNum = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private static final int parityBlkNum = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  private static final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private static final int blockSize = cellSize * 3;
-  private static final int groupSize = dataBlkNum + parityBlkNum;
-  private static final int dnNum = groupSize + parityBlkNum;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int dataBlkNum = ecPolicy.getNumDataUnits();
+  private final int parityBlkNum = ecPolicy.getNumParityUnits();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int blockSize = cellSize * 3;
+  private final int groupSize = dataBlkNum + parityBlkNum;
+  private final int dnNum = groupSize + parityBlkNum;
 
   static {
     GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
index 2b6b65a..2d37c06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
@@ -23,7 +23,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.junit.After;
@@ -41,11 +43,13 @@ import static org.junit.Assert.assertTrue;
 
 public class TestSafeModeWithStripedFile {
 
-  static final short DATA_BLK_NUM = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  static final short PARITY_BLK_NUM = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  static final int numDNs = DATA_BLK_NUM + PARITY_BLK_NUM;
-  static final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  static final int blockSize = cellSize * 2;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final int numDNs = dataBlocks + parityBlocks;
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int blockSize = cellSize * 2;
 
   private MiniDFSCluster cluster;
   private Configuration conf;
@@ -78,7 +82,7 @@ public class TestSafeModeWithStripedFile {
 
   @Test
   public void testStripedFile1() throws IOException {
-    int numCell = DATA_BLK_NUM - 1;
+    int numCell = dataBlocks - 1;
     doTest(cellSize * numCell, numCell);
   }
 
@@ -101,7 +105,7 @@ public class TestSafeModeWithStripedFile {
     // If we only have 1 block, NN won't enter safemode in the first place
     // because the threshold is 0 blocks.
     // So we need to add another 2 blocks.
-    int bigSize = blockSize * DATA_BLK_NUM * 2;
+    int bigSize = blockSize * dataBlocks * 2;
     Path bigFilePath = new Path("/testStripedFile_" + bigSize);
     data = StripedFileTestUtil.generateBytes(bigSize);
     DFSTestUtil.writeFile(fs, bigFilePath, data);
@@ -143,7 +147,7 @@ public class TestSafeModeWithStripedFile {
     assertEquals(1, NameNodeAdapter.getSafeModeSafeBlocks(nn));
 
     // the 2 blocks of bigFile need DATA_BLK_NUM storages to be safe
-    for (int i = minStorages; i < DATA_BLK_NUM - 1; i++) {
+    for (int i = minStorages; i < dataBlocks - 1; i++) {
       cluster.restartDataNode(dnprops.remove(0));
       cluster.waitActive();
       cluster.triggerBlockReports();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
index 0836656..8ada593 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
@@ -24,8 +24,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.ipc.RemoteException;
@@ -43,14 +45,18 @@ import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Random;
 
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.stripesPerBlock;
-
 public class TestWriteReadStripedFile {
  public static final Log LOG = LogFactory.getLog(TestWriteReadStripedFile.class);
-  private static int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private static short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final int numDNs = dataBlocks + parityBlocks;
+  private final int stripesPerBlock = 4;
+  private final int blockSize = stripesPerBlock * cellSize;
+  private final int blockGroupSize = blockSize * dataBlocks;
+
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private Configuration conf = new HdfsConfiguration();
@@ -221,7 +227,8 @@ public class TestWriteReadStripedFile {
 
     StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
         largeBuf);
-    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength);
+    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength, ecPolicy,
+        blockGroupSize);
     StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
         ByteBuffer.allocate(fileLength + 100));
     StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
@@ -263,7 +270,8 @@ public class TestWriteReadStripedFile {
     //StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
 
     StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, largeBuf);
-    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength);
+    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength, ecPolicy,
+        blockGroupSize);
     StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, smallBuf);
     // webhdfs doesn't support bytebuffer read
   }
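
Much of the churn in this and the neighbouring read/write tests comes from the StripedFileTestUtil helpers no longer consulting shared static state: checkData now takes the block group size and verifySeek takes the policy and group size explicitly. A hedged sketch of a verification helper after the change (the method and class names are illustrative; fs, ecPolicy and blockGroupSize would be the test fields shown above):

import java.util.ArrayList;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

final class StripedVerificationSketch {
  static void verifyStripedFile(DistributedFileSystem fs, Path srcPath,
      int fileLength, ErasureCodingPolicy ecPolicy, int blockGroupSize)
      throws Exception {
    byte[] expected = StripedFileTestUtil.generateBytes(fileLength);
    StripedFileTestUtil.verifyLength(fs, srcPath, fileLength);
    // Seek verification now needs the policy and the block group size.
    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength, ecPolicy,
        blockGroupSize);
    // Data verification takes the group size too; no decommissioned nodes
    // and no expected old generation stamps in this sketch.
    StripedFileTestUtil.checkData(fs, srcPath, fileLength,
        new ArrayList<DatanodeInfo>(), null, blockGroupSize);
  }
}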

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
index fad2dcd..b89e169 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Assert;
@@ -32,9 +34,6 @@ import org.junit.Test;
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs;
-
 public class TestWriteStripedFileWithFailure {
   public static final Log LOG = LogFactory
       .getLog(TestWriteStripedFileWithFailure.class);
@@ -47,8 +46,12 @@ public class TestWriteStripedFileWithFailure {
     GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.ALL);
   }
 
-  private final short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private final short parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final int numDNs = dataBlocks + parityBlocks;
+  private final int blockSize = 4 * ecPolicy.getCellSize();
   private final int smallFileLength = blockSize * dataBlocks - 123;
   private final int largeFileLength = blockSize * dataBlocks + 123;
   private final int[] fileLengths = {smallFileLength, largeFileLength};
@@ -153,7 +156,8 @@ public class TestWriteStripedFileWithFailure {
     byte[] largeBuf = new byte[fileLength + 100];
     final byte[] expected = StripedFileTestUtil.generateBytes(fileLength);
     StripedFileTestUtil.verifyLength(fs, srcPath, fileLength);
-    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength);
+    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength, ecPolicy,
+        blockSize * dataBlocks);
     StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
         smallBuf);
     StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index 5cb2571..eedf08d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -212,7 +211,7 @@ public class TestPBHelper {
         datanodeUuids, storageIDs, storageTypes);
     if (isStriped) {
       blkLocs = new StripedBlockWithLocations(blkLocs, indices, dataBlkNum,
-          StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE);
+          ErasureCodingPolicyManager.getSystemDefaultPolicy().getCellSize());
     }
     return blkLocs;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 22b48ad..b63aa4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -44,6 +44,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
+
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.junit.AfterClass;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -196,15 +199,17 @@ public class TestBalancer {
     conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L);
   }
 
-  int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  int groupSize = dataBlocks + parityBlocks;
-  private final static int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private final static int stripesPerBlock = 4;
-  static int DEFAULT_STRIPE_BLOCK_SIZE = cellSize * stripesPerBlock;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int dataBlocks = ecPolicy.getNumDataUnits();
+  private final int parityBlocks = ecPolicy.getNumParityUnits();
+  private final int groupSize = dataBlocks + parityBlocks;
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripesPerBlock = 4;
+  private final int defaultBlockSize = cellSize * stripesPerBlock;
 
-  static void initConfWithStripe(Configuration conf) {
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_STRIPE_BLOCK_SIZE);
+  void initConfWithStripe(Configuration conf) {
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     SimulatedFSDataset.setFactory(conf);
@@ -1908,7 +1913,7 @@ public class TestBalancer {
   private void doTestBalancerWithStripedFile(Configuration conf) throws Exception {
     int numOfDatanodes = dataBlocks + parityBlocks + 2;
     int numOfRacks = dataBlocks;
-    long capacity = 20 * DEFAULT_STRIPE_BLOCK_SIZE;
+    long capacity = 20 * defaultBlockSize;
     long[] capacities = new long[numOfDatanodes];
     for (int i = 0; i < capacities.length; i++) {
       capacities[i] = capacity;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
index 2fc454b..255127f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.junit.Assert;
@@ -33,8 +32,6 @@ import java.io.DataOutputStream;
 import java.io.ByteArrayOutputStream;
 import java.nio.ByteBuffer;
 
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
@@ -43,11 +40,12 @@ import static org.junit.Assert.fail;
  * Test {@link BlockInfoStriped}
  */
 public class TestBlockInfoStriped {
-  private static final int TOTAL_NUM_BLOCKS = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
   private static final long BASE_ID = -1600;
-  private static final Block baseBlock = new Block(BASE_ID);
-  private static final ErasureCodingPolicy testECPolicy
+  private final Block baseBlock = new Block(BASE_ID);
+  private final ErasureCodingPolicy testECPolicy
       = ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int totalBlocks = testECPolicy.getNumDataUnits() +
+      testECPolicy.getNumParityUnits();
   private final BlockInfoStriped info = new BlockInfoStriped(baseBlock,
       testECPolicy);
 
@@ -70,8 +68,8 @@ public class TestBlockInfoStriped {
     // first add NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS storages, i.e., a complete
     // group of blocks/storages
     DatanodeStorageInfo[] storageInfos = DFSTestUtil.createDatanodeStorageInfos(
-        TOTAL_NUM_BLOCKS);
-    Block[] blocks = createReportedBlocks(TOTAL_NUM_BLOCKS);
+        totalBlocks);
+    Block[] blocks = createReportedBlocks(totalBlocks);
     int i = 0;
     for (; i < storageInfos.length; i += 2) {
       info.addStorage(storageInfos[i], blocks[i]);
@@ -85,8 +83,8 @@ public class TestBlockInfoStriped {
 
     // check
     byte[] indices = (byte[]) Whitebox.getInternalState(info, "indices");
-    Assert.assertEquals(TOTAL_NUM_BLOCKS, info.getCapacity());
-    Assert.assertEquals(TOTAL_NUM_BLOCKS, indices.length);
+    Assert.assertEquals(totalBlocks, info.getCapacity());
+    Assert.assertEquals(totalBlocks, indices.length);
     i = 0;
     for (DatanodeStorageInfo storage : storageInfos) {
       int index = info.findStorageInfo(storage);
@@ -99,9 +97,9 @@ public class TestBlockInfoStriped {
     for (DatanodeStorageInfo storage : storageInfos) {
       Assert.assertTrue(info.addStorage(storage, blocks[i++]));
     }
-    Assert.assertEquals(TOTAL_NUM_BLOCKS, info.getCapacity());
-    Assert.assertEquals(TOTAL_NUM_BLOCKS, info.numNodes());
-    Assert.assertEquals(TOTAL_NUM_BLOCKS, indices.length);
+    Assert.assertEquals(totalBlocks, info.getCapacity());
+    Assert.assertEquals(totalBlocks, info.numNodes());
+    Assert.assertEquals(totalBlocks, indices.length);
     i = 0;
     for (DatanodeStorageInfo storage : storageInfos) {
       int index = info.findStorageInfo(storage);
@@ -111,19 +109,19 @@ public class TestBlockInfoStriped {
 
     // the same block is reported from another storage
     DatanodeStorageInfo[] storageInfos2 = DFSTestUtil.createDatanodeStorageInfos(
-        TOTAL_NUM_BLOCKS * 2);
+        totalBlocks * 2);
     // only add the second half of info2
-    for (i = TOTAL_NUM_BLOCKS; i < storageInfos2.length; i++) {
-      info.addStorage(storageInfos2[i], blocks[i % TOTAL_NUM_BLOCKS]);
+    for (i = totalBlocks; i < storageInfos2.length; i++) {
+      info.addStorage(storageInfos2[i], blocks[i % totalBlocks]);
       Assert.assertEquals(i + 1, info.getCapacity());
       Assert.assertEquals(i + 1, info.numNodes());
       indices = (byte[]) Whitebox.getInternalState(info, "indices");
       Assert.assertEquals(i + 1, indices.length);
     }
-    for (i = TOTAL_NUM_BLOCKS; i < storageInfos2.length; i++) {
+    for (i = totalBlocks; i < storageInfos2.length; i++) {
       int index = info.findStorageInfo(storageInfos2[i]);
       Assert.assertEquals(i++, index);
-      Assert.assertEquals(index - TOTAL_NUM_BLOCKS, indices[index]);
+      Assert.assertEquals(index - totalBlocks, indices[index]);
     }
   }
 
@@ -131,8 +129,8 @@ public class TestBlockInfoStriped {
   public void testRemoveStorage() {
     // first add TOTAL_NUM_BLOCKS into the BlockInfoStriped
     DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(
-        TOTAL_NUM_BLOCKS);
-    Block[] blocks = createReportedBlocks(TOTAL_NUM_BLOCKS);
+        totalBlocks);
+    Block[] blocks = createReportedBlocks(totalBlocks);
     for (int i = 0; i < storages.length; i++) {
       info.addStorage(storages[i], blocks[i]);
     }
@@ -142,8 +140,8 @@ public class TestBlockInfoStriped {
     info.removeStorage(storages[2]);
 
     // check
-    Assert.assertEquals(TOTAL_NUM_BLOCKS, info.getCapacity());
-    Assert.assertEquals(TOTAL_NUM_BLOCKS - 2, info.numNodes());
+    Assert.assertEquals(totalBlocks, info.getCapacity());
+    Assert.assertEquals(totalBlocks - 2, info.numNodes());
     byte[] indices = (byte[]) Whitebox.getInternalState(info, "indices");
     for (int i = 0; i < storages.length; i++) {
       int index = info.findStorageInfo(storages[i]);
@@ -158,44 +156,44 @@ public class TestBlockInfoStriped {
 
     // the same block is reported from another storage
     DatanodeStorageInfo[] storages2 = DFSTestUtil.createDatanodeStorageInfos(
-        TOTAL_NUM_BLOCKS * 2);
-    for (int i = TOTAL_NUM_BLOCKS; i < storages2.length; i++) {
-      info.addStorage(storages2[i], blocks[i % TOTAL_NUM_BLOCKS]);
+        totalBlocks * 2);
+    for (int i = totalBlocks; i < storages2.length; i++) {
+      info.addStorage(storages2[i], blocks[i % totalBlocks]);
     }
     // now we should have 8 storages
-    Assert.assertEquals(TOTAL_NUM_BLOCKS * 2 - 2, info.numNodes());
-    Assert.assertEquals(TOTAL_NUM_BLOCKS * 2 - 2, info.getCapacity());
+    Assert.assertEquals(totalBlocks * 2 - 2, info.numNodes());
+    Assert.assertEquals(totalBlocks * 2 - 2, info.getCapacity());
     indices = (byte[]) Whitebox.getInternalState(info, "indices");
-    Assert.assertEquals(TOTAL_NUM_BLOCKS * 2 - 2, indices.length);
-    int j = TOTAL_NUM_BLOCKS;
-    for (int i = TOTAL_NUM_BLOCKS; i < storages2.length; i++) {
+    Assert.assertEquals(totalBlocks * 2 - 2, indices.length);
+    int j = totalBlocks;
+    for (int i = totalBlocks; i < storages2.length; i++) {
       int index = info.findStorageInfo(storages2[i]);
-      if (i == TOTAL_NUM_BLOCKS || i == TOTAL_NUM_BLOCKS + 2) {
-        Assert.assertEquals(i - TOTAL_NUM_BLOCKS, index);
+      if (i == totalBlocks || i == totalBlocks + 2) {
+        Assert.assertEquals(i - totalBlocks, index);
       } else {
         Assert.assertEquals(j++, index);
       }
     }
 
     // remove the storages from storages2
-    for (int i = 0; i < TOTAL_NUM_BLOCKS; i++) {
-      info.removeStorage(storages2[i + TOTAL_NUM_BLOCKS]);
+    for (int i = 0; i < totalBlocks; i++) {
+      info.removeStorage(storages2[i + totalBlocks]);
     }
     // now we should have 3 storages
-    Assert.assertEquals(TOTAL_NUM_BLOCKS - 2, info.numNodes());
-    Assert.assertEquals(TOTAL_NUM_BLOCKS * 2 - 2, info.getCapacity());
+    Assert.assertEquals(totalBlocks - 2, info.numNodes());
+    Assert.assertEquals(totalBlocks * 2 - 2, info.getCapacity());
     indices = (byte[]) Whitebox.getInternalState(info, "indices");
-    Assert.assertEquals(TOTAL_NUM_BLOCKS * 2 - 2, indices.length);
-    for (int i = 0; i < TOTAL_NUM_BLOCKS; i++) {
+    Assert.assertEquals(totalBlocks * 2 - 2, indices.length);
+    for (int i = 0; i < totalBlocks; i++) {
       if (i == 0 || i == 2) {
-        int index = info.findStorageInfo(storages2[i + TOTAL_NUM_BLOCKS]);
+        int index = info.findStorageInfo(storages2[i + totalBlocks]);
         Assert.assertEquals(-1, index);
       } else {
         int index = info.findStorageInfo(storages[i]);
         Assert.assertEquals(i, index);
       }
     }
-    for (int i = TOTAL_NUM_BLOCKS; i < TOTAL_NUM_BLOCKS * 2 - 2; i++) {
+    for (int i = totalBlocks; i < totalBlocks * 2 - 2; i++) {
       Assert.assertEquals(-1, indices[i]);
       Assert.assertNull(info.getDatanode(i));
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
index 1714561..834df6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
@@ -20,10 +20,11 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.ServerSocketUtil;
 import org.junit.Rule;
@@ -33,12 +34,13 @@ import org.junit.rules.Timeout;
 import java.io.IOException;
 
 public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS {
-
-  private final static int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private final static int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  private final static int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private final static int stripesPerBlock = 4;
-  private final static int numDNs = dataBlocks + parityBlocks + 2;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int dataBlocks = ecPolicy.getNumDataUnits();
+  private final int parityBlocks = ecPolicy.getNumParityUnits();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripesPerBlock = 4;
+  private final int numDNs = dataBlocks + parityBlocks + 2;
   private MiniDFSCluster cluster;
   private Configuration conf;
 

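The change to this test is the same one repeated across the rest of the commit: striping parameters are no longer read from the removed StripedFileTestUtil constants but derived from the system default erasure coding policy. A minimal sketch of that pattern, using only the APIs visible in the hunks above (the class name is hypothetical, and the commented values assume the default RS-6-3 policy):

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

    public class StripedTestParams {
      // Resolve the system-wide default policy once and derive everything from it.
      private final ErasureCodingPolicy ecPolicy =
          ErasureCodingPolicyManager.getSystemDefaultPolicy();
      private final int dataBlocks = ecPolicy.getNumDataUnits();     // 6 for RS-6-3
      private final int parityBlocks = ecPolicy.getNumParityUnits(); // 3 for RS-6-3
      private final int cellSize = ecPolicy.getCellSize();
      // Tests size a block as a small multiple of the cell size so that a file of
      // dataBlocks * blockSize bytes fills exactly one block group.
      private final int stripesPerBlock = 4;
      private final int blockSize = cellSize * stripesPerBlock;
      private final int blockGroupSize = blockSize * dataBlocks;
    }

Because the policy lookup happens at construction time rather than class-load time, these become instance fields instead of static constants, which is why several of the hunks below also drop the static modifier.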
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
index 3bc13a8..94ee623 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
@@ -24,10 +24,12 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.net.NetworkTopology;
@@ -46,10 +48,6 @@ import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
 
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
-
 public class TestReconstructStripedBlocksWithRackAwareness {
   public static final Logger LOG = LoggerFactory.getLogger(
       TestReconstructStripedBlocksWithRackAwareness.class);
@@ -60,10 +58,14 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     GenericTestUtils.setLogLevel(BlockManager.LOG, Level.ALL);
   }
 
-  private static final String[] hosts =
-      getHosts(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1);
-  private static final String[] racks =
-      getRacks(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1, NUM_DATA_BLOCKS);
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final String[] hosts = getHosts(dataBlocks + parityBlocks + 1);
+  private final String[] racks =
+      getRacks(dataBlocks + parityBlocks + 1, dataBlocks);
 
   private static String[] getHosts(int numHosts) {
     String[] hosts = new String[numHosts];
@@ -157,7 +159,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     final Path file = new Path("/foo");
     // the file's block is in 9 dn but 5 racks
     DFSTestUtil.createFile(fs, file,
-        BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2, (short) 1, 0L);
+        cellSize * dataBlocks * 2, (short) 1, 0L);
     Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
 
     final INodeFile fileNode = fsn.getFSDirectory()
@@ -169,7 +171,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     for (DatanodeStorageInfo storage : blockInfo.storages) {
       rackSet.add(storage.getDatanodeDescriptor().getNetworkLocation());
     }
-    Assert.assertEquals(NUM_DATA_BLOCKS - 1, rackSet.size());
+    Assert.assertEquals(dataBlocks - 1, rackSet.size());
 
     // restart the stopped datanode
     cluster.restartDataNode(lastHost);
@@ -178,7 +180,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     // make sure we have 6 racks again
     NetworkTopology topology = bm.getDatanodeManager().getNetworkTopology();
     Assert.assertEquals(hosts.length, topology.getNumOfLeaves());
-    Assert.assertEquals(NUM_DATA_BLOCKS, topology.getNumOfRacks());
+    Assert.assertEquals(dataBlocks, topology.getNumOfRacks());
 
     // pause all the heartbeats
     for (DataNode dn : cluster.getDataNodes()) {
@@ -225,7 +227,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 
     final Path file = new Path("/foo");
     DFSTestUtil.createFile(fs, file,
-        BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2, (short) 1, 0L);
+        cellSize * dataBlocks * 2, (short) 1, 0L);
 
     // stop host1
     MiniDFSCluster.DataNodeProperties host1 = stopDataNode("host1");
@@ -234,7 +236,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     cluster.waitActive();
 
     // wait for reconstruction to finish
-    final short blockNum = (short) (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS);
+    final short blockNum = (short) (dataBlocks + parityBlocks);
     DFSTestUtil.waitForReplication(fs, file, blockNum, 15 * 1000);
 
     // restart host1
@@ -263,12 +265,12 @@ public class TestReconstructStripedBlocksWithRackAwareness {
    */
   @Test
   public void testReconstructionWithDecommission() throws Exception {
-    final String[] racks = getRacks(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2,
-        NUM_DATA_BLOCKS);
-    final String[] hosts = getHosts(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2);
+    final String[] rackNames = getRacks(dataBlocks + parityBlocks + 2,
+        dataBlocks);
+    final String[] hostNames = getHosts(dataBlocks + parityBlocks + 2);
     // we now have 11 hosts on 6 racks with distribution: 2-2-2-2-2-1
-    cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts)
-        .numDataNodes(hosts.length).build();
+    cluster = new MiniDFSCluster.Builder(conf).racks(rackNames).hosts(hostNames)
+        .numDataNodes(hostNames.length).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     fs.setErasureCodingPolicy(new Path("/"), null);
@@ -277,11 +279,13 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     final DatanodeManager dm = bm.getDatanodeManager();
 
     // stop h9 and h10 and create a file with 6+3 internal blocks
-    MiniDFSCluster.DataNodeProperties h9 = stopDataNode(hosts[hosts.length - 3]);
-    MiniDFSCluster.DataNodeProperties h10 = stopDataNode(hosts[hosts.length - 2]);
+    MiniDFSCluster.DataNodeProperties h9 =
+        stopDataNode(hostNames[hostNames.length - 3]);
+    MiniDFSCluster.DataNodeProperties h10 =
+        stopDataNode(hostNames[hostNames.length - 2]);
     final Path file = new Path("/foo");
     DFSTestUtil.createFile(fs, file,
-        BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2, (short) 1, 0L);
+        cellSize * dataBlocks * 2, (short) 1, 0L);
     final BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory()
         .getINode(file.toString()).asFile().getLastBlock();
 
@@ -290,18 +294,19 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     cluster.waitActive();
 
     // stop h11 so that the reconstruction happens
-    MiniDFSCluster.DataNodeProperties h11 = stopDataNode(hosts[hosts.length - 1]);
+    MiniDFSCluster.DataNodeProperties h11 =
+        stopDataNode(hostNames[hostNames.length - 1]);
     boolean recovered = bm.countNodes(blockInfo).liveReplicas() >=
-        NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
+        dataBlocks + parityBlocks;
     for (int i = 0; i < 10 & !recovered; i++) {
       Thread.sleep(1000);
       recovered = bm.countNodes(blockInfo).liveReplicas() >=
-          NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
+          dataBlocks + parityBlocks;
     }
     Assert.assertTrue(recovered);
 
     // mark h9 as decommissioning
-    DataNode datanode9 = getDataNode(hosts[hosts.length - 3]);
+    DataNode datanode9 = getDataNode(hostNames[hostNames.length - 3]);
     Assert.assertNotNull(datanode9);
     final DatanodeDescriptor dn9 = dm.getDatanode(datanode9.getDatanodeId());
     dn9.startDecommission();
@@ -310,7 +315,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     cluster.restartDataNode(h10);
     cluster.restartDataNode(h11);
     cluster.waitActive();
-    DataNodeTestUtils.triggerBlockReport(getDataNode(hosts[hosts.length - 1]));
+    DataNodeTestUtils.triggerBlockReport(
+        getDataNode(hostNames[hostNames.length - 1]));
 
     // start decommissioning h9
     boolean satisfied = bm.isPlacementPolicySatisfied(blockInfo);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
index 41a7878..10ea01f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
@@ -37,8 +37,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
@@ -56,11 +57,13 @@ public class TestSequentialBlockGroupId {
   private static final Log LOG = LogFactory
       .getLog("TestSequentialBlockGroupId");
 
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
   private final short REPLICATION = 1;
   private final long SEED = 0;
-  private final int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private final int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
+  private final int dataBlocks = ecPolicy.getNumDataUnits();
+  private final int parityBlocks = ecPolicy.getNumParityUnits();
+  private final int cellSize = ecPolicy.getCellSize();
 
   private final int stripesPerBlock = 2;
   private final int blockSize = cellSize * stripesPerBlock;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java
index 1dd067d..4db3617 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java
@@ -27,13 +27,14 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Time;
@@ -51,10 +52,14 @@ import org.slf4j.LoggerFactory;
 public class TestSortLocatedStripedBlock {
   static final Logger LOG = LoggerFactory
       .getLogger(TestSortLocatedStripedBlock.class);
-  static final int BLK_GROUP_WIDTH = StripedFileTestUtil.NUM_DATA_BLOCKS
-      + StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  static final int NUM_DATA_BLOCKS = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  static final int NUM_PARITY_BLOCKS = StripedFileTestUtil.NUM_PARITY_BLOCKS;
+
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final int groupSize = dataBlocks + parityBlocks;
+
   static DatanodeManager dm;
   static final long STALE_INTERVAL = 30 * 1000 * 60;
 
@@ -100,7 +105,7 @@ public class TestSortLocatedStripedBlock {
     HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
         lbsCount * decommnNodeIndices.size());
     List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount,
-        NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, decommnNodeIndices,
+        dataBlocks, parityBlocks, decommnNodeIndices,
         targetNodeIndices, decommissionedNodes);
 
     // prepare expected block index and token list.
@@ -111,7 +116,7 @@ public class TestSortLocatedStripedBlock {
 
     dm.sortLocatedBlocks(null, lbs);
 
-    assertDecommnNodePosition(BLK_GROUP_WIDTH, decommissionedNodes, lbs);
+    assertDecommnNodePosition(groupSize, decommissionedNodes, lbs);
     assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
   }
 
@@ -156,7 +161,7 @@ public class TestSortLocatedStripedBlock {
     HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
         lbsCount * decommnNodeIndices.size());
     List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount,
-        NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, decommnNodeIndices,
+        dataBlocks, parityBlocks, decommnNodeIndices,
         targetNodeIndices, decommissionedNodes);
 
     // prepare expected block index and token list.
@@ -166,7 +171,7 @@ public class TestSortLocatedStripedBlock {
     prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
 
     dm.sortLocatedBlocks(null, lbs);
-    assertDecommnNodePosition(BLK_GROUP_WIDTH, decommissionedNodes, lbs);
+    assertDecommnNodePosition(groupSize, decommissionedNodes, lbs);
     assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
   }
 
@@ -209,9 +214,9 @@ public class TestSortLocatedStripedBlock {
     // which will be used for assertions
     HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
         lbsCount * decommnNodeIndices.size());
-    int dataBlksNum = NUM_DATA_BLOCKS - 2;
+    int dataBlksNum = dataBlocks - 2;
     List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount, dataBlksNum,
-        NUM_PARITY_BLOCKS, decommnNodeIndices, targetNodeIndices,
+        parityBlocks, decommnNodeIndices, targetNodeIndices,
         decommissionedNodes);
 
     // prepare expected block index and token list.
@@ -223,7 +228,7 @@ public class TestSortLocatedStripedBlock {
     dm.sortLocatedBlocks(null, lbs);
 
     // After this index all are decommissioned nodes.
-    int blkGrpWidth = dataBlksNum + NUM_PARITY_BLOCKS;
+    int blkGrpWidth = dataBlksNum + parityBlocks;
     assertDecommnNodePosition(blkGrpWidth, decommissionedNodes, lbs);
     assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
   }
@@ -275,7 +280,7 @@ public class TestSortLocatedStripedBlock {
     HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
         lbsCount * decommnNodeIndices.size());
     List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount,
-        NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, decommnNodeIndices,
+        dataBlocks, parityBlocks, decommnNodeIndices,
         targetNodeIndices, decommissionedNodes);
 
     // prepare expected block index and token list.
@@ -288,7 +293,7 @@ public class TestSortLocatedStripedBlock {
 
     // After this index all are decommissioned nodes. Needs to reconstruct two
     // more block indices.
-    int blkGrpWidth = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS - 2;
+    int blkGrpWidth = dataBlocks + parityBlocks - 2;
     assertDecommnNodePosition(blkGrpWidth, decommissionedNodes, lbs);
     assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
   }
@@ -336,7 +341,7 @@ public class TestSortLocatedStripedBlock {
     HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
         lbsCount * decommnNodeIndices.size());
     List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount,
-        NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, decommnNodeIndices,
+        dataBlocks, parityBlocks, decommnNodeIndices,
         targetNodeIndices, decommissionedNodes);
     List <DatanodeInfo> staleDns = new ArrayList<>();
     for (LocatedBlock lb : lbs) {
@@ -355,7 +360,7 @@ public class TestSortLocatedStripedBlock {
 
     dm.sortLocatedBlocks(null, lbs);
 
-    assertDecommnNodePosition(BLK_GROUP_WIDTH + 1, decommissionedNodes, lbs);
+    assertDecommnNodePosition(groupSize + 1, decommissionedNodes, lbs);
     assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
 
     for (LocatedBlock lb : lbs) {
@@ -452,7 +457,7 @@ public class TestSortLocatedStripedBlock {
       }
     }
     // Adding parity blocks after data blocks
-    index = NUM_DATA_BLOCKS;
+    index = dataBlocks;
     for (int j = numDataBlk; j < numDataBlk + numParityBlk; j++, index++) {
       blkIndices[j] = (byte) index;
       // Location port always equal to logical index of a block,
@@ -471,7 +476,7 @@ public class TestSortLocatedStripedBlock {
       }
     }
     // Add extra target nodes to storage list after the parity blocks
-    int basePortValue = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
+    int basePortValue = dataBlocks + parityBlocks;
     index = numDataBlk + numParityBlk;
     for (int i = 0; i < targetNodeIndices.size(); i++, index++) {
       int blkIndexPos = targetNodeIndices.get(i);
@@ -494,7 +499,7 @@ public class TestSortLocatedStripedBlock {
     }
     return new LocatedStripedBlock(
         new ExtendedBlock("pool", blockGroupID,
-            StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE, 1001),
+            cellSize, 1001),
         locs, storageIDs, storageTypes, blkIndices, 0, false, null);
   }
 

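For readers tracking the width changes above: the assertions in this test hinge on groupSize because DatanodeManager#sortLocatedBlocks is expected to push decommissioned replicas behind the live ones, so every live internal block should land within the first dataBlocks + parityBlocks positions. A hedged sketch of that invariant, mirroring (not copied from) assertDecommnNodePosition; the helper class and method names here are assumptions:

    import java.util.List;

    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.junit.Assert;

    final class SortedStripedBlockChecks {
      // After sorting, every location at or beyond the live-width boundary
      // (data + parity units still alive) should be a decommissioned datanode.
      static void assertDecommissionedAtTail(List<LocatedBlock> lbs, int liveWidth) {
        for (LocatedBlock lb : lbs) {
          DatanodeInfo[] locations = lb.getLocations();
          for (int i = liveWidth; i < locations.length; i++) {
            Assert.assertTrue("expected decommissioned node at position " + i,
                locations[i].isDecommissioned());
          }
        }
      }
    }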
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index f011b9d..3ef4067 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -109,7 +108,7 @@ import org.mockito.stubbing.Answer;
 import com.google.common.base.Supplier;
 
 /**
- * This tests if sync all replicas in block recovery works correctly
+ * This tests if sync all replicas in block recovery works correctly.
  */
 public class TestBlockRecovery {
   private static final Log LOG = LogFactory.getLog(TestBlockRecovery.class);
@@ -136,30 +135,30 @@ public class TestBlockRecovery {
   @Rule
   public TestName currentTestName = new TestName();
 
-  private static final int CELL_SIZE =
-      StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private static final int bytesPerChecksum = 512;
-  private static final int[][][] BLOCK_LENGTHS_SUITE = {
-      { { 11 * CELL_SIZE, 10 * CELL_SIZE, 9 * CELL_SIZE, 8 * CELL_SIZE,
-          7 * CELL_SIZE, 6 * CELL_SIZE, 5 * CELL_SIZE, 4 * CELL_SIZE,
-          3 * CELL_SIZE }, { 36 * CELL_SIZE } },
-
-      { { 3 * CELL_SIZE, 4 * CELL_SIZE, 5 * CELL_SIZE, 6 * CELL_SIZE,
-          7 * CELL_SIZE, 8 * CELL_SIZE, 9 * CELL_SIZE, 10 * CELL_SIZE,
-          11 * CELL_SIZE }, { 36 * CELL_SIZE } },
-
-      { { 11 * CELL_SIZE, 7 * CELL_SIZE, 6 * CELL_SIZE, 5 * CELL_SIZE,
-          4 * CELL_SIZE, 2 * CELL_SIZE, 9 * CELL_SIZE, 10 * CELL_SIZE,
-          11 * CELL_SIZE }, { 36 * CELL_SIZE } },
-
-      { { 8 * CELL_SIZE + bytesPerChecksum,
-          7 * CELL_SIZE + bytesPerChecksum * 2,
-          6 * CELL_SIZE + bytesPerChecksum * 2,
-          5 * CELL_SIZE - bytesPerChecksum * 3,
-          4 * CELL_SIZE - bytesPerChecksum * 4,
-          3 * CELL_SIZE - bytesPerChecksum * 4, 9 * CELL_SIZE, 10 * CELL_SIZE,
-          11 * CELL_SIZE }, { 36 * CELL_SIZE } }, };
-  
+  private final int cellSize =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy().getCellSize();
+  private final int bytesPerChecksum = 512;
+  private final int[][][] blockLengthsSuite = {
+      {{11 * cellSize, 10 * cellSize, 9 * cellSize, 8 * cellSize,
+        7 * cellSize, 6 * cellSize, 5 * cellSize, 4 * cellSize,
+        3 * cellSize}, {36 * cellSize}},
+
+      {{3 * cellSize, 4 * cellSize, 5 * cellSize, 6 * cellSize,
+        7 * cellSize, 8 * cellSize, 9 * cellSize, 10 * cellSize,
+        11 * cellSize}, {36 * cellSize}},
+
+      {{11 * cellSize, 7 * cellSize, 6 * cellSize, 5 * cellSize,
+        4 * cellSize, 2 * cellSize, 9 * cellSize, 10 * cellSize,
+        11 * cellSize}, {36 * cellSize}},
+
+      {{8 * cellSize + bytesPerChecksum,
+        7 * cellSize + bytesPerChecksum * 2,
+        6 * cellSize + bytesPerChecksum * 2,
+        5 * cellSize - bytesPerChecksum * 3,
+        4 * cellSize - bytesPerChecksum * 4,
+        3 * cellSize - bytesPerChecksum * 4, 9 * cellSize, 10 * cellSize,
+        11 * cellSize}, {36 * cellSize}}, };
+
   static {
     GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);
     GenericTestUtils.setLogLevel(LOG, Level.ALL);
@@ -807,9 +806,9 @@ public class TestBlockRecovery {
     BlockRecoveryWorker.RecoveryTaskStriped recoveryTask =
         recoveryWorker.new RecoveryTaskStriped(rBlockStriped);
 
-    for (int i = 0; i < BLOCK_LENGTHS_SUITE.length; i++) {
-      int[] blockLengths = BLOCK_LENGTHS_SUITE[i][0];
-      int safeLength = BLOCK_LENGTHS_SUITE[i][1][0];
+    for (int i = 0; i < blockLengthsSuite.length; i++) {
+      int[] blockLengths = blockLengthsSuite[i][0];
+      int safeLength = blockLengthsSuite[i][1][0];
       Map<Long, BlockRecord> syncList = new HashMap<>();
       for (int id = 0; id < blockLengths.length; id++) {
         ReplicaRecoveryInfo rInfo = new ReplicaRecoveryInfo(id,

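The blockLengthsSuite entries above pair a set of internal block lengths with the safe length the recovery task is expected to compute, and the expected 36 * cellSize can be reasoned about directly: with 6 data units, a stripe is recoverable only while at least 6 internal blocks still cover it, so the recoverable length is bounded by the 6th-largest internal block rounded down to whole cells. A small worked sketch of that reasoning (illustrative arithmetic only, not the actual StripedBlockUtil computation):

    import java.util.Arrays;

    public class SafeLengthSketch {
      public static void main(String[] args) {
        int dataBlocks = 6;   // data units of the default RS-6-3 policy
        // Internal block lengths of the first suite entry, in units of cellSize.
        int[] lengthsInCells = {11, 10, 9, 8, 7, 6, 5, 4, 3};
        Arrays.sort(lengthsInCells);  // ascending
        // A stripe is recoverable while at least dataBlocks internal blocks
        // still reach it, so the bound is the dataBlocks-th largest length.
        int boundInCells = lengthsInCells[lengthsInCells.length - dataBlocks]; // 6
        int safeLengthInCells = boundInCells * dataBlocks;                     // 36
        System.out.println("expected safe length = "
            + safeLengthInCells + " * cellSize");
      }
    }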
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
index 825aa5a..98efc13c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
@@ -29,11 +29,13 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
@@ -58,15 +60,14 @@ import java.util.Arrays;
 public class TestDataNodeErasureCodingMetrics {
   public static final Log LOG = LogFactory.
       getLog(TestDataNodeErasureCodingMetrics.class);
-
-  private static final int DATA_BLK_NUM = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private static final int PARITY_BLK_NUM =
-      StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  private static final int CELLSIZE =
-      StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private static final int BLOCKSIZE = CELLSIZE;
-  private static final int GROUPSIZE = DATA_BLK_NUM + PARITY_BLK_NUM;
-  private static final int DN_NUM = GROUPSIZE + 1;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int dataBlocks = ecPolicy.getNumDataUnits();
+  private final int parityBlocks = ecPolicy.getNumParityUnits();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int blockSize = cellSize;
+  private final int groupSize = dataBlocks + parityBlocks;
+  private final int numDNs = groupSize + 1;
 
   private MiniDFSCluster cluster;
   private Configuration conf;
@@ -76,9 +77,9 @@ public class TestDataNodeErasureCodingMetrics {
   public void setup() throws IOException {
     conf = new Configuration();
 
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DN_NUM).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.waitActive();
     cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
     fs = cluster.getFileSystem();
@@ -130,7 +131,7 @@ public class TestDataNodeErasureCodingMetrics {
   private DataNode doTest(String fileName) throws Exception {
 
     Path file = new Path(fileName);
-    long fileLen = DATA_BLK_NUM * BLOCKSIZE;
+    long fileLen = dataBlocks * blockSize;
     final byte[] data = StripedFileTestUtil.generateBytes((int) fileLen);
     DFSTestUtil.writeFile(fs, file, data);
     StripedFileTestUtil.waitBlockGroupsReported(fs, fileName);
@@ -142,7 +143,7 @@ public class TestDataNodeErasureCodingMetrics {
         (LocatedStripedBlock)locatedBlocks.getLastLocatedBlock();
     DataNode workerDn = null;
     DatanodeInfo[] locations = lastBlock.getLocations();
-    assertEquals(locations.length, GROUPSIZE);
+    assertEquals(locations.length, groupSize);
 
     // we have ONE extra datanode in addition to the GROUPSIZE datanodes, here
     // is to find the extra datanode that the reconstruction task will run on,
@@ -178,7 +179,7 @@ public class TestDataNodeErasureCodingMetrics {
     int workCount = getComputedDatanodeWork();
     assertTrue("Wrongly computed block reconstruction work", workCount > 0);
     cluster.triggerHeartbeats();
-    StripedFileTestUtil.waitForReconstructionFinished(file, fs, GROUPSIZE);
+    StripedFileTestUtil.waitForReconstructionFinished(file, fs, groupSize);
 
     return workerDn;
   }

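The comment in doTest() above ("we have ONE extra datanode in addition to the GROUPSIZE datanodes...") describes the step that picks the reconstruction worker: with numDNs = groupSize + 1, exactly one datanode holds no internal block of the group, and that is where the reconstruction task must run once a holder is taken down. A hedged sketch of that selection step; the helper class, method name, and structure are assumptions, not the test's actual code:

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;

    final class WorkerDnFinder {
      // Return the one datanode that holds no internal block of the given group;
      // with numDNs = groupSize + 1 it is the only reconstruction candidate.
      static DataNode findSpareDatanode(MiniDFSCluster cluster,
          LocatedStripedBlock lastBlock) {
        for (DataNode dn : cluster.getDataNodes()) {
          boolean hostsInternalBlock = false;
          for (DatanodeInfo loc : lastBlock.getLocations()) {
            if (dn.getDatanodeId().equals(loc)) {
              hostsInternalBlock = true;
              break;
            }
          }
          if (!hostsInternalBlock) {
            return dn;
          }
        }
        return null;
      }
    }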
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ffa116/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index b7b750b..4342dab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -72,6 +73,7 @@ import org.apache.hadoop.hdfs.server.balancer.ExitStatus;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
 import org.apache.hadoop.hdfs.server.mover.Mover.MLocation;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.minikdc.MiniKdc;
@@ -469,14 +471,16 @@ public class TestMover {
     }
   }
 
-  int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-  private final static int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private final static int stripesPerBlock = 4;
-  static int DEFAULT_STRIPE_BLOCK_SIZE = cellSize * stripesPerBlock;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int dataBlocks = ecPolicy.getNumDataUnits();
+  private final int parityBlocks = ecPolicy.getNumParityUnits();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripesPerBlock = 4;
+  private final int defaultBlockSize = cellSize * stripesPerBlock;
 
-  static void initConfWithStripe(Configuration conf) {
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_STRIPE_BLOCK_SIZE);
+  void initConfWithStripe(Configuration conf) {
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
@@ -490,7 +494,7 @@ public class TestMover {
     // start 10 datanodes
     int numOfDatanodes =10;
     int storagesPerDatanode=2;
-    long capacity = 10 * DEFAULT_STRIPE_BLOCK_SIZE;
+    long capacity = 10 * defaultBlockSize;
     long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
     for (int i = 0; i < numOfDatanodes; i++) {
       for(int j=0;j<storagesPerDatanode;j++){
@@ -529,7 +533,7 @@ public class TestMover {
 
       // write file to barDir
       final String fooFile = "/bar/foo";
-      long fileLen = 20 * DEFAULT_STRIPE_BLOCK_SIZE ;
+      long fileLen = 20 * defaultBlockSize;
       DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fooFile),
           fileLen,(short) 3, 0);
 

