http://git-wip-us.apache.org/repos/asf/hadoop/blob/073bbd80/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 556eca6..db0b0d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFormat;
-import org.apache.hadoop.hdfs.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.ipc.GenericRefreshProtocol;
@@ -609,15 +610,18 @@ public class DFSAdmin extends FsShell {
           + argv[1]);
     }
     byte storagePolicyId = status.getStoragePolicy();
-    BlockStoragePolicy.Suite suite = BlockStoragePolicy
-        .readBlockStorageSuite(getConf());
-    BlockStoragePolicy policy = suite.getPolicy(storagePolicyId);
-    if (policy != null) {
-      System.out.println("The storage policy of " + argv[1] + ":\n" + policy);
+    if (storagePolicyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
+      System.out.println("The storage policy of " + argv[1] + " is 
unspecified");
       return 0;
-    } else {
-      throw new IOException("Cannot identify the storage policy for " + argv[1]);
     }
+    BlockStoragePolicy[] policies = dfs.getStoragePolicySuite();
+    for (BlockStoragePolicy p : policies) {
+      if (p.getId() == storagePolicyId) {
+        System.out.println("The storage policy of " + argv[1] + ":\n" + p);
+        return 0;
+      }
+    }
+    throw new IOException("Cannot identify the storage policy for " + argv[1]);
   }
 
   /**

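The rewritten getStoragePolicy above now distinguishes a path whose policy was
never set (ID_UNSPECIFIED) from one whose id cannot be resolved, and it looks
the id up in the suite served by the NameNode instead of re-reading a
client-side configuration file. A minimal standalone sketch of the same flow,
using only APIs visible in this patch (a running cluster reachable through the
default Configuration is assumed, as is that the default filesystem is HDFS):

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class ShowStoragePolicy {
  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    HdfsFileStatus status = dfs.getClient().getFileInfo(args[0]);
    if (status == null) {
      throw new FileNotFoundException("File/Directory does not exist: " + args[0]);
    }
    byte id = status.getStoragePolicy();
    if (id == BlockStoragePolicySuite.ID_UNSPECIFIED) {
      System.out.println("The storage policy of " + args[0] + " is unspecified");
      return;
    }
    // The suite now comes from the NameNode over the new RPC.
    for (BlockStoragePolicy p : dfs.getStoragePolicySuite()) {
      if (p.getId() == id) {
        System.out.println("The storage policy of " + args[0] + ":\n" + p);
        return;
      }
    }
    throw new IOException("Cannot identify the storage policy for " + args[0]);
  }
}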
http://git-wip-us.apache.org/repos/asf/hadoop/blob/073bbd80/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 2b3d7e6..0eb7c61 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -21,13 +21,14 @@ import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
@@ -264,7 +265,7 @@ public class JsonUtil {
             : childrenNumLong.intValue();
     final byte storagePolicy = m.containsKey("storagePolicy") ?
         (byte) (long) (Long) m.get("storagePolicy") :
-          BlockStoragePolicy.ID_UNSPECIFIED;
+          BlockStoragePolicySuite.ID_UNSPECIFIED;
     return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
         blockSize, mTime, aTime, permission, owner, group, symlink,
         DFSUtil.string2Bytes(localName), fileId, childrenNum, null, storagePolicy);

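The JsonUtil change keeps WebHDFS tolerant of servers that do not emit a
storagePolicy field: when the key is absent, the status falls back to the
unspecified sentinel. A self-contained sketch of that optional-field pattern
(the sentinel value is assumed to be 0 here for illustration, and the map
mirrors what the JSON parser hands to JsonUtil):

import java.util.HashMap;
import java.util.Map;

public class OptionalJsonField {
  // Stands in for BlockStoragePolicySuite.ID_UNSPECIFIED; 0 is assumed.
  static final byte ID_UNSPECIFIED = 0;

  static byte parseStoragePolicy(Map<?, ?> m) {
    // JSON integers are parsed as Long; unbox, then narrow to byte.
    return m.containsKey("storagePolicy")
        ? (byte) (long) (Long) m.get("storagePolicy")
        : ID_UNSPECIFIED;
  }

  public static void main(String[] args) {
    Map<String, Object> m = new HashMap<String, Object>();
    System.out.println(parseStoragePolicy(m)); // 0: field absent
    m.put("storagePolicy", 7L);
    System.out.println(parseStoragePolicy(m)); // 7: field present
  }
}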
http://git-wip-us.apache.org/repos/asf/hadoop/blob/073bbd80/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index 082e5bd..ce7bf1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -108,6 +108,13 @@ message SetStoragePolicyRequestProto {
 message SetStoragePolicyResponseProto { // void response
 }
 
+message GetStoragePolicySuiteRequestProto { // void request
+}
+
+message GetStoragePolicySuiteResponseProto {
+  repeated BlockStoragePolicyProto policies = 1;
+}
+
 message SetPermissionRequestProto {
   required string src = 1;
   required FsPermissionProto permission = 2;
@@ -699,6 +706,8 @@ service ClientNamenodeProtocol {
       returns(SetReplicationResponseProto);
   rpc setStoragePolicy(SetStoragePolicyRequestProto)
       returns(SetStoragePolicyResponseProto);
+  rpc getStoragePolicySuite(GetStoragePolicySuiteRequestProto)
+      returns(GetStoragePolicySuiteResponseProto);
   rpc setPermission(SetPermissionRequestProto)
       returns(SetPermissionResponseProto);
   rpc setOwner(SetOwnerRequestProto) returns(SetOwnerResponseProto);

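On the wire, the new getStoragePolicySuite call pairs an empty request with a
response carrying the repeated policy list. A sketch of the client-side
translator code that protoc's generated classes would support (the rpcProxy
stub and the enclosing translator class are assumed, as in Hadoop's other
protocol translators; this fragment is illustrative, not lifted from the
patch):

// Inside a ClientNamenodeProtocol translator; rpcProxy is the generated
// blocking stub (assumed field, not shown in this diff).
GetStoragePolicySuiteRequestProto req =
    GetStoragePolicySuiteRequestProto.newBuilder().build(); // void request
GetStoragePolicySuiteResponseProto resp =
    rpcProxy.getStoragePolicySuite(null, req); // null RpcController
for (BlockStoragePolicyProto p : resp.getPoliciesList()) {
  System.out.println(p.getPolicyId() + " => " + p.getName());
}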
http://git-wip-us.apache.org/repos/asf/hadoop/blob/073bbd80/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index d1ba68f..c57e308 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -169,6 +169,20 @@ message StorageTypesProto {
 }
 
 /**
+ * Block replica storage policy.
+ */
+message BlockStoragePolicyProto {
+  required uint32 policyId = 1;
+  required string name = 2;
+  // a list of storage types for storing the block replicas when creating a
+  // block.
+  required StorageTypesProto creationPolicy = 3;
+  // A list of storage types for creation fallback storage.
+  optional StorageTypesProto creationFallbackPolicy = 4;
+  optional StorageTypesProto replicationFallbackPolicy = 5;
+}
+
+/**
  * A list of storage IDs. 
  */
 message StorageUuidsProto {

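Read through protoc's eyes, BlockStoragePolicyProto requires an id, a name,
and a creation policy, while both fallback lists may be omitted. A sketch of
building one with the generated builders (the HdfsProtos outer class, the
StorageTypeProto enum, and the addStorageTypes(...) accessor on
StorageTypesProto are assumed from the rest of hdfs.proto, which this excerpt
does not show; the id and name are illustrative):

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;

public class BuildPolicyProto {
  public static void main(String[] args) {
    // Creation policy: store all replicas on DISK.
    StorageTypesProto disks = StorageTypesProto.newBuilder()
        .addStorageTypes(StorageTypeProto.DISK)
        .build();
    BlockStoragePolicyProto hot = BlockStoragePolicyProto.newBuilder()
        .setPolicyId(12)           // required; illustrative id
        .setName("HOT")            // required
        .setCreationPolicy(disks)  // required
        // creationFallbackPolicy and replicationFallbackPolicy left unset
        .build();
    System.out.println(hot);
  }
}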
http://git-wip-us.apache.org/repos/asf/hadoop/blob/073bbd80/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index d404c1c..2d4109a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -22,8 +22,7 @@
 <!-- wish to modify from this file into hdfs-site.xml and change them -->
 <!-- there.  If hdfs-site.xml does not already exist, create it.      -->
 
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-  <xi:include href="blockStoragePolicy-default.xml" />
+<configuration>
 
 <property>
   <name>hadoop.hdfs.configuration.version</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/073bbd80/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 158c225..38ffcee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.BlockStoragePolicy.ID_UNSPECIFIED;
+import static org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite.ID_UNSPECIFIED;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
@@ -44,7 +45,7 @@ import org.junit.Test;
 
 /** Test {@link BlockStoragePolicy} */
 public class TestBlockStoragePolicy {
-  public static final BlockStoragePolicy.Suite POLICY_SUITE;
+  public static final BlockStoragePolicySuite POLICY_SUITE;
   public static final BlockStoragePolicy DEFAULT_STORAGE_POLICY;
   public static final Configuration conf;
 
@@ -52,7 +53,7 @@ public class TestBlockStoragePolicy {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
-    POLICY_SUITE = BlockStoragePolicy.readBlockStorageSuite(conf);
+    POLICY_SUITE = BlockStoragePolicySuite.createDefaultSuite();
     DEFAULT_STORAGE_POLICY = POLICY_SUITE.getDefaultPolicy();
   }
 
@@ -948,7 +949,7 @@ public class TestBlockStoragePolicy {
     Assert.assertTrue(typeList.isEmpty());
   }
 
-  private void testIncreaseFileRep(String policyName, byte policyId,
+  private void testChangeFileRep(String policyName, byte policyId,
                                    StorageType[] before,
                                    StorageType[] after) throws Exception {
     final int numDataNodes = 5;
@@ -965,8 +966,6 @@ public class TestBlockStoragePolicy {
       final Path foo = new Path(dir, "foo");
       DFSTestUtil.createFile(fs, foo, FILE_LEN, REPLICATION, 0L);
 
-      // the storage policy of foo should be WARM, and the replicas
-      // should be stored in DISK and ARCHIE
       HdfsFileStatus[] status = fs.getClient().listPaths(foo.toString(),
           HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
       checkDirectoryListing(status, policyId);
@@ -984,7 +983,24 @@ public class TestBlockStoragePolicy {
           HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
       checkDirectoryListing(status, policyId);
       fooStatus = (HdfsLocatedFileStatus) status[0];
-      checkLocatedBlocks(fooStatus, 1, 5, after);
+      checkLocatedBlocks(fooStatus, 1, numDataNodes, after);
+
+      // change the replication factor back to 3
+      fs.setReplication(foo, REPLICATION);
+      Thread.sleep(1000);
+      for (DataNode dn : cluster.getDataNodes()) {
+        DataNodeTestUtils.triggerHeartbeat(dn);
+      }
+      Thread.sleep(1000);
+      for (DataNode dn : cluster.getDataNodes()) {
+        DataNodeTestUtils.triggerBlockReport(dn);
+      }
+      Thread.sleep(1000);
+      status = fs.getClient().listPaths(foo.toString(),
+          HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
+      checkDirectoryListing(status, policyId);
+      fooStatus = (HdfsLocatedFileStatus) status[0];
+      checkLocatedBlocks(fooStatus, 1, REPLICATION, before);
     } finally {
       cluster.shutdown();
     }
@@ -995,11 +1011,12 @@ public class TestBlockStoragePolicy {
    * that file from 3 to 5. Make sure all replications are created in DISKS.
    */
   @Test
-  public void testIncreaseHotFileRep() throws Exception {
-    testIncreaseFileRep("HOT", HOT, new StorageType[]{StorageType.DISK,
-            StorageType.DISK, StorageType.DISK},
+  public void testChangeHotFileRep() throws Exception {
+    testChangeFileRep("HOT", HOT,
         new StorageType[]{StorageType.DISK, StorageType.DISK,
-            StorageType.DISK, StorageType.DISK, StorageType.DISK});
+            StorageType.DISK},
+        new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK,
+            StorageType.DISK, StorageType.DISK});
   }
 
   /**
@@ -1008,9 +1025,10 @@ public class TestBlockStoragePolicy {
    * and ARCHIVE.
    */
   @Test
-  public void testIncreaseWarmRep() throws Exception {
-    testIncreaseFileRep("WARM", WARM, new StorageType[]{StorageType.DISK,
-        StorageType.ARCHIVE, StorageType.ARCHIVE},
+  public void testChangeWarmRep() throws Exception {
+    testChangeFileRep("WARM", WARM,
+        new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
+            StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
             StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE});
   }
@@ -1020,9 +1038,10 @@ public class TestBlockStoragePolicy {
    * that file from 3 to 5. Make sure all replicas are created in ARCHIVE.
    */
   @Test
-  public void testIncreaseColdRep() throws Exception {
-    testIncreaseFileRep("COLD", COLD, new StorageType[]{StorageType.ARCHIVE,
-            StorageType.ARCHIVE, StorageType.ARCHIVE},
+  public void testChangeColdRep() throws Exception {
+    testChangeFileRep("COLD", COLD,
+        new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
+            StorageType.ARCHIVE},
         new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
             StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE});
   }
@@ -1072,4 +1091,28 @@ public class TestBlockStoragePolicy {
     System.out.println(Arrays.asList(targets));
     Assert.assertEquals(3, targets.length);
   }
+
+  /**
+   * Test getting all the storage policies from the namenode
+   */
+  @Test
+  public void testGetAllStoragePolicies() throws Exception {
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(0).build();
+    cluster.waitActive();
+    final DistributedFileSystem fs = cluster.getFileSystem();
+    try {
+      BlockStoragePolicy[] policies = fs.getStoragePolicySuite();
+      Assert.assertEquals(3, policies.length);
+      Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
+          policies[0].toString());
+      Assert.assertEquals(POLICY_SUITE.getPolicy(WARM).toString(),
+          policies[1].toString());
+      Assert.assertEquals(POLICY_SUITE.getPolicy(HOT).toString(),
+          policies[2].toString());
+    } finally {
+      IOUtils.cleanup(null, fs);
+      cluster.shutdown();
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/073bbd80/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStoragePolicyCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStoragePolicyCommands.java
index d6ead09..d80356a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStoragePolicyCommands.java
@@ -21,6 +21,8 @@ import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -60,6 +62,11 @@ public class TestStoragePolicyCommands {
     final Path bar = new Path(foo, "bar");
     DFSTestUtil.createFile(fs, bar, SIZE, REPL, 0);
 
+    DFSTestUtil.DFSAdminRun("-getStoragePolicy /foo", 0,
+        "The storage policy of " + foo.toString() + " is unspecified", conf);
+    DFSTestUtil.DFSAdminRun("-getStoragePolicy /foo/bar", 0,
+        "The storage policy of " + bar.toString() + " is unspecified", conf);
+
     DFSTestUtil.DFSAdminRun("-setStoragePolicy /foo WARM", 0,
         "Set storage policy WARM on " + foo.toString(), conf);
     DFSTestUtil.DFSAdminRun("-setStoragePolicy /foo/bar COLD", 0,
@@ -67,8 +74,8 @@ public class TestStoragePolicyCommands {
     DFSTestUtil.DFSAdminRun("-setStoragePolicy /fooz WARM", -1,
         "File/Directory does not exist: /fooz", conf);
 
-    final BlockStoragePolicy.Suite suite = BlockStoragePolicy
-        .readBlockStorageSuite(conf);
+    final BlockStoragePolicySuite suite = BlockStoragePolicySuite
+        .createDefaultSuite();
     final BlockStoragePolicy warm = suite.getPolicy("WARM");
     final BlockStoragePolicy cold = suite.getPolicy("COLD");
     DFSTestUtil.DFSAdminRun("-getStoragePolicy /foo", 0,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/073bbd80/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index e40f142..0001e3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -56,6 +56,7 @@ import org.apache.hadoop.hdfs.server.balancer.Dispatcher;
 import org.apache.hadoop.hdfs.server.balancer.ExitStatus;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -87,7 +88,7 @@ public class TestStorageMover {
   private static final short REPL = 3;
   private static final int NUM_DATANODES = 6;
   private static final Configuration DEFAULT_CONF = new HdfsConfiguration();
-  private static final BlockStoragePolicy.Suite DEFAULT_POLICIES;
+  private static final BlockStoragePolicySuite DEFAULT_POLICIES;
   private static final BlockStoragePolicy HOT;
   private static final BlockStoragePolicy WARM;
   private static final BlockStoragePolicy COLD;
@@ -99,7 +100,7 @@ public class TestStorageMover {
         2L);
     DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L);
 
-    DEFAULT_POLICIES = BlockStoragePolicy.readBlockStorageSuite(DEFAULT_CONF);
+    DEFAULT_POLICIES = BlockStoragePolicySuite.createDefaultSuite();
     HOT = DEFAULT_POLICIES.getPolicy("HOT");
     WARM = DEFAULT_POLICIES.getPolicy("WARM");
     COLD = DEFAULT_POLICIES.getPolicy("COLD");
@@ -192,13 +193,21 @@ public class TestStorageMover {
 
     private MiniDFSCluster cluster;
     private DistributedFileSystem dfs;
-    private final BlockStoragePolicy.Suite policies;
+    private final BlockStoragePolicySuite policies;
 
     MigrationTest(ClusterScheme cScheme, NamespaceScheme nsScheme) {
       this.clusterScheme = cScheme;
       this.nsScheme = nsScheme;
       this.conf = clusterScheme.conf;
-      this.policies = BlockStoragePolicy.readBlockStorageSuite(conf);
+      this.policies = DEFAULT_POLICIES;
+    }
+
+    MigrationTest(ClusterScheme cScheme, NamespaceScheme nsScheme,
+        BlockStoragePolicySuite policies) {
+      this.clusterScheme = cScheme;
+      this.nsScheme = nsScheme;
+      this.conf = clusterScheme.conf;
+      this.policies = policies;
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/073bbd80/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
index b0f6b6a..267821f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
