Merge branch 'trunk' into HDFS-6581

Conflicts:
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
        hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
        hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
        hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
        hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
        hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8bbf800
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8bbf800
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8bbf800

Branch: refs/heads/HDFS-6581
Commit: f8bbf80067ac03400acae4655615c9808c538ca8
Parents: e79c98c 6434572
Author: arp <a...@apache.org>
Authored: Thu Sep 18 22:26:42 2014 -0700
Committer: arp <a...@apache.org>
Committed: Thu Sep 18 22:26:42 2014 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |    4 +
 .../hadoop-common/src/main/conf/hadoop-env.sh   |    9 +
 .../crypto/key/kms/KMSClientProvider.java       |    3 +
 .../org/apache/hadoop/net/NetworkTopology.java  |   36 +-
 .../net/NetworkTopologyWithNodeGroup.java       |   23 +-
 .../net/TestNetworkTopologyWithNodeGroup.java   |   12 +-
 .../hadoop-kms/src/site/apt/index.apt.vm        |   26 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   87 ++
 .../hadoop-hdfs/src/main/bin/hdfs               |    7 +
 .../hadoop-hdfs/src/main/bin/hdfs.cmd           |    8 +-
 .../src/main/conf/blockStoragePolicy-site.xml   |   21 +
 .../hadoop-hdfs/src/main/conf/hdfs-site.xml     |    3 +-
 .../apache/hadoop/hdfs/BlockStoragePolicy.java  |  419 +++++++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   19 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   18 +-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |   64 +-
 .../hadoop/hdfs/DistributedFileSystem.java      |   34 +-
 .../org/apache/hadoop/hdfs/StorageType.java     |    6 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java    |   14 +
 .../hadoop/hdfs/protocol/HdfsConstants.java     |    5 +-
 .../hadoop/hdfs/protocol/HdfsFileStatus.java    |    9 +-
 .../hdfs/protocol/HdfsLocatedFileStatus.java    |    9 +-
 .../protocol/SnapshottableDirectoryStatus.java  |    3 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java |   16 +
 .../ClientNamenodeProtocolTranslatorPB.java     |   16 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   37 +-
 .../hadoop/hdfs/server/balancer/Balancer.java   |   19 +-
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  241 ++--
 .../hadoop/hdfs/server/balancer/Matcher.java    |   15 +
 .../hdfs/server/balancer/MovedBlocks.java       |    2 +-
 .../hdfs/server/balancer/NameNodeConnector.java |   79 +-
 .../server/blockmanagement/BlockCollection.java |    5 +
 .../server/blockmanagement/BlockManager.java    |   89 +-
 .../blockmanagement/BlockPlacementPolicy.java   |   22 +-
 .../BlockPlacementPolicyDefault.java            |  377 +++---
 .../BlockPlacementPolicyWithNodeGroup.java      |   73 +-
 .../blockmanagement/DatanodeDescriptor.java     |   52 +-
 .../server/blockmanagement/DatanodeManager.java |    6 +-
 .../blockmanagement/DatanodeStorageInfo.java    |   24 +-
 .../hdfs/server/common/HdfsServerConstants.java |    7 +-
 .../hadoop/hdfs/server/common/Storage.java      |   10 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  165 ++-
 .../server/datanode/fsdataset/FsDatasetSpi.java |    4 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  141 ++-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |  644 +++++++++++
 .../hdfs/server/namenode/FSDirectory.java       |  122 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |   12 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   12 +-
 .../hdfs/server/namenode/FSEditLogOp.java       |   67 ++
 .../hdfs/server/namenode/FSEditLogOpCodes.java  |    1 +
 .../hdfs/server/namenode/FSImageFormat.java     |    2 +-
 .../server/namenode/FSImageFormatPBINode.java   |    6 +-
 .../hdfs/server/namenode/FSNamesystem.java      |  225 ++--
 .../hadoop/hdfs/server/namenode/INode.java      |   17 +-
 .../hdfs/server/namenode/INodeDirectory.java    |   27 +
 .../namenode/INodeDirectoryAttributes.java      |    4 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   53 +-
 .../server/namenode/INodeFileAttributes.java    |   14 +-
 .../hadoop/hdfs/server/namenode/INodeMap.java   |   11 +
 .../hdfs/server/namenode/INodeReference.java    |   10 +
 .../hdfs/server/namenode/INodeSymlink.java      |   12 +
 .../server/namenode/NameNodeLayoutVersion.java  |    7 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |    8 +-
 .../namenode/RetryStartFileException.java       |   17 +-
 .../snapshot/FSImageFormatPBSnapshot.java       |    2 +-
 .../web/resources/NamenodeWebHdfsMethods.java   |    8 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   75 +-
 .../apache/hadoop/hdfs/util/EnumCounters.java   |    9 +
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    |    8 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |   10 +
 .../hadoop-hdfs/src/main/proto/fsimage.proto    |    3 +-
 .../hadoop-hdfs/src/main/proto/hdfs.proto       |    7 +-
 .../resources/blockStoragePolicy-default.xml    |  118 ++
 .../src/main/resources/hdfs-default.xml         |   16 +-
 .../src/site/apt/ArchivalStorage.apt.vm         |  302 +++++
 .../src/site/apt/HDFSCommands.apt.vm            |   43 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |  123 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   14 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java     | 1075 ++++++++++++++++++
 .../hadoop/hdfs/TestDFSClientRetries.java       |    4 +-
 .../hdfs/TestDFSInotifyEventInputStream.java    |    2 +-
 .../apache/hadoop/hdfs/TestEncryptionZones.java |    2 +-
 .../java/org/apache/hadoop/hdfs/TestLease.java  |    9 +-
 .../hadoop/hdfs/TestSnapshotCommands.java       |  152 +--
 .../hadoop/hdfs/TestStoragePolicyCommands.java  |   81 ++
 .../hdfs/server/balancer/TestBalancer.java      |    7 +
 .../balancer/TestBalancerWithHANameNodes.java   |    2 +-
 .../TestBalancerWithMultipleNameNodes.java      |    2 +-
 .../balancer/TestBalancerWithNodeGroup.java     |    2 +-
 .../blockmanagement/TestBlockManager.java       |   20 +-
 .../blockmanagement/TestReplicationPolicy.java  |   27 +-
 .../TestReplicationPolicyConsiderLoad.java      |   28 +-
 .../TestReplicationPolicyWithNodeGroup.java     |   17 +-
 .../server/datanode/SimulatedFSDataset.java     |    3 +-
 .../datanode/TestDataNodeHotSwapVolumes.java    |  423 +++++++
 .../fsdataset/impl/TestFsDatasetImpl.java       |   12 +-
 .../hadoop/hdfs/server/mover/TestMover.java     |  222 ++++
 .../hdfs/server/mover/TestStorageMover.java     |  766 +++++++++++++
 .../hdfs/server/namenode/TestAddBlockRetry.java |    4 +-
 .../hdfs/server/namenode/TestDeleteRace.java    |    6 +-
 .../namenode/TestFSPermissionChecker.java       |   33 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |    3 +-
 .../hdfs/server/namenode/TestINodeFile.java     |   29 +-
 .../hdfs/server/namenode/ha/TestDNFencing.java  |    4 +-
 .../apache/hadoop/hdfs/web/TestJsonUtil.java    |    2 +-
 .../apache/hadoop/net/TestNetworkTopology.java  |   40 +-
 .../hadoop-hdfs/src/test/resources/editsStored  |  Bin 5252 -> 4999 bytes
 .../src/test/resources/editsStored.xml          |  422 ++++---
 hadoop-project/src/site/site.xml                |    1 +
 109 files changed, 6605 insertions(+), 1043 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
index 51724f7,7ca9e00..bcbe5c0
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
@@@ -33,9 -33,10 +33,11 @@@ import org.apache.hadoop.classification
  public enum StorageType {
    DISK,
    SSD,
-   RAM_DISK;
 -  ARCHIVE;
++  ARCHIVE,
++  RAM_DISK;  
  
    public static final StorageType DEFAULT = DISK;
+   
    public static final StorageType[] EMPTY_ARRAY = {};
    
    private static final StorageType[] VALUES = values();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index 49c5cde,94d9a92..4fa7fd6
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@@ -70,10 -70,10 +71,10 @@@ public class HdfsFileStatus 
     * @param feInfo the file's encryption info
     */
    public HdfsFileStatus(long length, boolean isdir, int block_replication,
 -      long blocksize, long modification_time, long access_time,
 -      FsPermission permission, String owner, String group, byte[] symlink,
 -      byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
 -      byte storagePolicy) {
 +      long blocksize, boolean isLazyPersist, long modification_time,
 +      long access_time, FsPermission permission, String owner,
 +      String group, byte[] symlink, byte[] path, long fileId,
-       int childrenNum, FileEncryptionInfo feInfo) {
++      int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy) {
      this.length = length;
      this.isdir = isdir;
      this.block_replication = (short)block_replication;
@@@ -261,9 -254,14 +263,14 @@@
      return childrenNum;
    }
  
+   /** @return the storage policy id */
+   public final byte getStoragePolicy() {
+     return storagePolicy;
+   }
+ 
    public final FileStatus makeQualified(URI defaultUri, Path path) {
      return new FileStatus(getLen(), isDir(), getReplication(),
 -        getBlockSize(), getModificationTime(),
 +        getBlockSize(), isLazyPersist(), getModificationTime(),
          getAccessTime(),
          getPermission(), getOwner(), getGroup(),
          isSymlink() ? new Path(getSymlink()) : null,

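As a reading aid, here is a hedged sketch of a call to the merged constructor above; it now takes both the isLazyPersist flag from this branch (right after blocksize) and the storagePolicy byte from trunk (appended last). All argument values are invented for illustration; only the parameter order comes from the diff:

    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    class HdfsFileStatusExample {             // hypothetical holder class
      static HdfsFileStatus example() {
        return new HdfsFileStatus(
            1024L, false, 3,                  // length, isdir, block_replication
            128L * 1024 * 1024,               // blocksize
            false,                            // isLazyPersist (HDFS-6581 branch)
            0L, 0L,                           // modification_time, access_time
            new FsPermission((short) 0644), "owner", "group",
            null,                             // symlink
            DFSUtil.string2Bytes("foo"),      // path
            1010L, 0,                         // fileId, childrenNum
            null,                             // feInfo
            (byte) 0);                        // storagePolicy (trunk side)
      }
    }
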
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
index f70df34,7e602bf..644fb6f
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
@@@ -55,16 -55,16 +55,17 @@@ public class HdfsLocatedFileStatus exte
     * @param feInfo file encryption info
     */
    public HdfsLocatedFileStatus(long length, boolean isdir,
 -      int block_replication, long blocksize, long modification_time,
 -      long access_time, FsPermission permission, String owner, String group,
 -      byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
 -      int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy) {
 -    super(length, isdir, block_replication, blocksize, modification_time,
 -        access_time, permission, owner, group, symlink, path, fileId,
 -        childrenNum, feInfo, storagePolicy);
 +      int block_replication, long blocksize, boolean isLazyPersist,
 +      long modification_time, long access_time, FsPermission permission,
 +      String owner, String group, byte[] symlink, byte[] path, long fileId,
-       LocatedBlocks locations, int childrenNum, FileEncryptionInfo feInfo) {
++      LocatedBlocks locations, int childrenNum, FileEncryptionInfo feInfo,
++      byte storagePolicy) {
 +    super(length, isdir, block_replication, blocksize, isLazyPersist,
 +        modification_time, access_time, permission, owner, group, symlink,
-         path, fileId, childrenNum, feInfo);
++        path, fileId, childrenNum, feInfo, storagePolicy);
      this.locations = locations;
    }
-       
+ 
    public LocatedBlocks getBlockLocations() {
      return locations;
    }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index d0cebfa,13acc7a..060a954
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@@ -59,9 -60,9 +60,9 @@@ public class SnapshottableDirectoryStat
        FsPermission permission, String owner, String group, byte[] localName,
        long inodeId, int childrenNum,
        int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
 -    this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
 +    this.dirStatus = new HdfsFileStatus(0, true, 0, 0, false, modification_time,
          access_time, permission, owner, group, null, localName, inodeId,
-         childrenNum, null);
+         childrenNum, null, BlockStoragePolicy.ID_UNSPECIFIED);
      this.snapshotNumber = snapshotNumber;
      this.snapshotQuota = snapshotQuota;
      this.parentFullPath = parentFullPath;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 5a21667,862a803..d1abea0
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@@ -1707,8 -1719,8 +1728,10 @@@ public class PBHelper 
        return StorageTypeProto.DISK;
      case SSD:
        return StorageTypeProto.SSD;
+     case ARCHIVE:
+       return StorageTypeProto.ARCHIVE;
 +    case RAM_DISK:
 +      return StorageTypeProto.RAM_DISK;
      default:
        throw new IllegalStateException(
            "BUG: StorageType not found, type=" + type);
@@@ -1737,8 -1749,8 +1760,10 @@@
          return StorageType.DISK;
        case SSD:
          return StorageType.SSD;
+       case ARCHIVE:
+         return StorageType.ARCHIVE;
 +      case RAM_DISK:
 +        return StorageType.RAM_DISK;
        default:
          throw new IllegalStateException(
              "BUG: StorageTypeProto not found, type=" + type);

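The two switch statements above have to stay mirror images of each other after the merge; a round-trip over every enum constant makes any missed case fail fast. A sketch of such a check, assuming the converters are exposed as PBHelper.convertStorageType in both directions (the enclosing method signatures fall outside these hunks):

    import org.apache.hadoop.hdfs.StorageType;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;

    public class StorageTypeRoundTrip {
      public static void main(String[] args) {
        for (StorageType t : StorageType.values()) {
          StorageTypeProto p = PBHelper.convertStorageType(t);  // enum -> proto
          assert t == PBHelper.convertStorageType(p) : t;       // proto -> enum
        }
      }
    }
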
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 464d89a,d28d616..eff904f
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@@ -41,8 -44,7 +44,9 @@@ import javax.management.NotCompliantMBe
  import javax.management.ObjectName;
  import javax.management.StandardMBean;
  
 +import com.google.common.base.Preconditions;
+ import com.google.common.collect.Lists;
 +import org.apache.commons.io.FileUtils;
  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;
  import org.apache.hadoop.classification.InterfaceAudience;
@@@ -88,9 -89,9 +92,10 @@@ import org.apache.hadoop.hdfs.server.pr
  import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
  import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
  import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+ import org.apache.hadoop.io.MultipleIOException;
  import org.apache.hadoop.io.nativeio.NativeIO;
  import org.apache.hadoop.metrics2.util.MBeans;
 +import org.apache.hadoop.util.Daemon;
  import org.apache.hadoop.util.DataChecksum;
  import org.apache.hadoop.util.DiskChecker.DiskErrorException;
  import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
@@@ -255,10 -250,8 +260,10 @@@ class FsDatasetImpl implements FsDatase
            + ", volume failures tolerated: " + volFailuresTolerated);
      }
  
-     storageMap = new HashMap<String, DatanodeStorage>();
+     storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
      volumeMap = new ReplicaMap(this);
 +    lazyWriteReplicaTracker = new LazyWriteReplicaTracker(this);
 +
      @SuppressWarnings("unchecked")
      final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
          ReflectionUtils.newInstance(conf.getClass(
@@@ -289,10 -278,12 +294,12 @@@
      // If IOException raises from FsVolumeImpl() or getVolumeMap(), there is
      // nothing needed to be rolled back to make various data structures, e.g.,
      // storageMap and asyncDiskService, consistent.
 -    FsVolumeImpl fsVolume = new FsVolumeImpl(
 +    FsVolumeImpl fsVolume = FsVolumeImplAllocator.createVolume(
          this, sd.getStorageUuid(), dir, this.conf, storageType);
-     fsVolume.getVolumeMap(volumeMap, lazyWriteReplicaTracker);
+     ReplicaMap tempVolumeMap = new ReplicaMap(this);
 -    fsVolume.getVolumeMap(tempVolumeMap);
++    fsVolume.getVolumeMap(tempVolumeMap, lazyWriteReplicaTracker);
  
+     volumeMap.addAll(tempVolumeMap);
      volumes.addVolume(fsVolume);
      storageMap.put(sd.getStorageUuid(),
          new DatanodeStorage(sd.getStorageUuid(),
@@@ -303,6 -294,44 +310,44 @@@
      LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
    }
  
+   private void addVolumeAndBlockPool(Collection<StorageLocation> dataLocations,
+       Storage.StorageDirectory sd, final Collection<String> bpids)
+       throws IOException {
+     final File dir = sd.getCurrentDir();
+     final StorageType storageType =
+         getStorageTypeFromLocations(dataLocations, sd.getRoot());
+ 
+     final FsVolumeImpl fsVolume = new FsVolumeImpl(
+         this, sd.getStorageUuid(), dir, this.conf, storageType);
+     final ReplicaMap tempVolumeMap = new ReplicaMap(fsVolume);
+ 
+     List<IOException> exceptions = Lists.newArrayList();
+     for (final String bpid : bpids) {
+       try {
+         fsVolume.addBlockPool(bpid, this.conf);
 -        fsVolume.getVolumeMap(bpid, tempVolumeMap);
++        fsVolume.getVolumeMap(bpid, tempVolumeMap, lazyWriteReplicaTracker);
+       } catch (IOException e) {
+         LOG.warn("Caught exception when adding " + fsVolume +
+             ". Will throw later.", e);
+         exceptions.add(e);
+       }
+     }
+     if (!exceptions.isEmpty()) {
+       // The states of FsDatasteImpl are not modified, thus no need to rolled back.
+       throw MultipleIOException.createIOException(exceptions);
+     }
+ 
+     volumeMap.addAll(tempVolumeMap);
+     storageMap.put(sd.getStorageUuid(),
+         new DatanodeStorage(sd.getStorageUuid(),
+             DatanodeStorage.State.NORMAL,
+             storageType));
+     asyncDiskService.addVolume(sd.getCurrentDir());
+     volumes.addVolume(fsVolume);
+ 
+     LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
+   }
+ 
    /**
     * Add an array of StorageLocation to FsDataset.
     *

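addVolumeAndBlockPool above deliberately keeps iterating when one block pool fails and throws only after the loop, via MultipleIOException, so a single bad pool cannot hide failures in the rest. A minimal standalone sketch of that collect-then-throw shape (simplified names, not the real FsDatasetImpl API):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    public class CollectThenThrow {
      // Try every block pool, remember each failure, throw once at the end.
      static void addBlockPools(Iterable<String> bpids) throws IOException {
        List<IOException> exceptions = new ArrayList<IOException>();
        for (String bpid : bpids) {
          try {
            addBlockPool(bpid);      // hypothetical per-pool operation
          } catch (IOException e) {
            exceptions.add(e);       // keep going; surface it later
          }
        }
        if (!exceptions.isEmpty()) {
          // The real code throws MultipleIOException.createIOException(exceptions);
          // a plain IOException keeps this sketch self-contained.
          throw new IOException(exceptions.size() + " block pool(s) failed");
        }
      }

      static void addBlockPool(String bpid) throws IOException {
        // stub standing in for fsVolume.addBlockPool + getVolumeMap
      }
    }
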
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 8ea653a,9346ea5..cf88444
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@@ -273,6 -274,12 +274,14 @@@ public class FSDirectory implements Clo
      skipQuotaCheck = true;
    }
  
+   private static INodeFile newINodeFile(long id, PermissionStatus permissions,
 -      long mtime, long atime, short replication, long preferredBlockSize) {
++      long mtime, long atime, short replication, long preferredBlockSize,
++      boolean isLazyPersist) {
+     return new INodeFile(id, null, permissions, mtime, atime,
 -        BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize, (byte)0);
++        BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize,
++        isLazyPersist, (byte) 0);
+   }
+ 
    /**
     * Add the given filename to the fs.
     * @throws FileAlreadyExistsException
@@@ -288,9 -294,8 +297,9 @@@
        UnresolvedLinkException, SnapshotAccessControlException, AclException {
  
      long modTime = now();
-     INodeFile newNode = new INodeFile(namesystem.allocateNewInodeId(), null,
-         permissions, modTime, modTime, BlockInfo.EMPTY_ARRAY, replication,
-         preferredBlockSize, isLazyPersist);
+     INodeFile newNode = newINodeFile(namesystem.allocateNewInodeId(),
 -        permissions, modTime, modTime, replication, preferredBlockSize);
++        permissions, modTime, modTime, replication, preferredBlockSize,
++        isLazyPersist);
      newNode.toUnderConstruction(clientName, clientMachine);
  
      boolean added = false;
@@@ -327,14 -331,13 +336,13 @@@
      final INodeFile newNode;
      assert hasWriteLock();
      if (underConstruction) {
-       newNode = new INodeFile(id, null, permissions, modificationTime,
-           modificationTime, BlockInfo.EMPTY_ARRAY, replication,
-           preferredBlockSize, isLazyPersist);
+       newNode = newINodeFile(id, permissions, modificationTime,
 -          modificationTime, replication, preferredBlockSize);
++          modificationTime, replication, preferredBlockSize, isLazyPersist);
        newNode.toUnderConstruction(clientName, clientMachine);
  
      } else {
-       newNode = new INodeFile(id, null, permissions, modificationTime, atime,
-           BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize, isLazyPersist);
+       newNode = newINodeFile(id, permissions, modificationTime, atime,
 -          replication, preferredBlockSize);
++          replication, preferredBlockSize, isLazyPersist);
      }
  
      try {
@@@ -1470,8 -1526,8 +1531,9 @@@
    private HdfsFileStatus getFileInfo4DotSnapshot(String src)
        throws UnresolvedLinkException {
      if (getINode4DotSnapshot(src) != null) {
 -      return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
 -          HdfsFileStatus.EMPTY_NAME, -1L, 0, null, BlockStoragePolicy.ID_UNSPECIFIED);
 +      return new HdfsFileStatus(0, true, 0, 0, false, 0, 0, null, null, null, null,
-           HdfsFileStatus.EMPTY_NAME, -1L, 0, null);
++          HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
++          BlockStoragePolicy.ID_UNSPECIFIED);
      }
      return null;
    }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index a9e4407,af3cf2c..f53afe1
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@@ -887,10 -884,8 +887,10 @@@ public class FSImageFormat 
            in.readShort());
        final long preferredBlockSize = in.readLong();
  
 +      // LazyPersist flag will not be present in old image formats and hence
 +      // can be safely set to false always.
        return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
-           accessTime, replication, preferredBlockSize, false, null);
 -          accessTime, replication, preferredBlockSize, (byte)0, null);
++          accessTime, replication, preferredBlockSize, false, (byte) 0, null);
      }
  
      public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 2088f18,321a148..81bce86
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@@ -291,7 -291,7 +291,8 @@@ public final class FSImageFormatPBINod
        final INodeFile file = new INodeFile(n.getId(),
            n.getName().toByteArray(), permissions, f.getModificationTime(),
            f.getAccessTime(), blocks, replication, f.getPreferredBlockSize(),
-           f.hasIsLazyPersist() ? f.getIsLazyPersist() : false);
++          f.hasIsLazyPersist() ? f.getIsLazyPersist() : false,
+           (byte)f.getStoragePolicyID());
  
        if (f.hasAcl()) {
          file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(),
@@@ -403,7 -403,7 +404,8 @@@
            .setPermission(buildPermissionStatus(file, state.getStringMap()))
            .setPreferredBlockSize(file.getPreferredBlockSize())
            .setReplication(file.getFileReplication())
-           .setIsLazyPersist(file.getLazyPersistFlag());
++          .setIsLazyPersist(file.getLazyPersistFlag())
+           .setStoragePolicyID(file.getLocalStoragePolicyID());
  
        AclFeature f = file.getAclFeature();
        if (f != null) {

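The hasIsLazyPersist() guard above is the usual protobuf2 idiom for reading a field that older fsimages never wrote; with [default = false] declared in fsimage.proto the getter would already return false when the field is absent, so the explicit ternary is defensive rather than strictly required. A self-contained model of the presence/default semantics being relied on:

    public class OptionalFieldDemo {
      // null models a field that an old image simply never wrote.
      static Boolean isLazyPersistField = null;

      static boolean hasIsLazyPersist() { return isLazyPersistField != null; }

      static boolean getIsLazyPersist() {
        // Mirrors proto2: fall back to the declared [default = false].
        return hasIsLazyPersist() ? isLazyPersistField.booleanValue() : false;
      }

      public static void main(String[] args) {
        assert !hasIsLazyPersist() && !getIsLazyPersist();  // old image
        isLazyPersistField = Boolean.TRUE;                  // new image
        assert hasIsLazyPersist() && getIsLazyPersist();
      }
    }
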
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index fee0d7e,2b03362..466d9aa
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@@ -2476,84 -2482,66 +2515,66 @@@ public class FSNamesystem implements Na
  
      waitForLoadingFSImage();
  
-     /*
-      * We want to avoid holding any locks while doing KeyProvider operations,
-      * since they can be very slow. Since the path can
-      * flip flop between being in an encryption zone and not in the meantime,
-      * we need to recheck the preconditions and redo KeyProvider operations
-      * in some situations.
-      *
-      * A special RetryStartFileException is used to indicate that we should
-      * retry creation of a FileEncryptionInfo.
+     /**
+      * If the file is in an encryption zone, we optimistically create an
+      * EDEK for the file by calling out to the configured KeyProvider.
+      * Since this typically involves doing an RPC, we take the readLock
+      * initially, then drop it to do the RPC.
+      * 
+      * Since the path can flip-flop between being in an encryption zone and not
+      * in the meantime, we need to recheck the preconditions when we retake the
+      * lock to do the create. If the preconditions are not met, we throw a
+      * special RetryStartFileException to ask the DFSClient to try the create
+      * again later.
       */
-     BlocksMapUpdateInfo toRemoveBlocks = null;
+     CipherSuite suite = null;
+     String ezKeyName = null;
+     readLock();
      try {
-       boolean shouldContinue = true;
-       int iters = 0;
-       while (shouldContinue) {
-         skipSync = false;
-         if (iters >= 10) {
-           throw new IOException("Too many retries because of encryption zone " +
-               "operations, something might be broken!");
-         }
-         shouldContinue = false;
-         iters++;
- 
-         // Optimistically determine CipherSuite and ezKeyName if the path is
-         // currently within an encryption zone
-         CipherSuite suite = null;
-         String ezKeyName = null;
-         readLock();
-         try {
-           src = resolvePath(src, pathComponents);
-           INodesInPath iip = dir.getINodesInPath4Write(src);
-           // Nothing to do if the path is not within an EZ
-           if (dir.isInAnEZ(iip)) {
-             suite = chooseCipherSuite(iip, cipherSuites);
-             if (suite != null) {
-               Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN),
-                   "Chose an UNKNOWN CipherSuite!");
-             }
-             ezKeyName = dir.getKeyName(iip);
-             Preconditions.checkState(ezKeyName != null);
-           }
-         } finally {
-           readUnlock();
+       src = resolvePath(src, pathComponents);
+       INodesInPath iip = dir.getINodesInPath4Write(src);
+       // Nothing to do if the path is not within an EZ
+       if (dir.isInAnEZ(iip)) {
+         suite = chooseCipherSuite(iip, cipherSuites);
+         if (suite != null) {
+           Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN),
+               "Chose an UNKNOWN CipherSuite!");
          }
+         ezKeyName = dir.getKeyName(iip);
+         Preconditions.checkState(ezKeyName != null);
+       }
+     } finally {
+       readUnlock();
+     }
  
-         Preconditions.checkState(
-             (suite == null && ezKeyName == null) ||
+     Preconditions.checkState(
+         (suite == null && ezKeyName == null) ||
              (suite != null && ezKeyName != null),
-             "Both suite and ezKeyName should both be null or not null");
-         // Generate EDEK if necessary while not holding the lock
-         EncryptedKeyVersion edek =
-             generateEncryptedDataEncryptionKey(ezKeyName);
-         EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
-         // Try to create the file with the computed cipher suite and EDEK
-         writeLock();
-         try {
-           checkOperation(OperationCategory.WRITE);
-           checkNameNodeSafeMode("Cannot create file" + src);
-           src = resolvePath(src, pathComponents);
-           toRemoveBlocks = startFileInternal(pc, src, permissions, holder, 
-               clientMachine, create, overwrite, createParent, replication, 
-               blockSize, isLazyPersist, suite, edek, logRetryCache);
-           stat = dir.getFileInfo(src, false,
-               FSDirectory.isReservedRawName(srcArg));
-         } catch (StandbyException se) {
-           skipSync = true;
-           throw se;
-         } catch (RetryStartFileException e) {
-           shouldContinue = true;
-           if (LOG.isTraceEnabled()) {
-             LOG.trace("Preconditions failed, retrying creation of " +
-                     "FileEncryptionInfo", e);
-           }
-         } finally {
-           writeUnlock();
-         }
-       }
+         "Both suite and ezKeyName should both be null or not null");
+ 
+     // Generate EDEK if necessary while not holding the lock
+     EncryptedKeyVersion edek =
+         generateEncryptedDataEncryptionKey(ezKeyName);
+     EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
+ 
+     // Proceed with the create, using the computed cipher suite and 
+     // generated EDEK
+     BlocksMapUpdateInfo toRemoveBlocks = null;
+     writeLock();
+     try {
+       checkOperation(OperationCategory.WRITE);
+       checkNameNodeSafeMode("Cannot create file" + src);
+       src = resolvePath(src, pathComponents);
+       toRemoveBlocks = startFileInternal(pc, src, permissions, holder, 
+           clientMachine, create, overwrite, createParent, replication, 
 -          blockSize, suite, edek, logRetryCache);
++          blockSize, isLazyPersist, suite, edek, logRetryCache);
+       stat = dir.getFileInfo(src, false,
+           FSDirectory.isReservedRawName(srcArg), false);
+     } catch (StandbyException se) {
+       skipSync = true;
+       throw se;
      } finally {
+       writeUnlock();
        // There might be transactions logged while trying to recover the lease.
        // They need to be sync'ed even when an exception was thrown.
        if (!skipSync) {

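The rewritten block above (taken from trunk) replaces the server-side retry loop with a single probe-RPC-verify pass: read the encryption-zone state under the read lock, drop the lock for the slow KeyProvider call, then recheck under the write lock and throw RetryStartFileException back to the DFSClient if the zone changed in between. A generic, runnable sketch of that shape, with a ReentrantReadWriteLock standing in for the namesystem lock and a string standing in for the EZ key state:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class ProbeRpcVerify {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private volatile String ezKeyName = "zone-key";  // mutable EZ state

      String create() {
        String observed;
        lock.readLock().lock();
        try {
          observed = ezKeyName;                  // 1. cheap probe, read lock
        } finally {
          lock.readLock().unlock();
        }

        String edek = slowKeyProviderCall(observed);  // 2. RPC, no lock held

        lock.writeLock().lock();
        try {
          if (!observed.equals(ezKeyName)) {     // 3. recheck under write lock
            // FSNamesystem throws RetryStartFileException here so the
            // DFSClient restarts from step 1.
            throw new IllegalStateException("EZ state changed, retry");
          }
          return edek;                           // commit the create
        } finally {
          lock.writeLock().unlock();
        }
      }

      private String slowKeyProviderCall(String key) {
        return "EDEK(" + key + ")";              // stands in for the KMS RPC
      }
    }
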
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 35f15ad,7af2b71..2d39063
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@@ -71,12 -72,14 +72,15 @@@ public class INodeFile extends INodeWit
      return inode.asFile();
    }
  
-   /** Format: [16 bits for replication][48 bits for PreferredBlockSize] */
+   /** 
+    * Bit format:
+    * [4-bit storagePolicyID][12-bit replication][48-bit preferredBlockSize]
+    */
    static enum HeaderFormat {
      PREFERRED_BLOCK_SIZE(null, 48, 1),
--    REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 12, 1),
-     LAZY_PERSIST(REPLICATION.BITS, 4, 0);
- 
 -    STORAGE_POLICY_ID(REPLICATION.BITS, BlockStoragePolicy.ID_BIT_LENGTH, 0);
++    REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 11, 1),
++    STORAGE_POLICY_ID(REPLICATION.BITS, BlockStoragePolicy.ID_BIT_LENGTH, 0),
++    LAZY_PERSIST(STORAGE_POLICY_ID.BITS, 1, 0);
  
      private final LongBitFormat BITS;
  
@@@ -92,18 -95,18 +96,24 @@@
        return PREFERRED_BLOCK_SIZE.BITS.retrieve(header);
      }
  
 +    static boolean getLazyPersistFlag(long header) {
 +      return LAZY_PERSIST.BITS.retrieve(header) == 0 ? false : true;
 +    }
 +
-     static long toLong(long preferredBlockSize, short replication, boolean isLazyPersist) {
+     static byte getStoragePolicyID(long header) {
+       return (byte)STORAGE_POLICY_ID.BITS.retrieve(header);
+     }
+ 
+     static long toLong(long preferredBlockSize, short replication,
 -        byte storagePolicyID) {
++        boolean isLazyPersist, byte storagePolicyID) {
        long h = 0;
        h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
        h = REPLICATION.BITS.combine(replication, h);
+       h = STORAGE_POLICY_ID.BITS.combine(storagePolicyID, h);
 +      h = LAZY_PERSIST.BITS.combine(isLazyPersist ? 1 : 0, h);
        return h;
      }
 +
    }
  
    private long header = 0L;
@@@ -111,17 -114,11 +121,18 @@@
    private BlockInfo[] blocks;
  
    INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
 +            long atime, BlockInfo[] blklist, short replication,
 +            long preferredBlockSize) {
 +    this(id, name, permissions, mtime, atime, blklist, replication,
-          preferredBlockSize, false);
++         preferredBlockSize, false, (byte) 0);
 +  }
 +
 +  INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
        long atime, BlockInfo[] blklist, short replication,
-       long preferredBlockSize, boolean isLazyPersist) {
 -      long preferredBlockSize, byte storagePolicyID) {
++      long preferredBlockSize, boolean isLazyPersist, byte storagePolicyID) {
      super(id, name, permissions, mtime, atime);
-     header = HeaderFormat.toLong(preferredBlockSize, replication, isLazyPersist);
+     header = HeaderFormat.toLong(preferredBlockSize, replication,
 -        storagePolicyID);
++        isLazyPersist, storagePolicyID);
      this.blocks = blklist;
    }
    
@@@ -370,11 -367,32 +381,37 @@@
    }
  
    @Override
 +  public boolean getLazyPersistFlag() {
 +    return HeaderFormat.getLazyPersistFlag(header);
 +  }
 +
 +  @Override
+   public byte getLocalStoragePolicyID() {
+     return HeaderFormat.getStoragePolicyID(header);
+   }
+ 
+   @Override
+   public byte getStoragePolicyID() {
+     byte id = getLocalStoragePolicyID();
+     if (id == BlockStoragePolicy.ID_UNSPECIFIED) {
+       return this.getParent() != null ?
+           this.getParent().getStoragePolicyID() : id;
+     }
+     return id;
+   }
+ 
+   private void setStoragePolicyID(byte storagePolicyId) {
+     header = HeaderFormat.STORAGE_POLICY_ID.BITS.combine(storagePolicyId,
+         header);
+   }
+ 
+   public final void setStoragePolicyID(byte storagePolicyId,
+       int latestSnapshotId) throws QuotaExceededException {
+     recordModification(latestSnapshotId);
+     setStoragePolicyID(storagePolicyId);
+   }
+ 
+   @Override
    public long getHeaderLong() {
      return header;
    }

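The merged HeaderFormat packs all four per-file attributes into the single long header: reading the enum above from the bottom up, 48 bits of preferredBlockSize, 11 bits of replication, a 4-bit storagePolicyID (per the updated Javadoc and the 0..15 range exercised by TestINodeFile below), and one lazyPersist bit. Note the Javadoc still says 12-bit replication; the merged enum actually allots 11 bits to replication and gives the remaining bit to LAZY_PERSIST. A self-contained model of that layout with plain shifts, standing in for the LongBitFormat plumbing:

    // Model of the merged 64-bit INodeFile header (low bits first):
    //   bits  0..47  preferredBlockSize (48 bits)
    //   bits 48..58  replication        (11 bits)
    //   bits 59..62  storagePolicyID    (4 bits)
    //   bit      63  lazyPersist        (1 bit)
    public final class HeaderSketch {
      static long toLong(long blockSize, int replication, boolean lazyPersist,
          int policyId) {
        return (blockSize & ((1L << 48) - 1))
            | ((long) (replication & 0x7FF) << 48)
            | ((long) (policyId & 0xF) << 59)
            | ((lazyPersist ? 1L : 0L) << 63);
      }

      static long getBlockSize(long h)      { return h & ((1L << 48) - 1); }
      static int getReplication(long h)     { return (int) ((h >>> 48) & 0x7FF); }
      static int getPolicyId(long h)        { return (int) ((h >>> 59) & 0xF); }
      static boolean getLazyPersist(long h) { return (h >>> 63) != 0; }

      public static void main(String[] args) {
        long h = toLong(128L << 20, 3, true, 5);
        assert getBlockSize(h) == 128L << 20 && getReplication(h) == 3
            && getPolicyId(h) == 5 && getLazyPersist(h);
      }
    }
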
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
index 64ee1fc,f9d2700..c9dd66d
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
@@@ -46,11 -47,11 +48,12 @@@ public interface INodeFileAttributes ex
  
      public SnapshotCopy(byte[] name, PermissionStatus permissions,
          AclFeature aclFeature, long modificationTime, long accessTime,
-         short replication, long preferredBlockSize,
-         boolean isTransient, XAttrFeature xAttrsFeature) {
 -        short replication, long preferredBlockSize, byte storagePolicyID,
 -        XAttrFeature xAttrsFeature) {
++        short replication, long preferredBlockSize, boolean isLazyPersist,
++        byte storagePolicyID, XAttrFeature xAttrsFeature) {
        super(name, permissions, aclFeature, modificationTime, accessTime, 
            xAttrsFeature);
-       header = HeaderFormat.toLong(preferredBlockSize, replication, isTransient);
 -      header = HeaderFormat.toLong(preferredBlockSize, replication, storagePolicyID);
++      header = HeaderFormat.toLong(preferredBlockSize, replication,
++          isLazyPersist, storagePolicyID);
      }
  
      public SnapshotCopy(INodeFile file) {
@@@ -69,9 -70,11 +72,14 @@@
      }
  
      @Override
 +    public boolean getLazyPersistFlag() { return HeaderFormat.getLazyPersistFlag(header); }
 +
 +    @Override
+     public byte getLocalStoragePolicyID() {
+       return HeaderFormat.getStoragePolicyID(header);
+     }
+ 
+     @Override
      public long getHeaderLong() {
        return header;
      }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
index 6913c29,512913b..323b239
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
@@@ -67,11 -67,10 +67,12 @@@ public class NameNodeLayoutVersion 
      EDITLOG_LENGTH(-56, "Add length field to every edit log op"),
      XATTRS(-57, "Extended attributes"),
      CREATE_OVERWRITE(-58, "Use single editlog record for " +
-         "creating file with overwrite"),
+       "creating file with overwrite"),
      XATTRS_NAMESPACE_EXT(-59, "Increase number of xattr namespaces"),
 -    BLOCK_STORAGE_POLICY(-60, "Block Storage policy");
++    BLOCK_STORAGE_POLICY(-60, "Block Storage policy"),
 +    LAZY_PERSIST_FILES(-60, "Support for optional lazy persistence of " + 
-         " files with reduced durability guarantees");
- 
++        " files with reduced durability guarantees");    
+     
      private final FeatureInfo info;
  
      /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
index 6e00c17,ff33225..0312f6e
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
@@@ -221,8 -221,7 +221,8 @@@ public class FSImageFormatPBSnapshot 
                .toByteArray(), permission, acl, fileInPb.getModificationTime(),
                fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
                fileInPb.getPreferredBlockSize(),
 +              fileInPb.hasIsLazyPersist() ? fileInPb.getIsLazyPersist() : false,
-               xAttrs);
+               (byte)fileInPb.getStoragePolicyID(), xAttrs);
          }
  
          FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index adbc927,2b3d7e6..4a79689
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@@ -262,9 -262,12 +264,13 @@@ public class JsonUtil 
      Long childrenNumLong = (Long) m.get("childrenNum");
      final int childrenNum = (childrenNumLong == null) ? -1
              : childrenNumLong.intValue();
+     final byte storagePolicy = m.containsKey("storagePolicy") ?
+         (byte) (long) (Long) m.get("storagePolicy") :
+           BlockStoragePolicy.ID_UNSPECIFIED;
      return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
 -        blockSize, mTime, aTime, permission, owner, group, symlink,
 -        DFSUtil.string2Bytes(localName), fileId, childrenNum, null, storagePolicy);
 +        blockSize, isLazyPersist, mTime, aTime, permission, owner, group,
-         symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, null);
++        symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, null,
++        storagePolicy);
    }
  
    /** Convert an ExtendedBlock to a Json map. */

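The storagePolicy read above must tolerate JSON from older servers that never emit the key, falling back to BlockStoragePolicy.ID_UNSPECIFIED, and the (byte) (long) (Long) chain unboxes the Long produced by the JSON parser before narrowing it to the one-byte policy id. A tiny self-contained sketch of the same defaulting idiom, with a plain Map standing in for the parsed JSON (and 0 assumed as the unspecified id, matching the proto default below):

    import java.util.HashMap;
    import java.util.Map;

    public class StoragePolicyJson {
      static final byte ID_UNSPECIFIED = 0;  // assumed sentinel value

      static byte storagePolicy(Map<String, Object> m) {
        return m.containsKey("storagePolicy")
            ? (byte) (long) (Long) m.get("storagePolicy")  // unbox, then narrow
            : ID_UNSPECIFIED;                              // old server: absent
      }

      public static void main(String[] args) {
        Map<String, Object> m = new HashMap<String, Object>();
        assert storagePolicy(m) == ID_UNSPECIFIED;  // key absent -> default
        m.put("storagePolicy", 7L);
        assert storagePolicy(m) == 7;               // key present -> its value
      }
    }
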
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
index 1db9366,588f6c8..85edfab
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
@@@ -138,7 -138,7 +138,8 @@@ message INodeSection 
      optional FileUnderConstructionFeature fileUC = 7;
      optional AclFeatureProto acl = 8;
      optional XAttrFeatureProto xAttrs = 9;
-     optional bool isLazyPersist = 10 [default = false];
+     optional uint32 storagePolicyID = 10;
++    optional bool isLazyPersist = 11 [default = false];
    }
  
    message INodeDirectory {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index b54638e,f12055c..cbfc1d3
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@@ -158,7 -158,7 +158,8 @@@ message FsPermissionProto 
  enum StorageTypeProto {
    DISK = 1;
    SSD = 2;
-   RAM_DISK = 3;
+   ARCHIVE = 3;
++  RAM_DISK = 4;
  }
  
  /**
@@@ -261,9 -261,11 +262,11 @@@ message HdfsFileStatusProto 
    // Optional field for fileId
    optional uint64 fileId = 13 [default = 0]; // default as an invalid id
    optional int32 childrenNum = 14 [default = -1];
 -
    // Optional field for file encryption
    optional FileEncryptionInfoProto fileEncryptionInfo = 15;
-   optional bool isLazyPersist = 16 [default = false];
+ 
+   optional uint32 storagePolicy = 16 [default = 0]; // block storage policy id
++  optional bool isLazyPersist = 17 [default = false];
  } 
  
  /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 83191c3,7be6a49..a9f512f
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@@ -88,11 -93,7 +93,8 @@@ import java.security.PrivilegedExceptio
  import java.util.*;
  import java.util.concurrent.TimeoutException;
  
- import static org.apache.hadoop.fs.CreateFlag.CREATE;
- import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
- import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
- import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
- import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
++import static org.apache.hadoop.fs.CreateFlag.*;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
  import static org.junit.Assert.assertEquals;
  import static org.junit.Assert.assertTrue;
  import static org.junit.Assert.fail;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index f0ffb2b,5659e92..558e974
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@@ -253,14 -253,14 +253,14 @@@ public class TestDFSClientRetries 
                           anyLong(), any(String[].class))).thenAnswer(answer);
      
      Mockito.doReturn(
 -            new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
 +            new HdfsFileStatus(0, false, 1, 1024, false, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                 1010, 0, null)).when(mockNN).getFileInfo(anyString());
+                 1010, 0, null, (byte) 0)).when(mockNN).getFileInfo(anyString());
      
      Mockito.doReturn(
 -            new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
 +            new HdfsFileStatus(0, false, 1, 1024, false, 0, 0, new FsPermission(
                  (short) 777), "owner", "group", new byte[0], new byte[0],
-                 1010, 0, null))
+                 1010, 0, null, (byte) 0))
          .when(mockNN)
          .create(anyString(), (FsPermission) anyObject(), anyString(),
              (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
index f3c0911,6119b6e..b323c1b
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
@@@ -340,14 -341,14 +341,14 @@@ public class TestLease 
      }
  
      Mockito.doReturn(
 -        new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
 +        new HdfsFileStatus(0, false, 1, 1024, false, 0, 0, new FsPermission(
              (short) 777), "owner", "group", new byte[0], new byte[0],
-             1010, 0, null)).when(mcp).getFileInfo(anyString());
+             1010, 0, null, (byte) 0)).when(mcp).getFileInfo(anyString());
      Mockito
          .doReturn(
 -            new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
 +            new HdfsFileStatus(0, false, 1, 1024, false, 0, 0, new FsPermission(
                  (short) 777), "owner", "group", new byte[0], new byte[0],
-                 1010, 0, null))
+                 1010, 0, null, (byte) 0))
          .when(mcp)
          .create(anyString(), (FsPermission) anyObject(), anyString(),
              (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 380f6ec,8d298ae..7a0ca63
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@@ -1015,10 -1015,11 +1015,11 @@@ public class TestFsck 
      path = DFSUtil.string2Bytes(pathString);
      long fileId = 312321L;
      int numChildren = 1;
+     byte storagePolicy = 0;
  
      HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
 -        blockSize, modTime, accessTime, perms, owner, group, symlink, path,
 -        fileId, numChildren, null, storagePolicy);
 +        blockSize, false, modTime, accessTime, perms, owner, group, symlink,
-         path, fileId, numChildren, null);
++        path, fileId, numChildren, null, storagePolicy);
      Result res = new Result(conf);
  
      try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index a739b7a,26d9a96..df42a59
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@@ -82,8 -82,32 +82,32 @@@ public class TestINodeFile 
  
    INodeFile createINodeFile(short replication, long preferredBlockSize) {
      return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
 -        null, replication, preferredBlockSize, (byte)0);
 +        null, replication, preferredBlockSize);
    }
+ 
+   private static INodeFile createINodeFile(byte storagePolicyID) {
+     return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
 -        null, (short)3, 1024L, storagePolicyID);
++        null, (short)3, 1024L, false, storagePolicyID);
+   }
+ 
+   @Test
+   public void testStoragePolicyID () {
+     for(byte i = 0; i < 16; i++) {
+       final INodeFile f = createINodeFile(i);
+       assertEquals(i, f.getStoragePolicyID());
+     }
+   }
+ 
+   @Test(expected=IllegalArgumentException.class)
+   public void testStoragePolicyIdBelowLowerBound () throws IllegalArgumentException {
+     createINodeFile((byte)-1);
+   }
+ 
+   @Test(expected=IllegalArgumentException.class)
+   public void testStoragePolicyIdAboveUpperBound () throws IllegalArgumentException {
+     createINodeFile((byte)16);
+   }
+ 
    /**
     * Test for the Replication value. Sets a value and checks if it was set
     * correct.
@@@ -316,7 -340,8 +340,8 @@@
  
      {//cast from INodeFileUnderConstruction
        final INode from = new INodeFile(
-           INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication, 1024L);
+           INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication,
 -          1024L, (byte)0);
++          1024L);
        from.asFile().toUnderConstruction("client", "machine");
      
        //cast to INodeFile, should success

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bbf800/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index ec82c48,3eba7db..4e9691f
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@@ -62,9 -62,9 +62,9 @@@ public class TestJsonUtil 
      final long now = Time.now();
      final String parent = "/dir";
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
 -        now, now + 10, new FsPermission((short) 0644), "user", "group",
 +        false, now, now + 10, new FsPermission((short) 0644), "user", "group",
          DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
-         INodeId.GRANDFATHER_INODE_ID, 0, null);
+         INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
      final FileStatus fstatus = toFileStatus(status, parent);
      System.out.println("status  = " + status);
      System.out.println("fstatus = " + fstatus);
