[46/50] [abbrv] hadoop git commit: Merge commit '456e901a4c5c639267ee87b8e5f1319f256d20c2' (HDFS-6407. Add sorting and pagination in the datanode tab of the NN Web UI. Contributed by Haohui Mai.) into

2015-08-24 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b6a63bb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 36ce133,508da85..dfea5f3
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@@ -42,7 -44,8 +44,9 @@@ import javax.management.ObjectName
  import org.apache.hadoop.HadoopIllegalArgumentException;
  import org.apache.hadoop.classification.InterfaceAudience;
  import org.apache.hadoop.conf.Configuration;
++import org.apache.hadoop.fs.FileEncryptionInfo;
  import org.apache.hadoop.fs.StorageType;
+ import org.apache.hadoop.hdfs.DFSUtilClient;
  import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
  import org.apache.hadoop.hdfs.DFSConfigKeys;
  import org.apache.hadoop.hdfs.DFSUtil;
@@@ -52,12 -55,10 +56,11 @@@ import org.apache.hadoop.hdfs.protocol.
  import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
  import org.apache.hadoop.hdfs.protocol.DatanodeID;
  import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 +import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
  import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
--import org.apache.hadoop.fs.FileEncryptionInfo;
  import org.apache.hadoop.hdfs.protocol.LocatedBlock;
  import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 +import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
  import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
  import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
  import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
@@@ -77,22 -79,17 +81,24 @@@ import org.apache.hadoop.hdfs.server.pr
  import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
  import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
  import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 +import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;
  import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
  import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
  import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
  import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
  import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
  import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
  import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 +
 +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
 +import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
 +
+ import org.apache.hadoop.metrics2.util.MBeans;
  import org.apache.hadoop.net.Node;
  import org.apache.hadoop.security.UserGroupInformation;
 +import org.apache.hadoop.security.token.Token;
  import org.apache.hadoop.util.Daemon;
  import org.apache.hadoop.util.LightWeightGSet;
  import org.apache.hadoop.util.Time;
@@@ -818,11 -786,12 +835,11 @@@ public class BlockManager implements Bl
  }
  return locations;
}
--  
 -  private List<LocatedBlock> createLocatedBlockList(
 -  final BlockInfo[] blocks,
++
 +  private List<LocatedBlock> createLocatedBlockList(final BlockInfo[] blocks,
final long offset, final long length, final int nrBlocksToReturn,
final AccessMode mode) throws IOException {
 -int curBlk = 0;
 +int curBlk;
  long curPos = 0, blkSize = 0;
  int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
  for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
@@@ -875,25 -844,19 +892,26 @@@
}
  
/** @return a LocatedBlock for the given block */
-   private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos) {
 -  private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos
 -  ) throws IOException {
 -    if (blk instanceof BlockInfoContiguousUnderConstruction) {
 -      if (blk.isComplete()) {
 -        throw new IOException(
 -            "blk instanceof BlockInfoUnderConstruction && blk.isComplete()"
 -            + ", blk=" + blk);
++  private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos)
++  throws IOException {
 +    if (!blk.isComplete()) {
 +      if (blk.isStriped()) {
-         final BlockInfoUnderConstructionStriped uc =
-             (BlockInfoUnderConstructionStriped) blk;
++        final BlockInfoStripedUnderConstruction uc =
++            (BlockInfoStripedUnderConstruction) blk;
 +
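
Read together, the hunks above show the shape of the merge: BlockManager picks up the erasure-coding imports, and createLocatedBlock() now dispatches on whether an incomplete block is striped before choosing how to locate it. A minimal, self-contained sketch of that dispatch follows, with stand-in types; these are not the HDFS classes, and the under-construction branch only models what the excerpt shows before it cuts off (the real code builds a LocatedStripedBlock from the group's expected storages).

    // Toy model of the merged createLocatedBlock() control flow.
    // All types here are stand-ins; only the branching mirrors the diff above.
    import java.io.IOException;
    import java.util.List;

    class LocateSketch {
      interface BlockInfo {
        boolean isComplete();
        boolean isStriped();
        List<String> expectedStorages(); // stands in for getExpectedStorageLocations()
      }

      record LocatedBlock(String kind, List<String> storages, long offset) {}

      static LocatedBlock createLocatedBlock(BlockInfo blk, long pos)
          throws IOException {
        if (!blk.isComplete()) {
          // Under construction: locations come from the *expected* storages,
          // and a striped block group is reported differently from a
          // contiguous replicated block (LocatedStripedBlock in the real code).
          String kind = blk.isStriped() ? "striped-uc" : "contiguous-uc";
          return new LocatedBlock(kind, blk.expectedStorages(), pos);
        }
        // Complete block: the pre-existing LocatedBlock path (elided here).
        return new LocatedBlock("complete", List.of(), pos);
      }
    }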

[46/50] [abbrv] hadoop git commit: Merge commit '456e901a4c5c639267ee87b8e5f1319f256d20c2' (HDFS-6407. Add sorting and pagination in the datanode tab of the NN Web UI. Contributed by Haohui Mai.) into

2015-08-20 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc10933a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 36ce133,508da85..a64e50c
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@@ -42,7 -44,8 +44,9 @@@ import javax.management.ObjectName
  import org.apache.hadoop.HadoopIllegalArgumentException;
  import org.apache.hadoop.classification.InterfaceAudience;
  import org.apache.hadoop.conf.Configuration;
++import org.apache.hadoop.fs.FileEncryptionInfo;
  import org.apache.hadoop.fs.StorageType;
+ import org.apache.hadoop.hdfs.DFSUtilClient;
  import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
  import org.apache.hadoop.hdfs.DFSConfigKeys;
  import org.apache.hadoop.hdfs.DFSUtil;
@@@ -52,12 -55,10 +56,11 @@@ import org.apache.hadoop.hdfs.protocol.
  import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
  import org.apache.hadoop.hdfs.protocol.DatanodeID;
  import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 +import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
  import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
--import org.apache.hadoop.fs.FileEncryptionInfo;
  import org.apache.hadoop.hdfs.protocol.LocatedBlock;
  import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 +import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
  import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
  import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
  import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
@@@ -77,22 -79,17 +81,24 @@@ import org.apache.hadoop.hdfs.server.pr
  import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
  import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
  import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 +import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;
  import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
  import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
  import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
  import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
  import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
  import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
  import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 +
 +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
 +import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
 +
+ import org.apache.hadoop.metrics2.util.MBeans;
  import org.apache.hadoop.net.Node;
  import org.apache.hadoop.security.UserGroupInformation;
 +import org.apache.hadoop.security.token.Token;
  import org.apache.hadoop.util.Daemon;
  import org.apache.hadoop.util.LightWeightGSet;
  import org.apache.hadoop.util.Time;
@@@ -818,11 -786,12 +835,11 @@@ public class BlockManager implements Bl
  }
  return locations;
}
--  
 -  private List<LocatedBlock> createLocatedBlockList(
 -  final BlockInfo[] blocks,
++
 +  private List<LocatedBlock> createLocatedBlockList(final BlockInfo[] blocks,
final long offset, final long length, final int nrBlocksToReturn,
final AccessMode mode) throws IOException {
 -int curBlk = 0;
 +int curBlk;
  long curPos = 0, blkSize = 0;
  int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
  for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
@@@ -875,25 -844,19 +892,26 @@@
}
  
/** @return a LocatedBlock for the given block */
-   private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos) {
+   private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos
 -  ) throws IOException {
 -    if (blk instanceof BlockInfoContiguousUnderConstruction) {
 -      if (blk.isComplete()) {
 -        throw new IOException(
 -            "blk instanceof BlockInfoUnderConstruction && blk.isComplete()"
 -            + ", blk=" + blk);
++  ) throws IOException {
 +    if (!blk.isComplete()) {
 +      if (blk.isStriped()) {
-         final BlockInfoUnderConstructionStriped uc =
-             (BlockInfoUnderConstructionStriped) blk;
++        final BlockInfoStripedUnderConstruction uc =
++            (BlockInfoStripedUnderConstruction) blk;
 +        final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
 +
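
Both hunks also add static imports of BLOCK_STRIPED_CELL_SIZE and StripedBlockUtil.getInternalBlockLength, which is where the sizing arithmetic for striped block groups lives. Below is a hedged stand-in for that helper covering data blocks only; the real utility also handles parity blocks, which this excerpt never reaches.

    // Stand-in for StripedBlockUtil.getInternalBlockLength, data blocks only.
    // Cells of cellSize bytes go round-robin across numDataBlocks internal
    // blocks, so block i holds one cell per full stripe plus whatever part of
    // the final, partial stripe reaches it.
    class StripeMath {
      static long internalBlockLength(long groupSize, int cellSize,
          int numDataBlocks, int idxInGroup) {
        final long stripeSize = (long) cellSize * numDataBlocks;
        final long fullStripes = groupSize / stripeSize;
        final long lastStripe = groupSize % stripeSize;
        // Bytes of the last (partial) stripe that land in block idxInGroup.
        final long tail = Math.min(cellSize,
            Math.max(0L, lastStripe - (long) idxInGroup * cellSize));
        return fullStripes * cellSize + tail;
      }

      public static void main(String[] args) {
        // 64 KiB cells, 6 data blocks, a 400 KiB block group:
        // prints 80, 64, 64, 64, 64, 64 (KiB), which sums back to 400.
        for (int i = 0; i < 6; i++) {
          System.out.println("block " + i + ": "
              + internalBlockLength(400L << 10, 64 << 10, 6, i) / 1024 + " KiB");
        }
      }
    }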