[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2018-02-15 - aengineer
Merge branch 'trunk' into HDFS-7240

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

Added the following code in
hadoop/ozone/container/common/impl/ContainerManagerImpl.java:

  @Override
  public void readLockInterruptibly() throws InterruptedException {
    this.lock.readLock().lockInterruptibly();
  }

and manually updated the version in the modified
hadoop-tools/hadoop-ozone/pom.xml
to 3.2.0-SNAPSHOT.
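
A minimal caller-side sketch of the locking pattern above (illustrative only, not part of the merge; the example class assumes a ReentrantReadWriteLock field named lock):

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative sketch: take the read lock interruptibly so a blocked reader
// can be cancelled, and always release the lock in a finally block.
public class ReadLockSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  public void readLockInterruptibly() throws InterruptedException {
    this.lock.readLock().lockInterruptibly();
  }

  public void readUnlock() {
    this.lock.readLock().unlock();
  }

  void readSharedState() throws InterruptedException {
    readLockInterruptibly();
    try {
      // ... read container state while holding the read lock ...
    } finally {
      readUnlock();
    }
  }
}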


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47919787
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47919787
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47919787

Branch: refs/heads/HDFS-7240
Commit: 479197872ba89159ec2160fbdda92a1665362b5d
Parents: fc84744 4747395
Author: Anu Engineer 
Authored: Thu Feb 15 15:28:08 2018 -0800
Committer: Anu Engineer 
Committed: Thu Feb 15 15:28:08 2018 -0800

--
 hadoop-assemblies/pom.xml   |   4 +-
 hadoop-build-tools/pom.xml  |   2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml |   4 +-
 .../hadoop-client-check-invariants/pom.xml  |   4 +-
 .../hadoop-client-check-test-invariants/pom.xml |   4 +-
 .../hadoop-client-integration-tests/pom.xml |   4 +-
 .../hadoop-client-minicluster/pom.xml   |   4 +-
 .../hadoop-client-runtime/pom.xml   |   4 +-
 hadoop-client-modules/hadoop-client/pom.xml |   4 +-
 hadoop-client-modules/pom.xml   |   2 +-
 .../hadoop-cloud-storage/pom.xml|   4 +-
 hadoop-cloud-storage-project/pom.xml|   4 +-
 .../hadoop-annotations/pom.xml  |   4 +-
 .../hadoop-auth-examples/pom.xml|   4 +-
 hadoop-common-project/hadoop-auth/pom.xml   |  14 +-
 .../client/AuthenticatorTestCase.java   |  51 +-
 .../client/TestKerberosAuthenticator.java   |  41 +-
 hadoop-common-project/hadoop-common/pom.xml |   4 +-
 .../hadoop-common/src/main/bin/hadoop   |   4 +
 .../hadoop-common/src/main/bin/hadoop.cmd   |   7 +-
 .../org/apache/hadoop/conf/Configuration.java   |  80 ++
 .../org/apache/hadoop/conf/StorageSize.java | 106 +++
 .../org/apache/hadoop/conf/StorageUnit.java | 530 +++
 .../hadoop/fs/CommonConfigurationKeys.java  |   2 +-
 .../java/org/apache/hadoop/fs/FileUtil.java | 257 +-
 .../org/apache/hadoop/fs/LocalFileSystem.java   |   2 +-
 .../apache/hadoop/ha/ActiveStandbyElector.java  |  30 +-
 .../apache/hadoop/ha/FailoverController.java|  20 +-
 .../org/apache/hadoop/ha/HealthMonitor.java |   9 +-
 .../org/apache/hadoop/http/HttpServer2.java |   2 +-
 .../org/apache/hadoop/io/retry/RetryUtils.java  |  11 +-
 .../main/java/org/apache/hadoop/net/DNS.java|  39 +-
 .../AbstractDelegationTokenSecretManager.java   |   6 +
 .../apache/hadoop/service/AbstractService.java  |  27 +-
 .../hadoop/service/ServiceOperations.java   |   6 +-
 .../org/apache/hadoop/util/CombinedIPList.java  |  59 ++
 .../hadoop/util/GenericOptionsParser.java   |   8 +-
 .../apache/hadoop/util/JsonSerialization.java   |   8 +
 .../java/org/apache/hadoop/util/RunJar.java |  69 ++
 .../src/main/resources/core-default.xml |  13 +-
 .../src/site/markdown/CommandsManual.md |   6 +
 .../src/site/markdown/SecureMode.md |  32 +-
 .../apache/hadoop/conf/TestConfiguration.java   |  76 ++
 .../org/apache/hadoop/conf/TestStorageUnit.java | 277 ++
 .../hadoop/fs/TestDelegateToFileSystem.java |   2 +-
 .../java/org/apache/hadoop/fs/TestFileUtil.java |  86 ++
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |   2 +-
 .../hadoop/service/TestServiceOperations.java   |   3 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java |  40 +-
 .../apache/hadoop/test/TestLambdaTestUtils.java |  36 +
 .../java/org/apache/hadoop/util/TestRunJar.java |  57 ++
 .../src/test/scripts/start-build-env.bats   | 102 +++
 hadoop-common-project/hadoop-kms/pom.xml|   4 +-
 .../hadoop/crypto/key/kms/server/KMS.java   |   4 +-
 hadoop-common-project/hadoop-minikdc/pom.xml|   4 +-
 hadoop-common-project/hadoop-nfs/pom.xml|   4 +-
 hadoop-common-project/pom.xml   |   4 +-
 hadoop-dist/pom.xml |   4 +-
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   4 +-
 .../org/apache/hadoop/hdfs/ClientContext.java   |   3 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java |   4 +-
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |   6 +-
 .../hdfs/client/HdfsClientConfigKeys.java   |   5 +-
 .../hdfs/client/impl/BlockReaderFactory.java  

[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2017-03-03 - aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
--
diff --cc hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
index 000,a2ba0bf..4b19a21f
mode 00,100644..100644
--- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
@@@ -1,0 -1,132 +1,143 @@@
[pom.xml body not recoverable verbatim from the archive (XML tags stripped); recoverable content: the new
hadoop-client-check-test-invariants module, version 3.0.0-alpha3-SNAPSHOT, packaging "pom", named
"Apache Hadoop Client Packaging Invariants for Test" with the description "Enforces our invariants for the
testing client modules", declares org.apache.hadoop:hadoop-project 3.0.0-alpha3-SNAPSHOT
(../../hadoop-project) as its parent, depends on hadoop-client-api, hadoop-client-runtime, and
hadoop-client-minicluster, and configures maven-enforcer-plugin 1.4 with
org.codehaus.mojo:extra-enforcer-rules 1.0-beta-3 in an "enforce-banned-dependencies" execution
(goal "enforce") whose configuration references org.apache.hadoop:hadoop-annotations,
org.apache.htrace:htrace-core4, org.slf4j:slf4j-api, commons-logging:commons-logging, log4j:log4j,
junit:junit, and org.hamcrest:hamcrest-core, plus a dependency entry for
org.apache.hadoop:hadoop-annotations; the lines added by this merge (the "++" side) introduce an
additional io.netty / netty / * entry.]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09517262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 86aed61,6f24858..f5d7da1
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@@ -115,8 -108,8 +110,9 @@@ import org.apache.hadoop.hdfs.DFSUtil
  import org.apache.hadoop.hdfs.DFSUtilClient;
  import org.apache.hadoop.hdfs.HDFSPolicyProvider;
  import org.apache.hadoop.hdfs.HdfsConfiguration;
- import org.apache.hadoop.ozone.container.common.statemachine
- .DatanodeStateMachine;
++import 

[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2016-08-19 - aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/db22affd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index b692a33,ad3c172..243dbd2
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@@ -390,7 -389,10 +395,10 @@@ public class DataNode extends Reconfigu
private static final int NUM_CORES = Runtime.getRuntime()
.availableProcessors();
private static final double CONGESTION_RATIO = 1.5;
+   private DiskBalancer diskBalancer;
+ 
 -
+   private final SocketFactory socketFactory;
 +  private OzoneContainer ozoneServer;
  
private static Tracer createTracer(Configuration conf) {
  return new Tracer.Builder("DataNode").
@@@ -421,9 -421,9 +429,10 @@@
  this.connectToDnViaHostname = false;
  this.blockScanner = new BlockScanner(this, conf);
  this.pipelineSupportECN = false;
 +this.ozoneEnabled = false;
  this.checkDiskErrorInterval =
  ThreadLocalRandom.current().nextInt(5000, (int) (5000 * 1.25));
+ this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
  initOOBTimeout();
}
  
@@@ -1145,9 -1183,26 +1194,26 @@@
 * Report a bad block which is hosted on the local DN.
 */
public void reportBadBlocks(ExtendedBlock block) throws IOException{
- BPOfferService bpos = getBPOSForBlock(block);
  FsVolumeSpi volume = getFSDataset().getVolume(block);
- bpos.reportBadBlocks(
+ if (volume == null) {
+   LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
+   return;
+ }
+ reportBadBlocks(block, volume);
+   }
+ 
+   /**
+* Report a bad block which is hosted on the local DN.
+*
+* @param block the bad block which is hosted on the local DN
+* @param volume the volume that block is stored in and the volume
+*must not be null
+* @throws IOException
+*/
+   public void reportBadBlocks(ExtendedBlock block, FsVolumeSpi volume)
+   throws IOException {
+ BPOfferService bpos = getBPOSForBlock(block);
 -bpos.reportBadBlocks(
++ bpos.reportBadBlocks(
  block, volume.getStorageID(), volume.getStorageType());
}
  
@@@ -1554,15 -1595,7 +1620,16 @@@
  data.addBlockPool(nsInfo.getBlockPoolID(), conf);
  blockScanner.enableBlockPoolId(bpos.getBlockPoolId());
  initDirectoryScanner(conf);
 +if(this.ozoneEnabled) {
 +  try {
 +ozoneServer = new OzoneContainer(conf, this.getFSDataset());
 +ozoneServer.start();
 +LOG.info("Ozone container server started.");
 +  } catch (Exception ex) {
 +LOG.error("Unable to start Ozone. ex: {}", ex.toString());
 +  }
 +}
+ initDiskBalancer(data, conf);
}
  
  List<BPOfferService> getAllBpOs() {
@@@ -3338,4 -3402,75 +3455,74 @@@
public Tracer getTracer() {
  return tracer;
}
 -
+   /**
+* Allows submission of a disk balancer Job.
+* @param planID  - Hash value of the plan.
+* @param planVersion - Plan version, reserved for future use. We have only
+*version 1 now.
+* @param planFile - Plan file name
+* @param planData - Actual plan data in json format
+* @throws IOException
+*/
+   @Override
+   public void submitDiskBalancerPlan(String planID, long planVersion,
+   String planFile, String planData, boolean skipDateCheck)
+   throws IOException {
+ checkSuperuserPrivilege();
+ // TODO : Support force option
+ this.diskBalancer.submitPlan(planID, planVersion, planFile, planData,
+ skipDateCheck);
+   }
+ 
+   /**
+* Cancels a running plan.
+* @param planID - Hash string that identifies a plan
+*/
+   @Override
+   public void cancelDiskBalancePlan(String planID) throws
+   IOException {
+ checkSuperuserPrivilege();
+ this.diskBalancer.cancelPlan(planID);
+   }
+ 
+   /**
+* Returns the status of current or last executed work plan.
+* @return DiskBalancerWorkStatus.
+* @throws IOException
+*/
+   @Override
+   public DiskBalancerWorkStatus queryDiskBalancerPlan() throws IOException {
+ checkSuperuserPrivilege();
+ return this.diskBalancer.queryWorkStatus();
+   }
+ 
+   /**
+* Gets a runtime configuration value from  diskbalancer instance. For
+* example : DiskBalancer bandwidth.
+*
+* @param key - String that represents the run time key value.
+* @return value of the key as a string.
+* @throws IOException - Throws if there is no such key
+*/
+   @Override
+   public String getDiskBalancerSetting(String key) throws IOException {
+ checkSuperuserPrivilege();
+ Preconditions.checkNotNull(key);
+ 

[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2016-02-01 - aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/16440b83/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
index 3147767,000..c85a554
mode 100644,00..100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
@@@ -1,323 -1,0 +1,323 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.hadoop.storagecontainer;
 +
 +import com.google.protobuf.BlockingService;
 +import org.apache.hadoop.ha.HAServiceProtocol;
 +import org.apache.hadoop.hdfs.DFSUtil;
 +import org.apache.hadoop.hdfs.DFSUtilClient;
 +import org.apache.hadoop.hdfs.protocol.*;
 +import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos;
 +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB;
 +import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB;
 +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 +import org.apache.hadoop.hdfs.server.blockmanagement.BlocksMap;
 +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 +import org.apache.hadoop.hdfs.server.namenode.NameNode;
 +import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 +import org.apache.hadoop.hdfs.server.protocol.*;
 +import org.apache.hadoop.ipc.ProtobufRpcEngine;
 +import org.apache.hadoop.ipc.RPC;
 +import org.apache.hadoop.ipc.WritableRpcEngine;
 +import org.apache.hadoop.net.NetUtils;
 +import org.apache.hadoop.ozone.OzoneConfiguration;
 +import org.apache.hadoop.storagecontainer.protocol.ContainerLocationProtocol;
 +import org.apache.hadoop.util.LightWeightGSet;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import java.io.IOException;
 +import java.net.InetSocketAddress;
 +import java.util.List;
 +
 +import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 +
 +/**
 + * Service that allocates storage containers and tracks their
 + * location.
 + */
 +public class StorageContainerManager
 +implements DatanodeProtocol, ContainerLocationProtocol {
 +
 +  public static final Logger LOG =
 +  LoggerFactory.getLogger(StorageContainerManager.class);
 +
 +  private final Namesystem ns = new StorageContainerNameService();
 +  private final BlockManager blockManager;
 +
 +  private long txnId = 234;
 +
 +  /** The RPC server that listens to requests from DataNodes. */
 +  private final RPC.Server serviceRpcServer;
 +  private final InetSocketAddress serviceRPCAddress;
 +
 +  /** The RPC server that listens to requests from clients. */
 +  private final RPC.Server clientRpcServer;
 +  private final InetSocketAddress clientRpcAddress;
 +
 +  public StorageContainerManager(OzoneConfiguration conf)
 +  throws IOException {
 +BlocksMap containerMap = new BlocksMap(
 +LightWeightGSet.computeCapacity(2.0, "BlocksMap"),
 +new StorageContainerMap());
- this.blockManager = new BlockManager(ns, conf, containerMap);
++this.blockManager = new BlockManager(ns, false, conf, containerMap);
 +
 +int handlerCount =
 +conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY,
 +DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
 +
 +RPC.setProtocolEngine(conf, DatanodeProtocolPB.class,
 +ProtobufRpcEngine.class);
 +
 +DatanodeProtocolServerSideTranslatorPB dnProtoPbTranslator =
 +new DatanodeProtocolServerSideTranslatorPB(this);
 +BlockingService dnProtoPbService =
 +DatanodeProtocolProtos.DatanodeProtocolService
 +.newReflectiveBlockingService(dnProtoPbTranslator);
 +
 +WritableRpcEngine.ensureInitialized();
 +
 +InetSocketAddress serviceRpcAddr = NameNode.getServiceAddress(conf, 
false);
 +if (serviceRpcAddr != null) {
 +  String bindHost =
 +  conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
 +  if (bindHost == null || bindHost.isEmpty()) {
 +

[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into hdfs-7240

2015-11-04 - aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/312d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index da09b0e,29bcd79..c93a362
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@@ -2816,30 -2633,14 +2831,30 @@@ public class DataNode extends Reconfigu
}
  
/**
-* Convenience method, which unwraps RemoteException.
-* @throws IOException not a RemoteException.
-*/
 -   * Update replica with the new generation stamp and length.  
++  * Convenience method, which unwraps RemoteException.
++  * @throws IOException not a RemoteException.
++  */
 +  private static ReplicaRecoveryInfo callInitReplicaRecovery(
 +  InterDatanodeProtocol datanode,
 +  RecoveringBlock rBlock) throws IOException {
 +try {
 +  return datanode.initReplicaRecovery(rBlock);
- } catch(RemoteException re) {
++} catch (RemoteException re) {
 +  throw re.unwrapRemoteException();
 +}
 +  }
 +
 +  /**
-* Update replica with the new generation stamp and length.  
++   * Update replica with the new generation stamp and length.
 */
@Override // InterDatanodeProtocol
public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock,
--  final long recoveryId, final long newBlockId, final long newLength)
++   final long recoveryId, final long 
newBlockId, final long newLength)
throws IOException {
 -final String storageID = data.updateReplicaUnderRecovery(oldBlock,
 -recoveryId, newBlockId, newLength);
 +final FsDatasetSpi<?> dataset =
 +(FsDatasetSpi<?>) getDataset(oldBlock.getBlockPoolId());
 +final String storageID = dataset.updateReplicaUnderRecovery(
 +oldBlock, recoveryId, newBlockId, newLength);
  // Notify the namenode of the updated block info. This is important
  // for HA, since otherwise the standby node may lose track of the
  // block locations until the next block report.
@@@ -2851,234 -2652,6 +2866,244 @@@
  return storageID;
}
  
-   /** A convenient class used in block recovery */
-   static class BlockRecord { 
++  /**
++   * A convenient class used in block recovery
++   */
++  static class BlockRecord {
 +final DatanodeID id;
 +final InterDatanodeProtocol datanode;
 +final ReplicaRecoveryInfo rInfo;
- 
 +private String storageID;
 +
 +BlockRecord(DatanodeID id,
 +InterDatanodeProtocol datanode,
 +ReplicaRecoveryInfo rInfo) {
 +  this.id = id;
 +  this.datanode = datanode;
 +  this.rInfo = rInfo;
 +}
 +
 +void updateReplicaUnderRecovery(String bpid, long recoveryId,
 +long newBlockId, long newLength)
 +throws IOException {
 +  final ExtendedBlock b = new ExtendedBlock(bpid, rInfo);
 +  storageID = datanode.updateReplicaUnderRecovery(b, recoveryId, 
newBlockId,
 +  newLength);
 +}
 +
 +@Override
 +public String toString() {
 +  return "block:" + rInfo + " node:" + id;
 +}
 +  }
 +
-   /** Recover a block */
++
++  /**
++   * Recover a block
++   */
 +  private void recoverBlock(RecoveringBlock rBlock) throws IOException {
 +ExtendedBlock block = rBlock.getBlock();
 +String blookPoolId = block.getBlockPoolId();
 +DatanodeID[] datanodeids = rBlock.getLocations();
 +List<BlockRecord> syncList = new ArrayList<BlockRecord>(datanodeids.length);
 +int errorCount = 0;
 +
 +//check generation stamps
- for(DatanodeID id : datanodeids) {
++for (DatanodeID id : datanodeids) {
 +  try {
 +BPOfferService bpos = blockPoolManager.get(blookPoolId);
 +DatanodeRegistration bpReg = bpos.bpRegistration;
- InterDatanodeProtocol datanode = bpReg.equals(id)?
- this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
- dnConf.socketTimeout, dnConf.connectToDnViaHostname);
++InterDatanodeProtocol datanode = bpReg.equals(id) ?
++this : DataNode.createInterDataNodeProtocolProxy(id, getConf(),
++dnConf.socketTimeout, dnConf.connectToDnViaHostname);
 +ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
 +if (info != null &&
 +info.getGenerationStamp() >= block.getGenerationStamp() &&
 +info.getNumBytes() > 0) {
 +  syncList.add(new BlockRecord(id, datanode, info));
 +}
 +  } catch (RecoveryInProgressException ripE) {
 +InterDatanodeProtocol.LOG.warn(
 +"Recovery for replica " + block + " on data-node " + id
- + " is already in 

[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2015-09-29 - aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d313d1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
index 1496da2,000..cfacd5f
mode 100644,00..100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
@@@ -1,124 -1,0 +1,130 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.hadoop.storagecontainer;
 +
 +import com.google.common.annotations.VisibleForTesting;
 +import com.google.common.base.Preconditions;
 +import org.apache.hadoop.hdfs.protocol.Block;
 +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 +import org.apache.hadoop.util.GSet;
 +
++import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.Iterator;
 +import java.util.Map;
 +
 +/**
 + * Maps a storage container to its location on datanodes. Similar to
 + * {@link org.apache.hadoop.hdfs.server.blockmanagement.BlocksMap}
 + */
 +public class StorageContainerMap implements GSet<Block, BlockInfo> {
 +
 +  private Map<Long, BitWiseTrieContainerMap> containerPrefixMap
 +  = new HashMap<Long, BitWiseTrieContainerMap>();
 +  private int size;
 +  public static final int PREFIX_LENGTH = 28;
 +
 +  @Override
 +  public int size() {
 +// TODO: update size when new containers created
 +return size;
 +  }
 +
 +  @Override
 +  public boolean contains(Block key) {
 +return getBlockInfoContiguous(key.getBlockId()) != null;
 +  }
 +
 +  @Override
 +  public BlockInfoContiguous get(Block key) {
 +return getBlockInfoContiguous(key.getBlockId());
 +  }
 +
 +  @Override
 +  public BlockInfoContiguous put(BlockInfo element) {
 +BlockInfoContiguous info = getBlockInfoContiguous(element.getBlockId());
 +if (info == null) {
 +  throw new IllegalStateException(
 +  "The containers are created by splitting");
 +}
 +// TODO: replace
 +return info;
 +  }
 +
 +  @Override
 +  public BlockInfoContiguous remove(Block key) {
 +// It doesn't remove
 +return getBlockInfoContiguous(key.getBlockId());
 +  }
 +
 +  @Override
 +  public void clear() {
 +containerPrefixMap.clear();
 +  }
 +
 +  @Override
++  public Collection<BlockInfo> values() {
++return null;
++  }
++
++  @Override
 +  public Iterator<BlockInfo> iterator() {
 +// TODO : Support iteration
 +throw new UnsupportedOperationException("");
 +  }
 +
 +  /**
 +   * Initialize a new trie for a new bucket.
 +   */
 +  public synchronized void initPrefix(long prefix) {
 +Preconditions.checkArgument((prefix >>> PREFIX_LENGTH) == 0,
 +"Prefix shouldn't be longer than "+PREFIX_LENGTH+" bits");
 +if (getTrieMap(prefix << (64 - PREFIX_LENGTH)) != null) {
 +  // Already initialized
 +  return;
 +}
 +BitWiseTrieContainerMap newTrie = new BitWiseTrieContainerMap(prefix,
 +PREFIX_LENGTH);
 +containerPrefixMap.put(prefix, newTrie);
 +  }
 +
 +  @VisibleForTesting
 +  synchronized BitWiseTrieContainerMap getTrieMap(long containerId) {
 +long prefix = containerId >>> (64 - PREFIX_LENGTH);
 +return containerPrefixMap.get(prefix);
 +  }
 +
 +  @VisibleForTesting
 +  BlockInfoContiguous getBlockInfoContiguous(long containerId) {
 +BitWiseTrieContainerMap map = getTrieMap(containerId);
 +if (map == null) {
 +  return null;
 +}
 +return map.get(containerId);
 +  }
 +
 +  public void splitContainer(long key) {
 +BitWiseTrieContainerMap map = getTrieMap(key);
 +if (map == null) {
 +  throw new IllegalArgumentException("No container exists");
 +}
 +map.addBit(key);
 +  }
 +}
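
A hypothetical worked example of the prefix arithmetic used by getTrieMap() and initPrefix() above (illustrative only, not part of the commit; class and method names are invented for the example):

// Illustrative sketch: with PREFIX_LENGTH = 28, the trie bucket for a
// container is selected by the top 28 bits of its 64-bit container id.
public class ContainerPrefixExample {
  static final int PREFIX_LENGTH = 28;

  static long prefixOf(long containerId) {
    return containerId >>> (64 - PREFIX_LENGTH);
  }

  public static void main(String[] args) {
    long containerId = 0xABCDEF0123456789L;
    long prefix = prefixOf(containerId);            // 0xABCDEF0, fits in 28 bits
    long rebuilt = prefix << (64 - PREFIX_LENGTH);  // id with the low 36 bits zeroed
    // Both ids resolve to the same trie bucket, mirroring getTrieMap()/initPrefix().
    System.out.printf("prefix=%x rebuilt=%x sameBucket=%b%n",
        prefix, rebuilt, prefixOf(rebuilt) == prefix);
  }
}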

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d313d1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java