[47/50] [abbrv] hadoop git commit: Merge remote-tracking branch 'apache/trunk' into HDFS-7285

2015-09-30 Thread wang
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd55202/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --cc hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 0000000,7101753..d9f409c
mode 000000,100644..100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@@ -1,0 -1,1917 +1,1889 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hdfs;
+ 
+ import java.io.EOFException;
+ import java.io.IOException;
+ import java.net.InetSocketAddress;
+ import java.nio.ByteBuffer;
+ import java.util.AbstractMap;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.EnumSet;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.Set;
+ import java.util.concurrent.Callable;
+ import java.util.concurrent.CancellationException;
+ import java.util.concurrent.CompletionService;
+ import java.util.concurrent.ConcurrentHashMap;
+ import java.util.concurrent.ExecutionException;
+ import java.util.concurrent.ExecutorCompletionService;
+ import java.util.concurrent.Future;
+ import java.util.concurrent.ThreadLocalRandom;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ 
 -import com.google.common.base.Preconditions;
+ import org.apache.commons.io.IOUtils;
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.fs.ByteBufferReadable;
+ import org.apache.hadoop.fs.ByteBufferUtil;
+ import org.apache.hadoop.fs.CanSetDropBehind;
+ import org.apache.hadoop.fs.CanSetReadahead;
+ import org.apache.hadoop.fs.CanUnbuffer;
+ import org.apache.hadoop.fs.ChecksumException;
+ import org.apache.hadoop.fs.FSInputStream;
+ import org.apache.hadoop.fs.FileEncryptionInfo;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
+ import org.apache.hadoop.fs.ReadOption;
+ import org.apache.hadoop.fs.StorageType;
+ import org.apache.hadoop.fs.UnresolvedLinkException;
+ import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
+ import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+ import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
+ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
+ import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
+ import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
+ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
+ import org.apache.hadoop.io.ByteBufferPool;
+ import org.apache.hadoop.ipc.RPC;
+ import org.apache.hadoop.ipc.RemoteException;
+ import org.apache.hadoop.net.NetUtils;
+ import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+ import org.apache.hadoop.security.token.Token;
+ import org.apache.hadoop.util.IdentityHashStore;
+ import org.apache.htrace.core.SpanId;
+ import org.apache.htrace.core.TraceScope;
+ import org.apache.htrace.core.Tracer;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ 
+ /**
+  * DFSInputStream provides bytes from a named file.  It handles
+  * negotiation of the namenode and various datanodes as necessary.
+  */
+ @InterfaceAudience.Private
+ public class DFSInputStream extends FSInputStream
+     implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
+     HasEnhancedByteBufferAccess, CanUnbuffer {

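For context, a minimal sketch of how this class is typically reached through the public API (the path and configuration below are placeholders, not part of this commit): FileSystem.open() on an HDFS path returns an FSDataInputStream backed by DFSInputStream, which fetches the file's block locations from the namenode and then streams bytes from the datanodes holding each block.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Reads an HDFS file through the public FileSystem API; on an HDFS URI
    // the returned stream is backed by DFSInputStream. The path is a placeholder.
    public class ReadHdfsFile {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration(); // fs.defaultFS selects the cluster
        try (FileSystem fs = FileSystem.get(conf);
             FSDataInputStream in = fs.open(new Path("/tmp/example.txt"))) {
          byte[] buf = new byte[4096];
          int n;
          while ((n = in.read(buf)) > 0) {
            System.out.write(buf, 0, n); // echo the file's bytes to stdout
          }
        }
      }
    }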
[47/50] [abbrv] hadoop git commit: Merge remote-tracking branch 'apache/trunk' into HDFS-7285

2015-09-22 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1080c373/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1080c373/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 53c6cdb,28ea866..8874c4d
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@@ -23,8 -23,8 +23,9 @@@ import java.util.concurrent.TimeUnit
  import org.apache.hadoop.classification.InterfaceAudience;
  import org.apache.hadoop.fs.CommonConfigurationKeys;
  import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
  import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
  import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
  import org.apache.hadoop.http.HttpConfig;
  
@@@ -171,8 -171,8 +172,10 @@@ public class DFSConfigKeys extends Comm
    public static final int DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;
    public static final String  DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min";
    public static final int DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
 +  public static final String  DFS_NAMENODE_STRIPE_MIN_KEY = "dfs.namenode.stripe.min";
 +  public static final int DFS_NAMENODE_STRIPE_MIN_DEFAULT = 1;
+   public static final String  DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY =
+       "dfs.namenode.safemode.replication.min";
    public static final String  DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY = "dfs.namenode.replication.pending.timeout-sec";
    public static final int DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
    public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY = "dfs.namenode.replication.max-streams";

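The hunk above follows the key/default pattern used throughout DFSConfigKeys: each setting is a *_KEY string constant paired with a *_DEFAULT constant, read through the typed Configuration getters. A minimal sketch (the constants mirror the new stripe.min pair; the lookup itself is illustrative, not from the commit):

    import org.apache.hadoop.conf.Configuration;

    public class StripeMinLookup {
      // Mirrors the constants added in the hunk above.
      static final String DFS_NAMENODE_STRIPE_MIN_KEY = "dfs.namenode.stripe.min";
      static final int DFS_NAMENODE_STRIPE_MIN_DEFAULT = 1;

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Falls back to the default when the key is absent from the config files.
        int stripeMin = conf.getInt(DFS_NAMENODE_STRIPE_MIN_KEY,
            DFS_NAMENODE_STRIPE_MIN_DEFAULT);
        System.out.println("dfs.namenode.stripe.min = " + stripeMin);
      }
    }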
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1080c373/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1080c373/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1080c373/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index ac927ef,f4cf4c2..5bf52c5
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@@ -84,9 -85,7 +85,8 @@@ import org.apache.hadoop.hdfs.protocol.
  import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
  import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
  import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
- import org.apache.hadoop.hdfs.server.namenode.NameNode;
  import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
  import org.apache.hadoop.net.NetUtils;
  import org.apache.hadoop.security.AccessControlException;
  import org.apache.hadoop.security.Credentials;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1080c373/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
--
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 3217484,d93277c..1e4b899
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@@ -429,10 -422,10 +429,11 @@@ public class ClientNamenodeProtocolServ
req.getClientName(), flags);
AppendResponseProto.Builder builder = AppendResponseProto.newBuilder();
if (result.getLastBlock() != null) {
- builder.setBlock(PBHelper.convertLocatedBlock(result.getLastBlock()));
 -

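The hunk above is truncated mid-change, but it comes from the standard server-side translator pattern: unpack the protobuf request, call the underlying namenode method, and set optional response fields only when they are present. A self-contained sketch under that assumption, with simplified stand-in types rather than the real generated protobuf classes:

    // All types here are simplified stand-ins for the real Hadoop classes.
    public class AppendTranslatorSketch {
      interface ClientProtocol {
        LocatedBlock append(String src, String clientName);
      }

      static class LocatedBlock { } // stand-in for the file's last block

      // Stand-in for the generated AppendResponseProto and its builder.
      static class AppendResponseProto {
        private LocatedBlock block;
        static AppendResponseProto newBuilder() { return new AppendResponseProto(); }
        AppendResponseProto setBlock(LocatedBlock b) { this.block = b; return this; }
        AppendResponseProto build() { return this; }
      }

      static AppendResponseProto append(ClientProtocol server, String src,
          String clientName) {
        LocatedBlock result = server.append(src, clientName);
        AppendResponseProto builder = AppendResponseProto.newBuilder();
        if (result != null) {
          // The real code converts the internal type via a PBHelper here.
          builder.setBlock(result);
        }
        return builder.build();
      }
    }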
[47/50] [abbrv] hadoop git commit: Merge remote-tracking branch 'apache/trunk' into HDFS-7285

2015-09-01 Thread zhz
Merge remote-tracking branch 'apache/trunk' into HDFS-7285


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab56fcdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab56fcdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab56fcdb

Branch: refs/heads/HDFS-7285
Commit: ab56fcdb1219d03713b408dd3a95d7405635254d
Parents: 164cbe6 cbb2495
Author: Zhe Zhang 
Authored: Thu Aug 27 16:23:41 2015 -0700
Committer: Zhe Zhang 
Committed: Tue Sep 1 14:30:25 2015 -0700

--
 .../server/AuthenticationFilter.java|   63 +-
 .../server/AuthenticationToken.java |   12 +
 .../security/authentication/util/AuthToken.java |   35 +-
 .../server/TestAuthenticationFilter.java|  163 ++-
 hadoop-common-project/hadoop-common/CHANGES.txt |   34 +
 .../src/main/conf/log4j.properties  |   13 +
 .../fs/CommonConfigurationKeysPublic.java   |5 +
 .../java/org/apache/hadoop/fs/CreateFlag.java   |2 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java|   11 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|5 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |   60 +
 .../apache/hadoop/ipc/WritableRpcEngine.java|3 +
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   |   48 +
 .../apache/hadoop/metrics2/lib/MutableStat.java |7 +-
 .../org/apache/hadoop/metrics2/util/MBeans.java |   37 +-
 .../org/apache/hadoop/util/HostsFileReader.java |7 +-
 .../main/java/org/apache/hadoop/util/Shell.java |   11 +-
 .../org/apache/hadoop/util/StringUtils.java |   29 +-
 .../src/main/resources/core-default.xml |9 +
 .../src/site/markdown/HttpAuthentication.md |8 +-
 .../hadoop-common/src/site/markdown/Metrics.md  |2 +
 .../src/site/markdown/SingleCluster.md.vm   |2 +-
 .../org/apache/hadoop/ipc/TestProtoBufRpc.java  |   77 +-
 .../org/apache/hadoop/test/MetricsAsserts.java  |2 +-
 .../java/org/apache/hadoop/util/TestShell.java  |   39 +
 .../hadoop-common/src/test/proto/test.proto |7 +
 .../src/test/proto/test_rpc_service.proto   |1 +
 .../dev-support/findbugsExcludeFile.xml |   10 +
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |   26 +
 .../org/apache/hadoop/hdfs/ExtendedBlockId.java |   82 ++
 .../org/apache/hadoop/hdfs/ReplicaAccessor.java |   88 ++
 .../hadoop/hdfs/ReplicaAccessorBuilder.java |  101 ++
 .../hdfs/client/HdfsClientConfigKeys.java   |   76 +-
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  794 +
 .../hadoop/hdfs/client/impl/package-info.java   |   18 +
 .../org/apache/hadoop/hdfs/net/DomainPeer.java  |  132 +++
 .../java/org/apache/hadoop/hdfs/net/Peer.java   |  123 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |7 +
 .../datatransfer/BlockConstructionStage.java|   62 +
 .../datatransfer/DataTransferProtoUtil.java |  146 +++
 .../datatransfer/DataTransferProtocol.java  |  202 
 .../hadoop/hdfs/protocol/datatransfer/Op.java   |   66 ++
 .../hdfs/protocol/datatransfer/Sender.java  |  261 +
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  254 +
 .../token/block/InvalidBlockTokenException.java |   41 +
 .../server/datanode/BlockMetadataHeader.java|  209 
 .../hdfs/server/datanode/CachingStrategy.java   |   76 ++
 .../hadoop/hdfs/shortcircuit/ClientMmap.java|   75 ++
 .../hadoop/hdfs/shortcircuit/DfsClientShm.java  |  119 ++
 .../hdfs/shortcircuit/DfsClientShmManager.java  |  522 +
 .../hdfs/shortcircuit/DomainSocketFactory.java  |  196 
 .../hdfs/shortcircuit/ShortCircuitCache.java| 1066 +
 .../hdfs/shortcircuit/ShortCircuitReplica.java  |  352 ++
 .../shortcircuit/ShortCircuitReplicaInfo.java   |   64 ++
 .../hdfs/shortcircuit/ShortCircuitShm.java  |  647 +++
 .../hadoop/hdfs/util/ByteArrayManager.java  |  422 +++
 .../hadoop/hdfs/util/ExactSizeInputStream.java  |  125 ++
 .../apache/hadoop/hdfs/util/IOUtilsClient.java  |   46 +
 .../apache/hadoop/hdfs/util/package-info.java   |   18 +
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |   20 +
 .../hdfs/web/resources/CreateFlagParam.java |   48 +
 .../hdfs/web/resources/CreateParentParam.java   |2 +-
 .../src/main/proto/ClientDatanodeProtocol.proto |   33 -
 .../src/main/proto/datatransfer.proto   |4 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  125 +-
 .../dev-support/findbugsExcludeFile.xml |   10 -
 .../hadoop-hdfs/src/CMakeLists.txt  |1 +
 .../apache/hadoop/fs/BlockStorageLocation.java  |   52 -
 .../java/org/apache/hadoop/fs/HdfsVolumeId.java |   73 --
 .../java/org/apache/hadoop/fs/VolumeId.java |   40 -
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |   65 +-
 .../hadoop/hdfs/BlockStorageLocationUtil.java   |  368 --