hadoop git commit: YARN-5176. More test cases for queuing of containers at the NM. (Konstantinos Karanasos via asuresh)

2016-06-07 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 10f0c0475 -> 154c7c343


YARN-5176. More test cases for queuing of containers at the NM. (Konstantinos 
Karanasos via asuresh)

(cherry picked from commit 76f0800c21f49fba01694cbdc870103053da802c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/154c7c34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/154c7c34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/154c7c34

Branch: refs/heads/branch-2
Commit: 154c7c343bb60d93fa4c0398a57bf17536ddedb7
Parents: 10f0c04
Author: Arun Suresh 
Authored: Tue Jun 7 17:16:18 2016 -0700
Committer: Arun Suresh 
Committed: Tue Jun 7 17:17:22 2016 -0700

--
 .../queuing/QueuingContainerManagerImpl.java|  11 +
 .../BaseContainerManagerTest.java   |  64 +++
 .../containermanager/TestContainerManager.java  |  76 +---
 .../queuing/TestQueuingContainerManager.java| 388 +++
 4 files changed, 391 insertions(+), 148 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/154c7c34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
index 1ce3356..a1e3bdb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
@@ -160,6 +160,7 @@ public class QueuingContainerManagerImpl extends 
ContainerManagerImpl {
   containerTokenId.getExecutionType());
 
   if (foundInQueue) {
+LOG.info("Removing queued container with ID " + containerID);
 this.context.getQueuingContext().getKilledQueuedContainers().put(
 containerTokenId,
 "Queued container request removed by ApplicationMaster.");
@@ -502,6 +503,16 @@ public class QueuingContainerManagerImpl extends 
ContainerManagerImpl {
 return allocatedOpportunisticContainers.size();
   }
 
+  @VisibleForTesting
+  public int getNumQueuedGuaranteedContainers() {
+return queuedGuaranteedContainers.size();
+  }
+
+  @VisibleForTesting
+  public int getNumQueuedOpportunisticContainers() {
+return queuedOpportunisticContainers.size();
+  }
+
   class QueuingApplicationEventDispatcher implements
       EventHandler<ApplicationEvent> {
     private EventHandler<ApplicationEvent> applicationEventDispatcher;
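
The two @VisibleForTesting getters added above exist so tests can assert
directly on the NM's queue depths. A minimal sketch of how a test might use
them, assuming a started QueuingContainerManagerImpl fixture (the helper and
its arguments are illustrative, not part of the patch):

import org.junit.Assert;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.queuing.QueuingContainerManagerImpl;

// Hypothetical test helper: assert on queue depths via the new getters.
static void assertQueueDepths(QueuingContainerManagerImpl cm,
    int expectedGuaranteed, int expectedOpportunistic) {
  Assert.assertEquals(expectedGuaranteed,
      cm.getNumQueuedGuaranteedContainers());
  Assert.assertEquals(expectedOpportunistic,
      cm.getNumQueuedOpportunisticContainers());
}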

http://git-wip-us.apache.org/repos/asf/hadoop/blob/154c7c34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index ab60288..4f0e5c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -40,10 +40,17 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import 

hadoop git commit: YARN-5176. More test cases for queuing of containers at the NM. (Konstantinos Karanasos via asuresh)

2016-06-07 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 58be55b6e -> 76f0800c2


YARN-5176. More test cases for queuing of containers at the NM. (Konstantinos 
Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76f0800c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76f0800c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76f0800c

Branch: refs/heads/trunk
Commit: 76f0800c21f49fba01694cbdc870103053da802c
Parents: 58be55b
Author: Arun Suresh 
Authored: Tue Jun 7 17:16:18 2016 -0700
Committer: Arun Suresh 
Committed: Tue Jun 7 17:16:18 2016 -0700

--
 .../queuing/QueuingContainerManagerImpl.java|  11 +
 .../BaseContainerManagerTest.java   |  64 +++
 .../containermanager/TestContainerManager.java  |  76 +---
 .../queuing/TestQueuingContainerManager.java| 388 +++
 4 files changed, 391 insertions(+), 148 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76f0800c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
index 1ce3356..a1e3bdb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
@@ -160,6 +160,7 @@ public class QueuingContainerManagerImpl extends 
ContainerManagerImpl {
   containerTokenId.getExecutionType());
 
   if (foundInQueue) {
+LOG.info("Removing queued container with ID " + containerID);
 this.context.getQueuingContext().getKilledQueuedContainers().put(
 containerTokenId,
 "Queued container request removed by ApplicationMaster.");
@@ -502,6 +503,16 @@ public class QueuingContainerManagerImpl extends 
ContainerManagerImpl {
 return allocatedOpportunisticContainers.size();
   }
 
+  @VisibleForTesting
+  public int getNumQueuedGuaranteedContainers() {
+return queuedGuaranteedContainers.size();
+  }
+
+  @VisibleForTesting
+  public int getNumQueuedOpportunisticContainers() {
+return queuedOpportunisticContainers.size();
+  }
+
   class QueuingApplicationEventDispatcher implements
       EventHandler<ApplicationEvent> {
     private EventHandler<ApplicationEvent> applicationEventDispatcher;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76f0800c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index ab60288..4f0e5c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -40,10 +40,17 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;

hadoop git commit: YARN-5199. Close LogReader in AHSWebServices#getStreamingOutput and FileInputStream in NMWebServices#getLogs. Contributed by Xuan Gong

2016-06-07 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6a9f38eba -> 10f0c0475


YARN-5199. Close LogReader in AHSWebServices#getStreamingOutput and
FileInputStream in NMWebServices#getLogs. Contributed by Xuan Gong

(cherry picked from commit 58be55b6e07b94aa55ed87c461f3e5c04cc61630)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10f0c047
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10f0c047
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10f0c047

Branch: refs/heads/branch-2
Commit: 10f0c0475e5b66e5ac33ab05d831d45d136996e5
Parents: 6a9f38e
Author: Xuan 
Authored: Tue Jun 7 16:07:02 2016 -0700
Committer: Xuan 
Committed: Tue Jun 7 16:08:54 2016 -0700

--
 .../webapp/AHSWebServices.java  | 155 ++-
 .../nodemanager/webapp/NMWebServices.java   |  71 +
 2 files changed, 118 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10f0c047/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index d91ae55..59dbd44 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -40,7 +40,6 @@ import javax.ws.rs.core.Response;
 import javax.ws.rs.core.StreamingOutput;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.Response.Status;
-
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -363,86 +362,94 @@ public class AHSWebServices extends WebServices {
   if ((nodeId == null || nodeName.contains(LogAggregationUtils
   .getNodeString(nodeId))) && !nodeName.endsWith(
   LogAggregationUtils.TMP_FILE_SUFFIX)) {
-AggregatedLogFormat.LogReader reader =
-new AggregatedLogFormat.LogReader(conf,
-thisNodeFile.getPath());
-DataInputStream valueStream;
-LogKey key = new LogKey();
-valueStream = reader.next(key);
-while (valueStream != null && !key.toString()
-.equals(containerIdStr)) {
-  // Next container
-  key = new LogKey();
+AggregatedLogFormat.LogReader reader = null;
+try {
+  reader = new AggregatedLogFormat.LogReader(conf,
+  thisNodeFile.getPath());
+  DataInputStream valueStream;
+  LogKey key = new LogKey();
   valueStream = reader.next(key);
-}
-if (valueStream == null) {
-  continue;
-}
-while (true) {
-  try {
-String fileType = valueStream.readUTF();
-String fileLengthStr = valueStream.readUTF();
-long fileLength = Long.parseLong(fileLengthStr);
-if (fileType.equalsIgnoreCase(logFile)) {
-  StringBuilder sb = new StringBuilder();
-  sb.append("LogType:");
-  sb.append(fileType + "\n");
-  sb.append("Log Upload Time:");
-  sb.append(Times.format(System.currentTimeMillis()) + "\n");
-  sb.append("LogLength:");
-  sb.append(fileLengthStr + "\n");
-  sb.append("Log Contents:\n");
-  byte[] b = sb.toString().getBytes(Charset.forName("UTF-8"));
-  os.write(b, 0, b.length);
-
-  long toSkip = 0;
-  long totalBytesToRead = fileLength;
-  if (bytes < 0) {
-long absBytes = Math.abs(bytes);
-if (absBytes < fileLength) {
-  toSkip = fileLength - absBytes;
-  totalBytesToRead = absBytes;
+  while 
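
The hunk above is cut off in the archive, but the shape of the fix is
visible: the LogReader is now created inside a try block so it can be closed
unconditionally afterwards. A minimal sketch of that close-in-finally pattern
(simplified; the real method also iterates log keys with reader.next(key) and
streams the matching container's files):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;

// Sketch: close the reader on every path, including early returns and
// exceptions, so the underlying stream is not leaked.
static void readAggregatedLogs(Configuration conf, Path nodeFile)
    throws IOException {
  AggregatedLogFormat.LogReader reader = null;
  try {
    reader = new AggregatedLogFormat.LogReader(conf, nodeFile);
    // ... reader.next(key) loop and log streaming elided ...
  } finally {
    if (reader != null) {
      reader.close();
    }
  }
}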

hadoop git commit: YARN-5199. Close LogReader in AHSWebServices#getStreamingOutput and FileInputStream in NMWebServices#getLogs. Contributed by Xuan Gong

2016-06-07 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8554aee1b -> 58be55b6e


YARN-5199. Close LogReader in AHSWebServices#getStreamingOutput and
FileInputStream in NMWebServices#getLogs. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58be55b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58be55b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58be55b6

Branch: refs/heads/trunk
Commit: 58be55b6e07b94aa55ed87c461f3e5c04cc61630
Parents: 8554aee1b
Author: Xuan 
Authored: Tue Jun 7 16:07:02 2016 -0700
Committer: Xuan 
Committed: Tue Jun 7 16:07:02 2016 -0700

--
 .../webapp/AHSWebServices.java  | 155 ++-
 .../nodemanager/webapp/NMWebServices.java   |  71 +
 2 files changed, 118 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58be55b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index d91ae55..59dbd44 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -40,7 +40,6 @@ import javax.ws.rs.core.Response;
 import javax.ws.rs.core.StreamingOutput;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.Response.Status;
-
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -363,86 +362,94 @@ public class AHSWebServices extends WebServices {
   if ((nodeId == null || nodeName.contains(LogAggregationUtils
   .getNodeString(nodeId))) && !nodeName.endsWith(
   LogAggregationUtils.TMP_FILE_SUFFIX)) {
-AggregatedLogFormat.LogReader reader =
-new AggregatedLogFormat.LogReader(conf,
-thisNodeFile.getPath());
-DataInputStream valueStream;
-LogKey key = new LogKey();
-valueStream = reader.next(key);
-while (valueStream != null && !key.toString()
-.equals(containerIdStr)) {
-  // Next container
-  key = new LogKey();
+AggregatedLogFormat.LogReader reader = null;
+try {
+  reader = new AggregatedLogFormat.LogReader(conf,
+  thisNodeFile.getPath());
+  DataInputStream valueStream;
+  LogKey key = new LogKey();
   valueStream = reader.next(key);
-}
-if (valueStream == null) {
-  continue;
-}
-while (true) {
-  try {
-String fileType = valueStream.readUTF();
-String fileLengthStr = valueStream.readUTF();
-long fileLength = Long.parseLong(fileLengthStr);
-if (fileType.equalsIgnoreCase(logFile)) {
-  StringBuilder sb = new StringBuilder();
-  sb.append("LogType:");
-  sb.append(fileType + "\n");
-  sb.append("Log Upload Time:");
-  sb.append(Times.format(System.currentTimeMillis()) + "\n");
-  sb.append("LogLength:");
-  sb.append(fileLengthStr + "\n");
-  sb.append("Log Contents:\n");
-  byte[] b = sb.toString().getBytes(Charset.forName("UTF-8"));
-  os.write(b, 0, b.length);
-
-  long toSkip = 0;
-  long totalBytesToRead = fileLength;
-  if (bytes < 0) {
-long absBytes = Math.abs(bytes);
-if (absBytes < fileLength) {
-  toSkip = fileLength - absBytes;
-  totalBytesToRead = absBytes;
+  while (valueStream != null && !key.toString()
+  

[Hadoop Wiki] Trivial Update of "HowToRelease" by SomeOtherAccount

2016-06-07 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "HowToRelease" page has been changed by SomeOtherAccount:
https://wiki.apache.org/hadoop/HowToRelease?action=diff&rev1=83&rev2=84

  ## page was copied from HowToReleasePostMavenization
  ''This page is prepared for Hadoop Core committers. You need committer rights 
to create a new  Hadoop Core release.''
  
- These instructions have been updated to use dev-support/bin/create-release. 
Earlier versions of this document are at HowToReleaseWithSvnAndAnt and 
HowToReleasePostMavenization and HowToReleasePreDSBCR
+ These instructions have been updated to use dev-support/bin/create-release. 
Earlier versions of this document are at HowToReleaseWithSvnAndAnt and 
HowToReleasePostMavenization and [[HowToReleasePreDSBCR]]
  
  <<TableOfContents>>
  
- '''READ ALL OF THESE INSTRUCTIONS THOROUGHLY BEFORE PROCEEDING!
+ '''READ ALL OF THESE INSTRUCTIONS THOROUGHLY BEFORE PROCEEDING! '''
- '''
  
  = Preparation =
   1. If you have not already done so, 
[[http://www.apache.org/dev/release-signing.html#keys-policy|append your code 
signing key]] to the 
[[https://dist.apache.org/repos/dist/release/hadoop/common/KEYS|KEYS]] file. 
Once you commit your changes, they will automatically be propagated to the 
website. Also 
[[http://www.apache.org/dev/release-signing.html#keys-policy|upload your key to 
a public key server]] if you haven't. End users use the KEYS file (along with 
the [[http://www.apache.org/dev/release-signing.html#web-of-trust|web of 
trust]]) to validate that releases were done by an Apache committer. For more 
details on signing releases, see 
[[http://www.apache.org/dev/release-signing.html|Signing Releases]] and 
[[http://www.apache.org/dev/mirror-step-by-step.html?Step-By-Step|Step-By-Step 
Guide to Mirroring Releases]].
@@ -71, +70 @@

  mvn versions:set -DnewVersion=X.Y.Z
  }}}
  
- 
  Now, for any branches in {trunk, branch-X, branch-X.Y, branch-X.Y.Z} that 
have changed, push them to the remote repo taking care of any conflicts.
  
  {{{
@@ -87, +85 @@

   1. On a Docker- and Internet- capable machine, build the release candidate 
with {{{create-release}}}. Unless the {{{--logdir}}} is given, logs will be in 
the {{{patchprocess/}}} directory. Artifacts will be in the 
{{{target/artifacts}}} directory. NOTE: This will take quite a while, since it 
downloads and builds the entire source tree, including documentation and 
native components, from scratch to avoid maven repository caching issues 
hiding problems with the source release.
   {{{
   dev-support/bin/create-release --asfrelease --docker --dockercache
-  }}}
+ }}}
   1. While {{{create-release}}} should fail if there are issues, double-check 
the rat log to find and fix any potential licensing issues.
   {{{
   grep 'Rat check' target/artifacts/mvn_apache_rat.log
-  }}}
+ }}}
   1. Check that the release files look ok - e.g. install it somewhere fresh, 
run the examples from the tutorial, do a fresh build, and read the release 
notes looking for WARNINGs.
   1. Set environment variable version for later steps. {{{export 
version=X.Y.Z-RCN}}}
   1. Tag the release candidate:
   {{{
   git tag -s release-$version -m "Release candidate - $version"
-  }}}
+ }}}
   1. Push branch-X.Y.Z and the newly created tag to the remote repo.
   1. Deploy the maven artifacts from your personal computer. Please be sure 
you have completed the prerequisite step of preparing the {{{settings.xml}}} 
file before the deployment. You might want to do this in private and clear 
your shell history file afterwards, as your gpg passphrase is in clear text.
   {{{
@@ -135, +133 @@

  svn ci -m "Publishing the bits for release ${version}"
  }}}
   1. Update upstream branches to make them aware of this new release:
-1. Copy and commit the CHANGES.md and RELEASENOTES.md:
+   1. Copy and commit the CHANGES.md and RELEASENOTES.md:
-{{{
+   {{{
   cp target/artifacts/RELEASENOTES.md 
hadoop-common-project/hadoop-common/src/site/markdown/release/${version}/RELEASENOTES.${version}.md
   cp target/artifacts/CHANGES.md 
hadoop-common-project/hadoop-common/src/site/markdown/release/${version}/CHANGES.${version}.md
-}}}
+ }}}
-1. Update {{{hadoop-project-dist/pom.xml}}} to point to this new stable 
version of the API and commit the change.
+   1. Update {{{hadoop-project-dist/pom.xml}}} to point to this new stable 
version of the API and commit the change.
-{{{
+   {{{
   X.Y.Z
-}}}
+ }}}
   1. In [[https://repository.apache.org|Nexus]]
1. effect the release of artifacts by selecting the staged repository and 
then clicking {{{Release}}}
1. If there were multiple RCs, simply drop the staging repositories 
corresponding to failed RCs.




hadoop git commit: Addendum patch for YARN-5180 updating findbugs-exclude.xml

2016-06-07 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8b34040cb -> 6a9f38eba


Addendum patch for YARN-5180 updating findbugs-exclude.xml

(cherry picked from commit 8554aee1bef5aff9e49e5e9119d6a7a4abf1c432)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a9f38eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a9f38eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a9f38eb

Branch: refs/heads/branch-2
Commit: 6a9f38ebaf766a8baa60bb26fbcc1d5797c331e1
Parents: 8b34040
Author: Arun Suresh 
Authored: Tue Jun 7 15:59:13 2016 -0700
Committer: Arun Suresh 
Committed: Tue Jun 7 16:00:59 2016 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml| 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a9f38eb/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 81c7e6a..6998d75 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -517,6 +517,11 @@
   
 
   
+
+
+
+  
+  
 
 
   





hadoop git commit: Addendum patch for YARN-5180 updating findbugs-exclude.xml

2016-06-07 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 733f3f18d -> 8554aee1b


Addendum patch for YARN-5180 updating findbugs-exclude.xml


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8554aee1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8554aee1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8554aee1

Branch: refs/heads/trunk
Commit: 8554aee1bef5aff9e49e5e9119d6a7a4abf1c432
Parents: 733f3f1
Author: Arun Suresh 
Authored: Tue Jun 7 15:59:13 2016 -0700
Committer: Arun Suresh 
Committed: Tue Jun 7 15:59:43 2016 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml| 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8554aee1/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 81c7e6a..6998d75 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -517,6 +517,11 @@
   
 
   
+
+
+
+  
+  
 
 
   





[Hadoop Wiki] Update of "HowToRelease" by SomeOtherAccount

2016-06-07 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "HowToRelease" page has been changed by SomeOtherAccount:
https://wiki.apache.org/hadoop/HowToRelease?action=diff&rev1=83&rev2=84

Comment:
Rewrite based upon the new dev-support/bin/create-release script

  ## page was copied from HowToReleasePostMavenization
  ''This page is prepared for Hadoop Core committers. You need committer rights 
to create a new  Hadoop Core release.''
  
- These instructions have been updated to use dev-support/bin/create-release. 
Earlier versions of this document are at HowToReleaseWithSvnAndAnt and 
HowToReleasePostMavenization and HowToReleasePreDSBCR
+ These instructions have been updated to use dev-support/bin/create-release. 
Earlier versions of this document are at HowToReleaseWithSvnAndAnt and 
HowToReleasePostMavenization and [[HowToReleasePreDSBCR]]
  
  <<TableOfContents>>
  
- '''READ ALL OF THESE INSTRUCTIONS THOROUGHLY BEFORE PROCEEDING!
+ '''READ ALL OF THESE INSTRUCTIONS THOROUGHLY BEFORE PROCEEDING! '''
- '''
  
  = Preparation =
   1. If you have not already done so, 
[[http://www.apache.org/dev/release-signing.html#keys-policy|append your code 
signing key]] to the 
[[https://dist.apache.org/repos/dist/release/hadoop/common/KEYS|KEYS]] file. 
Once you commit your changes, they will automatically be propagated to the 
website. Also 
[[http://www.apache.org/dev/release-signing.html#keys-policy|upload your key to 
a public key server]] if you haven't. End users use the KEYS file (along with 
the [[http://www.apache.org/dev/release-signing.html#web-of-trust|web of 
trust]]) to validate that releases were done by an Apache committer. For more 
details on signing releases, see 
[[http://www.apache.org/dev/release-signing.html|Signing Releases]] and 
[[http://www.apache.org/dev/mirror-step-by-step.html?Step-By-Step|Step-By-Step 
Guide to Mirroring Releases]].
@@ -71, +70 @@

  mvn versions:set -DnewVersion=X.Y.Z
  }}}
  
- 
  Now, for any branches in {trunk, branch-X, branch-X.Y, branch-X.Y.Z} that 
have changed, push them to the remote repo taking care of any conflicts.
  
  {{{
@@ -87, +85 @@

   1. On a Docker- and Internet- capable machine, build the release candidate 
with {{{create-release}}}. Unless the {{{--logdir}}} is given, logs will be in 
the {{{patchprocess/}}} directory. Artifacts will be in the 
{{{target/artifacts}}} directory. NOTE: This will take quite a while, since it 
downloads and builds the entire source tree, including documentation and 
native components, from scratch to avoid maven repository caching issues 
hiding problems with the source release.
   {{{
   dev-support/bin/create-release --asfrelease --docker --dockercache
-  }}}
+ }}}
   1. While {{{create-release}}} should fail if there are issues, double-check 
the rat log to find and fix any potential licensing issues.
   {{{
   grep 'Rat check' target/artifacts/mvn_apache_rat.log
-  }}}
+ }}}
   1. Check that the release files look ok - e.g. install it somewhere fresh, 
run the examples from the tutorial, do a fresh build, and read the release 
notes looking for WARNINGs.
   1. Set environment variable version for later steps. {{{export 
version=X.Y.Z-RCN}}}
   1. Tag the release candidate:
   {{{
   git tag -s release-$version -m "Release candidate - $version"
-  }}}
+ }}}
   1. Push branch-X.Y.Z and the newly created tag to the remote repo.
   1. Deploy the maven artifacts from your personal computer. Please be sure 
you have completed the prerequisite step of preparing the {{{settings.xml}}} 
file before the deployment. You might want to do this in private and clear 
your shell history file afterwards, as your gpg passphrase is in clear text.
   {{{
@@ -135, +133 @@

  svn ci -m "Publishing the bits for release ${version}"
  }}}
   1. Update upstream branches to make them aware of this new release:
-1. Copy and commit the CHANGES.md and RELEASENOTES.md:
+   1. Copy and commit the CHANGES.md and RELEASENOTES.md:
-{{{
+   {{{
   cp target/artifacts/RELEASENOTES.md 
hadoop-common-project/hadoop-common/src/site/markdown/release/${version}/RELEASENOTES.${version}.md
   cp target/artifacts/CHANGES.md 
hadoop-common-project/hadoop-common/src/site/markdown/release/${version}/CHANGES.${version}.md
-}}}
+ }}}
-1. Update {{{hadoop-project-dist/pom.xml}}} to point to this new stable 
version of the API and commit the change.
+   1. Update {{{hadoop-project-dist/pom.xml}}} to point to this new stable 
version of the API and commit the change.
-{{{
+   {{{
   X.Y.Z
-}}}
+ }}}
   1. In [[https://repository.apache.org|Nexus]]
1. effect the release of artifacts by selecting the staged repository and 
then clicking {{{Release}}}
1. If there were multiple RCs, simply drop the staging repositories 
corresponding to failed RCs.


hadoop git commit: MAPREDUCE-6702. TestMiniMRChildTask.testTaskEnv and TestMiniMRChildTask.testTaskOldEnv are failing (ajisakaa via rkanter)

2016-06-07 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 620325e81 -> 733f3f18d


MAPREDUCE-6702. TestMiniMRChildTask.testTaskEnv and 
TestMiniMRChildTask.testTaskOldEnv are failing (ajisakaa via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/733f3f18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/733f3f18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/733f3f18

Branch: refs/heads/trunk
Commit: 733f3f18d5cf38cbae35146fbef8e16e35fdf5e1
Parents: 620325e
Author: Robert Kanter 
Authored: Tue Jun 7 15:46:06 2016 -0700
Committer: Robert Kanter 
Committed: Tue Jun 7 15:46:06 2016 -0700

--
 .../src/site/markdown/SingleCluster.md.vm   |  12 +-
 .../java/org/apache/hadoop/mapred/JobConf.java  |   6 -
 .../hadoop/mapred/TestMiniMRChildTask.java  | 233 +++
 3 files changed, 41 insertions(+), 210 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/733f3f18/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm 
b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
index 573ca32..4825e00 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
@@ -181,13 +181,23 @@ You can run a MapReduce job on YARN in a 
pseudo-distributed mode by setting a fe
 
 The following instructions assume that 1. ~ 4. steps of [the above 
instructions](#Execution) are already executed.
 
-1.  Configure parameters as follows:`etc/hadoop/mapred-site.xml`:
+1.  Configure parameters as follows:
+
+`etc/hadoop/mapred-site.xml`:
 
 
 
 mapreduce.framework.name
 yarn
 
+
+mapreduce.admin.user.env
+HADOOP_MAPRED_HOME=$HADOOP_COMMON_HOME
+
+
+yarn.app.mapreduce.am.env
+HADOOP_MAPRED_HOME=$HADOOP_COMMON_HOME
+
 
 
 `etc/hadoop/yarn-site.xml`:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/733f3f18/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index 2cfce1f..f2b0aae 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -294,8 +294,6 @@ public class JobConf extends Configuration {
* Example:
* 
*A=foo - This will set the env variable A to foo. 
-   *B=$X:c This is inherit tasktracker's X env variable on Linux. 
-   *B=%X%;c This is inherit tasktracker's X env variable on Windows. 

* 
* 
* @deprecated Use {@link #MAPRED_MAP_TASK_ENV} or 
@@ -314,8 +312,6 @@ public class JobConf extends Configuration {
* Example:
* 
*A=foo - This will set the env variable A to foo. 
-   *B=$X:c This is inherit tasktracker's X env variable on Linux. 
-   *B=%X%;c This is inherit tasktracker's X env variable on Windows. 

* 
*/
   public static final String MAPRED_MAP_TASK_ENV = JobContext.MAP_ENV;
@@ -330,8 +326,6 @@ public class JobConf extends Configuration {
* Example:
* 
*A=foo - This will set the env variable A to foo. 
-   *B=$X:c This is inherit tasktracker's X env variable on Linux. 
-   *B=%X%;c This is inherit tasktracker's X env variable on Windows. 

* 
*/
   public static final String MAPRED_REDUCE_TASK_ENV = JobContext.REDUCE_ENV;
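
For context, the constants documented above all take the same KEY=VALUE
format. A minimal sketch of setting them on a job, with illustrative values
(the class name is hypothetical):

import org.apache.hadoop.mapred.JobConf;

public class TaskEnvExample {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // Comma-separated KEY=VALUE pairs, per the javadoc format above;
    // "A=foo" sets the env variable A to foo in the child task.
    conf.set(JobConf.MAPRED_MAP_TASK_ENV, "A=foo");
    conf.set(JobConf.MAPRED_REDUCE_TASK_ENV, "A=foo");
  }
}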

http://git-wip-us.apache.org/repos/asf/hadoop/blob/733f3f18/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
 

hadoop git commit: HDFS-10496. DiskBalancer: ExecuteCommand checks planFile in a wrong way. Contributed by Lei (Eddy) Xu.

2016-06-07 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 76a1391d5 -> 32058f9b6


HDFS-10496. DiskBalancer: ExecuteCommand checks planFile in a wrong way. 
Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32058f9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32058f9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32058f9b

Branch: refs/heads/HDFS-1312
Commit: 32058f9b6d49b93ac8e99da383cb86170a16b317
Parents: 76a1391
Author: Anu Engineer 
Authored: Tue Jun 7 15:10:22 2016 -0700
Committer: Anu Engineer 
Committed: Tue Jun 7 15:10:22 2016 -0700

--
 .../hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32058f9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
index c17ef00..6d30e86 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
@@ -61,7 +61,7 @@ public class ExecuteCommand extends Command {
 verifyCommandOptions(DiskBalancer.EXECUTE, cmd);
 
 String planFile = cmd.getOptionValue(DiskBalancer.EXECUTE);
-Preconditions.checkArgument(planFile == null || planFile.isEmpty(),
+Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
 "Invalid plan file specified.");
 
 String planData = null;
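
The one-line fix above is worth spelling out: Preconditions.checkArgument
throws IllegalArgumentException when its condition is false, so the old
condition (planFile == null || planFile.isEmpty()) rejected every valid plan
file and let the invalid ones through. A minimal standalone illustration of
the corrected check (class name and values are hypothetical):

import com.google.common.base.Preconditions;

public class PlanFileCheck {
  static void verify(String planFile) {
    // checkArgument throws when the condition is false, so the condition
    // must describe the VALID state -- exactly what the fix restores.
    Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
        "Invalid plan file specified.");
  }

  public static void main(String[] args) {
    verify("plan.json"); // passes
    verify("");          // throws IllegalArgumentException
  }
}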





[2/2] hadoop git commit: YARN-4837. User facing aspects of 'AM blacklisting' feature need fixing. (vinodkv via wangda)

2016-06-07 Thread wangda
YARN-4837. User facing aspects of 'AM blacklisting' feature need fixing. 
(vinodkv via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/620325e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/620325e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/620325e8

Branch: refs/heads/trunk
Commit: 620325e81696fca140195b74929ed9eda2d5eb16
Parents: be34e85
Author: Wangda Tan 
Authored: Tue Jun 7 15:06:42 2016 -0700
Committer: Wangda Tan 
Committed: Tue Jun 7 15:06:42 2016 -0700

--
 .../yarn/api/records/AMBlackListingRequest.java |  67 -
 .../records/ApplicationSubmissionContext.java   |  23 --
 .../hadoop/yarn/conf/YarnConfiguration.java |  25 +-
 .../src/main/proto/yarn_protos.proto|   5 -
 .../yarn/conf/TestYarnConfigurationFields.java  |   7 +
 .../impl/pb/AMBlackListingRequestPBImpl.java| 104 
 .../pb/ApplicationSubmissionContextPBImpl.java  |  40 ---
 .../src/main/resources/yarn-default.xml |  19 --
 .../hadoop/yarn/api/TestPBImplRecords.java  |  10 -
 .../blacklist/BlacklistManager.java |   9 +-
 .../blacklist/BlacklistUpdates.java |  47 
 .../blacklist/DisabledBlacklistManager.java |  12 +-
 .../blacklist/SimpleBlacklistManager.java   |  17 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  79 ++
 .../rmapp/attempt/RMAppAttempt.java |   2 +-
 .../rmapp/attempt/RMAppAttemptImpl.java |  85 +--
 .../scheduler/AbstractYarnScheduler.java|   2 +-
 .../scheduler/AppSchedulingInfo.java|  74 +++---
 .../scheduler/SchedulerAppUtils.java|  16 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  33 ++-
 .../scheduler/capacity/CapacityScheduler.java   |  11 +-
 .../allocator/RegularContainerAllocator.java|   2 +-
 .../scheduler/fair/FSLeafQueue.java |   2 +-
 .../scheduler/fair/FairScheduler.java   |   8 +-
 .../scheduler/fifo/FifoScheduler.java   |  12 +-
 .../webapp/RMAppAttemptBlock.java   |   9 +-
 .../resourcemanager/webapp/RMAppBlock.java  |  13 +-
 .../resourcemanager/webapp/RMWebServices.java   |  21 +-
 .../webapp/dao/AMBlackListingRequestInfo.java   |  61 -
 .../webapp/dao/AppAttemptInfo.java  |   8 +-
 .../dao/ApplicationSubmissionContextInfo.java   |  13 -
 .../TestNodeBlacklistingOnAMFailures.java   | 251 +++
 .../applicationsmanager/TestAMRestart.java  | 177 +
 .../blacklist/TestBlacklistManager.java |  29 +--
 .../rmapp/TestRMAppTransitions.java |  58 -
 .../scheduler/TestAppSchedulingInfo.java|  12 +-
 .../capacity/TestCapacityScheduler.java |   8 +-
 .../scheduler/fair/TestFSAppAttempt.java|  12 +-
 .../scheduler/fair/TestFairScheduler.java   |   9 +-
 .../TestRMWebServicesAppsModification.java  |  39 ++-
 40 files changed, 536 insertions(+), 895 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/620325e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
deleted file mode 100644
index 4aec2ba..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.api.records;
-
-import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceStability.Evolving;

[1/2] hadoop git commit: YARN-4837. User facing aspects of 'AM blacklisting' feature need fixing. (vinodkv via wangda)

2016-06-07 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk be34e85e6 -> 620325e81


http://git-wip-us.apache.org/repos/asf/hadoop/blob/620325e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
index 60b728e..e8c8bca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
@@ -42,7 +42,7 @@ public class AppAttemptInfo {
   protected String nodeId;
   protected String logsLink;
   protected String blacklistedNodes;
-  protected String rmBlacklistedNodesForAMLaunches;
+  private String nodesBlacklistedBySystem;
   protected String appAttemptId;
 
   public AppAttemptInfo() {
@@ -69,9 +69,9 @@ public class AppAttemptInfo {
 + masterContainer.getNodeHttpAddress(),
 ConverterUtils.toString(masterContainer.getId()), user);
 
-rmBlacklistedNodesForAMLaunches = StringUtils.join(
-attempt.getAMBlacklist().getBlacklistUpdates().getAdditions(),
-", ");
+nodesBlacklistedBySystem =
+StringUtils.join(attempt.getAMBlacklistManager()
+  .getBlacklistUpdates().getBlacklistAdditions(), ", ");
 if (rm.getResourceScheduler() instanceof AbstractYarnScheduler) {
   AbstractYarnScheduler ayScheduler =
   (AbstractYarnScheduler) rm.getResourceScheduler();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/620325e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
index 4cbe7a8..3d95ca1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
@@ -87,9 +87,6 @@ public class ApplicationSubmissionContextInfo {
   @XmlElement(name = "reservation-id")
   String reservationId;
 
-  @XmlElement(name = "am-black-listing-requests")
-  AMBlackListingRequestInfo amBlackListingRequestInfo;
-
   public ApplicationSubmissionContextInfo() {
 applicationId = "";
 applicationName = "";
@@ -106,7 +103,6 @@ public class ApplicationSubmissionContextInfo {
 logAggregationContextInfo = null;
 attemptFailuresValidityInterval = -1;
 reservationId = "";
-amBlackListingRequestInfo = null;
   }
 
   public String getApplicationId() {
@@ -173,10 +169,6 @@ public class ApplicationSubmissionContextInfo {
 return attemptFailuresValidityInterval;
   }
 
-  public AMBlackListingRequestInfo getAMBlackListingRequestInfo() {
-return amBlackListingRequestInfo;
-  }
-
   public String getReservationId() {
 return reservationId;
   }
@@ -252,9 +244,4 @@ public class ApplicationSubmissionContextInfo {
   public void setReservationId(String reservationId) {
 this.reservationId = reservationId;
   }
-
-  public void setAMBlackListingRequestInfo(
-  AMBlackListingRequestInfo amBlackListingRequestInfo) {
-this.amBlackListingRequestInfo = amBlackListingRequestInfo;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/620325e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
--
diff --git 

[23/50] [abbrv] hadoop git commit: HDFS-9877. HDFS Namenode UI: Fix browsing directories that need to be encoded (Ravi Prakash via aw)

2016-06-07 Thread jianhe
HDFS-9877. HDFS Namenode UI: Fix browsing directories that need to be encoded 
(Ravi Prakash via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15f01843
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15f01843
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15f01843

Branch: refs/heads/YARN-4757
Commit: 15f018434c5b715729488fd0b03a11f1bc943470
Parents: 713cb71
Author: Allen Wittenauer 
Authored: Fri Jun 3 17:06:29 2016 -0700
Committer: Allen Wittenauer 
Committed: Fri Jun 3 17:06:29 2016 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15f01843/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 102da9d..adb83a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -33,7 +33,7 @@
   $(window).bind('hashchange', function () {
 $('#alert-panel').hide();
 
-var dir = window.location.hash.slice(1);
+var dir = decodeURIComponent(window.location.hash.slice(1));
 if(dir == "") {
   dir = "/";
 }
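
The fix itself is browser-side JavaScript: the NN UI keeps the directory path
percent-encoded in window.location.hash, so it must be decoded before being
used as an HDFS path. The same round-trip shown in Java for illustration
(note that, unlike decodeURIComponent, URLDecoder also maps '+' to a space):

import java.net.URLDecoder;

public class HashDecodeExample {
  public static void main(String[] args) throws Exception {
    // A percent-encoded directory as it would appear in the URL hash:
    String hash = "/user/h%C3%A9l%C3%A8ne/dir%20with%20spaces";
    System.out.println(URLDecoder.decode(hash, "UTF-8"));
    // prints: /user/hélène/dir with spaces
  }
}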





[45/50] [abbrv] hadoop git commit: YARN-5185. StageAllocatorGreedyRLE: Fix NPE in corner case. (Carlo Curino via asuresh)

2016-06-07 Thread jianhe
YARN-5185. StageAllocatorGreedyRLE: Fix NPE in corner case. (Carlo Curino via 
asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a9b7372
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a9b7372
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a9b7372

Branch: refs/heads/YARN-4757
Commit: 7a9b7372a1a917c7b5e1beca7e13c0419e3dbfef
Parents: 6de9213
Author: Arun Suresh 
Authored: Mon Jun 6 21:06:52 2016 -0700
Committer: Arun Suresh 
Committed: Mon Jun 6 21:06:52 2016 -0700

--
 .../planning/StageAllocatorGreedyRLE.java   | 16 
 1 file changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a9b7372/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
index c5a3192..5e748fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
@@ -168,12 +168,20 @@ public class StageAllocatorGreedyRLE implements 
StageAllocator {
   if (allocateLeft) {
 // set earliest start to the min of the constraining "range" or my the
 // end of this allocation
-stageEarliestStart =
-Math.min(partialMap.higherKey(minPoint), stageEarliestStart + dur);
+if(partialMap.higherKey(minPoint) == null){
+  stageEarliestStart = stageEarliestStart + dur;
+} else {
+  stageEarliestStart =
+ Math.min(partialMap.higherKey(minPoint), stageEarliestStart + 
dur);
+}
   } else {
 // same as above moving right-to-left
-stageDeadline =
-Math.max(partialMap.higherKey(minPoint), stageDeadline - dur);
+if(partialMap.higherKey(minPoint) == null){
+  stageDeadline = stageDeadline - dur;
+} else {
+  stageDeadline =
+  Math.max(partialMap.higherKey(minPoint), stageDeadline - dur);
+}
   }
 }
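
The corner case comes from auto-unboxing: NavigableMap.higherKey returns null
when no strictly greater key exists, and feeding that null to Math.min or
Math.max unboxes it into a NullPointerException. A minimal standalone
reproduction of the guarded pattern (values are illustrative):

import java.util.TreeMap;

public class HigherKeyExample {
  public static void main(String[] args) {
    TreeMap<Long, Integer> partialMap = new TreeMap<>();
    partialMap.put(10L, 1);

    long stageDeadline = 100L;
    long dur = 30L;
    // higherKey returns null here because no key is strictly greater
    // than 10L; unboxing that null inside Math.max would throw the NPE
    // this patch guards against.
    Long higher = partialMap.higherKey(10L);
    stageDeadline = (higher == null)
        ? stageDeadline - dur
        : Math.max(higher, stageDeadline - dur);
    System.out.println(stageDeadline); // 70
  }
}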
 





[33/50] [abbrv] hadoop git commit: Revert "HDFS-10224. Implement asynchronous rename for DistributedFileSystem. Contributed by Xiaobing Zhou"

2016-06-07 Thread jianhe
Revert "HDFS-10224. Implement asynchronous rename for DistributedFileSystem.  
Contributed by Xiaobing Zhou"

This reverts commit fc94810d3f537e51e826fc21ade7867892b9d8dc.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/106234d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/106234d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/106234d8

Branch: refs/heads/YARN-4757
Commit: 106234d873c60fa52cd0d812fb1cdc0c6b998a6d
Parents: 4d36b22
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:55 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:55 2016 -0700

--
 .../java/org/apache/hadoop/fs/FileSystem.java   |   1 +
 .../main/java/org/apache/hadoop/ipc/Client.java |  11 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  34 +--
 .../org/apache/hadoop/ipc/TestAsyncIPC.java |   2 +-
 .../hadoop/hdfs/AsyncDistributedFileSystem.java | 110 
 .../hadoop/hdfs/DistributedFileSystem.java  |  22 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  45 +---
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 258 ---
 8 files changed, 20 insertions(+), 463 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/106234d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 9e13a7a..0ecd8b7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1252,6 +1252,7 @@ public abstract class FileSystem extends Configured 
implements Closeable {
   /**
* Renames Path src to Path dst
* 
+   * Fails if src is a file and dst is a directory.
* Fails if src is a directory and dst is a file.
* Fails if the parent of dst does not exist or is a file.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/106234d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index d59aeb89..f206861 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -119,8 +119,7 @@ public class Client implements AutoCloseable {
 
   private static final ThreadLocal<Integer> callId = new 
ThreadLocal<Integer>();
   private static final ThreadLocal<Integer> retryCount = new 
ThreadLocal<Integer>();
-  private static final ThreadLocal<Future<?>>
-  RETURN_RPC_RESPONSE = new ThreadLocal<>();
+  private static final ThreadLocal<Future<?>> returnValue = new 
ThreadLocal<>();
   private static final ThreadLocal<Boolean> asynchronousMode =
       new ThreadLocal<Boolean>() {
         @Override
@@ -131,8 +130,8 @@ public class Client implements AutoCloseable {
 
   @SuppressWarnings("unchecked")
   @Unstable
-  public static <T> Future<T> getReturnRpcResponse() {
-    return (Future<T>) RETURN_RPC_RESPONSE.get();
+  public static <T> Future<T> getReturnValue() {
+    return (Future<T>) returnValue.get();
   }
 
   /** Set call id and retry count for the next call. */
@@ -1397,7 +1396,7 @@ public class Client implements AutoCloseable {
 }
   };
 
-  RETURN_RPC_RESPONSE.set(returnFuture);
+  returnValue.set(returnFuture);
   return null;
 } else {
   return getRpcResponse(call, connection);
@@ -1411,7 +1410,7 @@ public class Client implements AutoCloseable {
*  synchronous mode.
*/
   @Unstable
-  public static boolean isAsynchronousMode() {
+  static boolean isAsynchronousMode() {
 return asynchronousMode.get();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/106234d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 8fcdb78..071e2e8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -26,9 +26,7 @@ import 

[37/50] [abbrv] hadoop git commit: Revert "Revert "HADOOP-13168. Support Future.get with timeout in ipc async calls.""

2016-06-07 Thread jianhe
Revert "Revert "HADOOP-13168. Support Future.get with timeout in ipc async 
calls.""

This reverts commit e4450d47f19131818e1c040b6bd8d85ae8250475.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/574dcd34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/574dcd34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/574dcd34

Branch: refs/heads/YARN-4757
Commit: 574dcd34c0da1903d25e37dc5757642a584dc3d0
Parents: cba9a01
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:23 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:23 2016 +0800

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 119 --
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  62 +-
 .../apache/hadoop/util/concurrent/AsyncGet.java |  60 +
 .../hadoop/util/concurrent/AsyncGetFuture.java  |  73 +++
 .../org/apache/hadoop/ipc/TestAsyncIPC.java | 124 +++
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  24 +---
 .../ClientNamenodeProtocolTranslatorPB.java |  33 ++---
 7 files changed, 310 insertions(+), 185 deletions(-)
--
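
The restored AsyncGet/AsyncGetFuture pair is what lets the returned Future honor a deadline. A hedged usage sketch against AsyncDistributedFileSystem (variable names are placeholders; adfs, src and dst come from elsewhere):

    Future<Void> returnFuture = adfs.rename(src, dst, Options.Rename.NONE);
    try {
      returnFuture.get(5, TimeUnit.SECONDS);   // wait at most five seconds
    } catch (TimeoutException e) {
      // still in flight: keep the Future and call get() again later, or give up
    }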


http://git-wip-us.apache.org/repos/asf/hadoop/blob/574dcd34/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 9be4649..d1d5b17 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -18,46 +18,10 @@
 
 package org.apache.hadoop.ipc;
 
-import static org.apache.hadoop.ipc.RpcConstants.*;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.EOFException;
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.Hashtable;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import javax.net.SocketFactory;
-import javax.security.sasl.Sasl;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.protobuf.CodedOutputStream;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -93,14 +57,25 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.AsyncGet;
+import org.apache.hadoop.util.concurrent.AsyncGetFuture;
 import org.apache.htrace.core.Span;
 import org.apache.htrace.core.Tracer;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AbstractFuture;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.CodedOutputStream;
+import javax.net.SocketFactory;
+import javax.security.sasl.Sasl;
+import java.io.*;
+import java.net.*;
+import java.security.PrivilegedExceptionAction;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.hadoop.ipc.RpcConstants.CONNECTION_CONTEXT_CALL_ID;
+import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
 
 /** A client for an IPC 

[42/50] [abbrv] hadoop git commit: MAPREDUCE-5044. Have AM trigger jstack on task attempts that timeout before killing them. (Eric Payne and Gera Shegalov via mingma)

2016-06-07 Thread jianhe
MAPREDUCE-5044. Have AM trigger jstack on task attempts that timeout before 
killing them. (Eric Payne and Gera Shegalov via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a1cedc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a1cedc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a1cedc0

Branch: refs/heads/YARN-4757
Commit: 4a1cedc010d3fa1d8ef3f2773ca12acadfee5ba5
Parents: 35f255b
Author: Ming Ma 
Authored: Mon Jun 6 14:30:51 2016 -0700
Committer: Ming Ma 
Committed: Mon Jun 6 14:30:51 2016 -0700

--
 .../hadoop/mapred/LocalContainerLauncher.java   |  28 +
 .../v2/app/job/impl/TaskAttemptImpl.java|   5 +-
 .../v2/app/launcher/ContainerLauncherEvent.java |  21 +++-
 .../v2/app/launcher/ContainerLauncherImpl.java  |  19 ++-
 .../v2/app/launcher/TestContainerLauncher.java  |  10 +-
 .../app/launcher/TestContainerLauncherImpl.java |   8 ++
 .../hadoop/mapred/ResourceMgrDelegate.java  |   5 +-
 .../hadoop/mapred/TestClientRedirect.java   |   2 +-
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  | 119 +++
 .../yarn/api/ApplicationClientProtocol.java |   2 +-
 .../yarn/api/ContainerManagementProtocol.java   |   5 +
 .../SignalContainerResponse.java|   2 +-
 .../main/proto/applicationclient_protocol.proto |   2 +-
 .../proto/containermanagement_protocol.proto|   1 +
 .../hadoop/yarn/client/api/YarnClient.java  |   2 +-
 .../yarn/client/api/impl/YarnClientImpl.java|   4 +-
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |   6 +-
 .../yarn/client/api/impl/TestYarnClient.java|   4 +-
 .../yarn/api/ContainerManagementProtocolPB.java |   7 ++
 .../ApplicationClientProtocolPBClientImpl.java  |   4 +-
 ...ContainerManagementProtocolPBClientImpl.java |  19 +++
 .../ApplicationClientProtocolPBServiceImpl.java |   5 +-
 ...ontainerManagementProtocolPBServiceImpl.java |  20 
 .../hadoop/yarn/TestContainerLaunchRPC.java |  10 ++
 .../yarn/TestContainerResourceIncreaseRPC.java  |   8 ++
 .../java/org/apache/hadoop/yarn/TestRPC.java|  10 ++
 .../containermanager/ContainerManagerImpl.java  |  38 --
 .../amrmproxy/MockResourceManagerFacade.java|   2 +-
 .../server/resourcemanager/ClientRMService.java |   2 +-
 .../yarn/server/resourcemanager/MockRM.java |   6 +-
 .../server/resourcemanager/NodeManager.java |   9 +-
 .../resourcemanager/TestAMAuthorization.java|   8 ++
 .../TestApplicationMasterLauncher.java  |   8 ++
 .../resourcemanager/TestSignalContainer.java|   2 +-
 34 files changed, 361 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a1cedc0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index da118c5..190d988 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -20,6 +20,10 @@ package org.apache.hadoop.mapred;
 
 import java.io.File;
 import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -255,6 +259,30 @@ public class LocalContainerLauncher extends AbstractService implements
 
 } else if (event.getType() == EventType.CONTAINER_REMOTE_CLEANUP) {
 
+  if (event.getDumpContainerThreads()) {
+try {
+  // Construct full thread dump header
+  System.out.println(new java.util.Date());
+  RuntimeMXBean rtBean = ManagementFactory.getRuntimeMXBean();
+  System.out.println("Full thread dump " + rtBean.getVmName()
+  + " (" + rtBean.getVmVersion()
+  + " " + rtBean.getSystemProperties().get("java.vm.info")
+  + "):\n");
+  // Dump threads' states and stacks
+  ThreadMXBean tmxBean = ManagementFactory.getThreadMXBean();
+  ThreadInfo[] tInfos = tmxBean.dumpAllThreads(
+  
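
The excerpt cuts off mid-call; for reference, a self-contained sketch of the same java.lang.management technique using only standard JDK APIs:

    import java.lang.management.ManagementFactory;
    import java.lang.management.RuntimeMXBean;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    public class JstackLike {
      public static void main(String[] args) {
        RuntimeMXBean rt = ManagementFactory.getRuntimeMXBean();
        System.out.println("Full thread dump " + rt.getVmName()
            + " (" + rt.getVmVersion() + "):\n");
        ThreadMXBean tmx = ManagementFactory.getThreadMXBean();
        // true, true: include locked monitors and ownable synchronizers
        for (ThreadInfo info : tmx.dumpAllThreads(true, true)) {
          System.out.print(info);
        }
      }
    }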

[10/50] [abbrv] hadoop git commit: YARN-5180. Allow ResourceRequest to specify an enforceExecutionType flag. (asuresh)

2016-06-07 Thread jianhe
YARN-5180. Allow ResourceRequest to specify an enforceExecutionType flag. 
(asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc26601d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc26601d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc26601d

Branch: refs/heads/YARN-4757
Commit: dc26601d8fe27a4223a50601bf7522cc42e8e2f3
Parents: aadb77e
Author: Arun Suresh 
Authored: Thu Jun 2 05:18:01 2016 -0700
Committer: Arun Suresh 
Committed: Thu Jun 2 09:01:02 2016 -0700

--
 .../v2/app/rm/RMContainerRequestor.java |   4 +-
 .../yarn/api/records/ExecutionTypeRequest.java  | 124 +++
 .../yarn/api/records/ResourceRequest.java   |  34 ++---
 .../src/main/proto/yarn_protos.proto|   7 +-
 .../api/impl/TestDistributedScheduling.java |   9 +-
 .../impl/pb/ExecutionTypeRequestPBImpl.java |  93 ++
 .../yarn/api/records/impl/pb/ProtoUtils.java|  15 +++
 .../records/impl/pb/ResourceRequestPBImpl.java  |  52 
 .../hadoop/yarn/api/TestPBImplRecords.java  |   2 +
 .../nodemanager/scheduler/LocalScheduler.java   |   3 +-
 .../scheduler/TestLocalScheduler.java   |  11 +-
 .../TestDistributedSchedulingService.java   |  17 ++-
 12 files changed, 323 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc26601d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
index 7030712..f4579ab 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -462,7 +463,8 @@ public abstract class RMContainerRequestor extends RMCommunicator {
   remoteRequest.setCapability(capability);
   remoteRequest.setNumContainers(0);
   remoteRequest.setNodeLabelExpression(nodeLabelExpression);
-  remoteRequest.setExecutionType(executionType);
+  remoteRequest.setExecutionTypeRequest(
+  ExecutionTypeRequest.newInstance(executionType, true));
   reqMap.put(capability, remoteRequest);
 }
 remoteRequest.setNumContainers(remoteRequest.getNumContainers() + 1);
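
For context, a hedged sketch of an AM-side request for one container whose OPPORTUNISTIC execution type must be honored (factory methods as introduced by this patch):

    ResourceRequest req = ResourceRequest.newInstance(
        Priority.newInstance(0), ResourceRequest.ANY,
        Resource.newInstance(1024, 1), 1);
    // enforceExecutionType=true: the allocation must match the requested type
    req.setExecutionTypeRequest(
        ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true));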

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc26601d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ExecutionTypeRequest.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ExecutionTypeRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ExecutionTypeRequest.java
new file mode 100644
index 000..f553a44
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ExecutionTypeRequest.java
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * 

[01/50] [abbrv] hadoop git commit: HADOOP-13137. TraceAdmin should support Kerberized cluster (Wei-Chiu Chuang via cmccabe)

2016-06-07 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/YARN-4757 9a31e5dfe -> be34e85e6


HADOOP-13137. TraceAdmin should support Kerberized cluster (Wei-Chiu Chuang via 
cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ceb06e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ceb06e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ceb06e2

Branch: refs/heads/YARN-4757
Commit: 8ceb06e2392763726210f96bb1c176e6a9fe7b53
Parents: c7921c9
Author: Colin Patrick Mccabe 
Authored: Tue May 31 17:54:34 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Tue May 31 17:54:34 2016 -0700

--
 .../org/apache/hadoop/tracing/TraceAdmin.java   | 16 -
 .../hadoop-common/src/site/markdown/Tracing.md  |  9 +++
 .../apache/hadoop/tracing/TestTraceAdmin.java   | 69 +++-
 3 files changed, 92 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ceb06e2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
index 5fdfbfa..4cf1ead 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
@@ -29,6 +29,7 @@ import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -36,6 +37,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A command-line tool for viewing and modifying tracing settings.
@@ -44,6 +47,7 @@ import org.apache.hadoop.util.Tool;
 public class TraceAdmin extends Configured implements Tool {
   private TraceAdminProtocolPB proxy;
   private TraceAdminProtocolTranslatorPB remote;
+  private static final Logger LOG = LoggerFactory.getLogger(TraceAdmin.class);
 
   private void usage() {
 PrintStream err = System.err;
@@ -61,7 +65,9 @@ public class TraceAdmin extends Configured implements Tool {
 "  -list: List the current span receivers.\n" +
 "  -remove [id]\n" +
 "Remove the span receiver with the specified id.  Use -list 
to\n" +
-"find the id of each receiver.\n"
+"find the id of each receiver.\n" +
+"  -principal: If the daemon is Kerberized, specify the service\n" 
+
+"principal name."
 );
   }
 
@@ -166,6 +172,14 @@ public class TraceAdmin extends Configured implements Tool {
   System.err.println("You must specify an operation.");
   return 1;
 }
+String servicePrincipal = StringUtils.popOptionWithArgument("-principal",
+args);
+if (servicePrincipal != null) {
+  LOG.debug("Set service principal: {}", servicePrincipal);
+  getConf().set(
+  CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
+  servicePrincipal);
+}
 RPC.setProtocolEngine(getConf(), TraceAdminProtocolPB.class,
 ProtobufRpcEngine.class);
 InetSocketAddress address = NetUtils.createSocketAddr(hostPort);
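
Programmatically, the -principal flag amounts to one configuration override before the proxy is created (a sketch; the principal value is a placeholder):

    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
        "namenode/nn1.example.com@EXAMPLE.COM");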

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ceb06e2/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
index 7b0e9ee..cbdee8a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
@@ -84,6 +84,15 @@ You can specify the configuration associated with span receiver by `-Ckey=value`
   ID  CLASS
   2   org.apache.htrace.core.LocalFileSpanReceiver
 
+If the cluster is Kerberized, the service principal name must be specified using `-principal` option.
+For example, to show list of span receivers of a namenode:
+
+$ hadoop trace -list -host NN1:8020 -principal namenode/n...@example.com
+
+Or, for a datanode:
+
+$ hadoop trace -list -host 

[07/50] [abbrv] hadoop git commit: HDFS-9476. TestDFSUpgradeFromImage#testUpgradeFromRel1BBWImage occasionally fails. Contributed by Masatake Iwasaki.

2016-06-07 Thread jianhe
HDFS-9476. TestDFSUpgradeFromImage#testUpgradeFromRel1BBWImage occasionally fails. Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69555fca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69555fca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69555fca

Branch: refs/heads/YARN-4757
Commit: 69555fca066815053dd9168ebe15868a5c02cdcd
Parents: 16b1cc7
Author: Akira Ajisaka 
Authored: Thu Jun 2 18:52:47 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Jun 2 18:52:47 2016 +0900

--
 .../apache/hadoop/hdfs/TestDFSUpgradeFromImage.java | 16 +---
 1 file changed, 13 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69555fca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 1ba36f3..60bea7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -173,7 +173,7 @@ public class TestDFSUpgradeFromImage {
   private static FSInputStream dfsOpenFileWithRetries(DistributedFileSystem dfs,
       String pathName) throws IOException {
 IOException exc = null;
-for (int tries = 0; tries < 10; tries++) {
+for (int tries = 0; tries < 30; tries++) {
   try {
 return dfs.dfs.open(pathName);
   } catch (IOException e) {
@@ -184,6 +184,7 @@ public class TestDFSUpgradeFromImage {
 throw exc;
   }
   try {
+LOG.info("Open failed. " + tries + " times. Retrying.");
 Thread.sleep(1000);
   } catch (InterruptedException ignored) {}
 }
@@ -570,8 +571,17 @@ public class TestDFSUpgradeFromImage {
 String pathStr = path.toString();
 HdfsFileStatus status = dfs.getFileInfo(pathStr);
 if (!status.isDir()) {
-  dfs.recoverLease(pathStr);
-  return;
+  for (int retries = 10; retries > 0; retries--) {
+if (dfs.recoverLease(pathStr)) {
+  return;
+} else {
+  try {
+Thread.sleep(1000);
+  } catch (InterruptedException ignored) {
+  }
+}
+  }
+  throw new IOException("Failed to recover lease of " + path);
 }
 byte prev[] = HdfsFileStatus.EMPTY_NAME;
 DirectoryListing dirList;
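
Both hunks follow the same bounded-poll idiom. A generic form for reference (a sketch, not part of the patch):

    import java.util.concurrent.Callable;

    public final class Retry {
      // Try an action up to `attempts` times, pausing a second between
      // failures, mirroring the loops in the patch above.
      public static <T> T withRetries(int attempts, Callable<T> action)
          throws Exception {
        Exception last = null;
        for (int i = 0; i < attempts; i++) {
          try {
            return action.call();
          } catch (Exception e) {
            last = e;
            Thread.sleep(1000);
          }
        }
        throw last;
      }
    }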





[16/50] [abbrv] hadoop git commit: YARN-1815. Work preserving recovery of Unmanaged AMs. Contributed by Subru Krishnan

2016-06-07 Thread jianhe
YARN-1815. Work preserving recovery of Unmanaged AMs. Contributed by Subru Krishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/097baaae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/097baaae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/097baaae

Branch: refs/heads/YARN-4757
Commit: 097baaaebae021c47bb7d69aa1ff1a2440df5166
Parents: c58a59f
Author: Jian He 
Authored: Fri Jun 3 10:49:30 2016 -0700
Committer: Jian He 
Committed: Fri Jun 3 10:49:30 2016 -0700

--
 .../rmapp/attempt/RMAppAttemptImpl.java | 28 +++---
 .../scheduler/AbstractYarnScheduler.java|  8 --
 .../yarn/server/resourcemanager/MockRM.java | 14 +++
 .../TestWorkPreservingRMRestart.java| 92 
 .../attempt/TestRMAppAttemptTransitions.java|  3 +-
 5 files changed, 122 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/097baaae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 1e2a293..75090fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -354,8 +354,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
   RMAppAttemptState.FAILED))
 
// Transitions from RUNNING State
-  .addTransition(RMAppAttemptState.RUNNING,
-  EnumSet.of(RMAppAttemptState.FINAL_SAVING, RMAppAttemptState.FINISHED),
+  .addTransition(RMAppAttemptState.RUNNING, RMAppAttemptState.FINAL_SAVING,
   RMAppAttemptEventType.UNREGISTERED, new AMUnregisteredTransition())
   .addTransition(RMAppAttemptState.RUNNING, RMAppAttemptState.RUNNING,
   RMAppAttemptEventType.STATUS_UPDATE, new StatusUpdateTransition())
@@ -1714,25 +1713,26 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
 }
   }
 
-  private static final class AMUnregisteredTransition implements
-  MultipleArcTransition<RMAppAttemptImpl, RMAppAttemptEvent, RMAppAttemptState> {
+  private static final class AMUnregisteredTransition extends BaseTransition {
 
 @Override
-public RMAppAttemptState transition(RMAppAttemptImpl appAttempt,
+public void transition(RMAppAttemptImpl appAttempt,
 RMAppAttemptEvent event) {
   // Tell the app
   if (appAttempt.getSubmissionContext().getUnmanagedAM()) {
+// YARN-1815: Saving the attempt final state so that we do not recover
+// the finished Unmanaged AM post RM failover
 // Unmanaged AMs have no container to wait for, so they skip
 // the FINISHING state and go straight to FINISHED.
-appAttempt.updateInfoOnAMUnregister(event);
-new FinalTransition(RMAppAttemptState.FINISHED).transition(
-appAttempt, event);
-return RMAppAttemptState.FINISHED;
+appAttempt.rememberTargetTransitionsAndStoreState(event,
+new AMFinishedAfterFinalSavingTransition(event),
+RMAppAttemptState.FINISHED, RMAppAttemptState.FINISHED);
+  } else {
+// Saving the attempt final state
+appAttempt.rememberTargetTransitionsAndStoreState(event,
+new FinalStateSavedAfterAMUnregisterTransition(),
+RMAppAttemptState.FINISHING, RMAppAttemptState.FINISHED);
   }
-  // Saving the attempt final state
-  appAttempt.rememberTargetTransitionsAndStoreState(event,
-new FinalStateSavedAfterAMUnregisterTransition(),
-RMAppAttemptState.FINISHING, RMAppAttemptState.FINISHED);
   ApplicationId applicationId =
   appAttempt.getAppAttemptId().getApplicationId();
 
@@ -1743,7 +1743,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
   // AppAttempt to App after this point of time is AM/AppAttempt Finished.
   appAttempt.eventHandler.handle(new RMAppEvent(applicationId,
 RMAppEventType.ATTEMPT_UNREGISTERED));
-  

[17/50] [abbrv] hadoop git commit: YARN-5190. Registering/unregistering container metrics in ContainerMonitorImpl and ContainerImpl causing uncaught exception in ContainerMonitorImpl. Contributed by J

2016-06-07 Thread jianhe
YARN-5190. Registering/unregistering container metrics in ContainerMonitorImpl 
and ContainerImpl causing uncaught exception in ContainerMonitorImpl. 
Contributed by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99cc439e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99cc439e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99cc439e

Branch: refs/heads/YARN-4757
Commit: 99cc439e29794f8e61bebe03b2a7ca4b6743ec92
Parents: 097baaa
Author: Jian He 
Authored: Fri Jun 3 11:10:42 2016 -0700
Committer: Jian He 
Committed: Fri Jun 3 11:10:42 2016 -0700

--
 .../hadoop/metrics2/impl/MetricsSystemImpl.java   |  1 +
 .../hadoop/metrics2/lib/DefaultMetricsSystem.java |  9 +
 .../monitor/ContainerMetrics.java | 18 +-
 .../monitor/ContainersMonitorImpl.java| 16 
 .../monitor/TestContainerMetrics.java |  4 +++-
 5 files changed, 38 insertions(+), 10 deletions(-)
--
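
The fix hinges on dropping the cached source name when a source is unregistered, so a finished container's metrics source can later be re-registered under the same name. A hedged sketch of that cycle (names are placeholders):

    import org.apache.hadoop.metrics2.MetricsCollector;
    import org.apache.hadoop.metrics2.MetricsSource;
    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

    public class ReRegisterExample {
      public static void main(String[] args) {
        MetricsSystem ms = DefaultMetricsSystem.initialize("test");
        MetricsSource source = new MetricsSource() {
          @Override
          public void getMetrics(MetricsCollector collector, boolean all) {
            collector.addRecord("MySource");   // emit an empty record
          }
        };
        ms.register("MySource", "per-container metrics", source);
        ms.unregisterSource("MySource");   // with this patch, also clears the
                                           // cached name in DefaultMetricsSystem
        ms.register("MySource", "per-container metrics", source);  // no clash
      }
    }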


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99cc439e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
index ef7306b..6986edb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
@@ -255,6 +255,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
 if (namedCallbacks.containsKey(name)) {
   namedCallbacks.remove(name);
 }
+DefaultMetricsSystem.removeSourceName(name);
   }
 
   synchronized

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99cc439e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java
index c761b58..935f47f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java
@@ -116,6 +116,11 @@ public enum DefaultMetricsSystem {
   }
 
   @InterfaceAudience.Private
+  public static void removeSourceName(String name) {
+INSTANCE.removeSource(name);
+  }
+
+  @InterfaceAudience.Private
   public static String sourceName(String name, boolean dupOK) {
 return INSTANCE.newSourceName(name, dupOK);
   }
@@ -135,6 +140,10 @@ public enum DefaultMetricsSystem {
 mBeanNames.map.remove(name);
   }
 
+  synchronized void removeSource(String name) {
+sourceNames.map.remove(name);
+  }
+
   synchronized String newSourceName(String name, boolean dupOK) {
 if (sourceNames.map.containsKey(name)) {
   if (dupOK) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99cc439e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
index d59abda..31a9aa7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
@@ -198,6 +198,12 @@ public class ContainerMetrics implements MetricsSource {
 DefaultMetricsSystem.instance(), containerId, flushPeriodMs, delayMs);
   }
 
+  public synchronized static ContainerMetrics getContainerMetrics(
+  ContainerId containerId) {
+// could be null

[36/50] [abbrv] hadoop git commit: Revert "Revert "HDFS-10346. Implement asynchronous setPermission/setOwner for DistributedFileSystem. Contributed by Xiaobing Zhou""

2016-06-07 Thread jianhe
Revert "Revert "HDFS-10346. Implement asynchronous setPermission/setOwner for 
DistributedFileSystem.  Contributed by  Xiaobing Zhou""

This reverts commit f23d5dfc60a017187ae57f3667ac0e688877c2dd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cba9a018
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cba9a018
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cba9a018

Branch: refs/heads/YARN-4757
Commit: cba9a0188970cb33dcb95e9c49168ac4a83446d9
Parents: aa20fa1
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:29:38 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:29:38 2016 +0800

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  59 
 .../ClientNamenodeProtocolTranslatorPB.java |  39 ++-
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 267 +--
 .../apache/hadoop/hdfs/TestDFSPermission.java   |  29 +-
 4 files changed, 351 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cba9a018/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
index 356ae3f..4fe0861 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
@@ -27,6 +27,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.ipc.Client;
 
@@ -37,6 +38,9 @@ import com.google.common.util.concurrent.AbstractFuture;
  * This instance of this class is the way end-user code interacts
  * with a Hadoop DistributedFileSystem in an asynchronous manner.
  *
+ * This class is unstable, so no guarantee is provided as to reliability,
+ * stability or compatibility across any level of release granularity.
+ *
  */
 @Unstable
 public class AsyncDistributedFileSystem {
@@ -111,4 +115,59 @@ public class AsyncDistributedFileSystem {
   Client.setAsynchronousMode(isAsync);
 }
   }
+
+  /**
+   * Set permission of a path.
+   *
+   * @param p
+   *  the path the permission is set to
+   * @param permission
+   *  the permission that is set to a path.
+   * @return an instance of Future, #get of which is invoked to wait for
+   * asynchronous call being finished.
+   */
+  public Future<Void> setPermission(Path p, final FsPermission permission)
+  throws IOException {
+dfs.getFsStatistics().incrementWriteOps(1);
+final Path absPath = dfs.fixRelativePart(p);
+final boolean isAsync = Client.isAsynchronousMode();
+Client.setAsynchronousMode(true);
+try {
+  dfs.getClient().setPermission(dfs.getPathName(absPath), permission);
+  return getReturnValue();
+} finally {
+  Client.setAsynchronousMode(isAsync);
+}
+  }
+
+  /**
+   * Set owner of a path (i.e. a file or a directory). The parameters username
+   * and groupname cannot both be null.
+   *
+   * @param p
+   *  The path
+   * @param username
+   *  If it is null, the original username remains unchanged.
+   * @param groupname
+   *  If it is null, the original groupname remains unchanged.
+   * @return an instance of Future, #get of which is invoked to wait for
+   * asynchronous call being finished.
+   */
+  public Future<Void> setOwner(Path p, String username, String groupname)
+  throws IOException {
+if (username == null && groupname == null) {
+  throw new IOException("username == null && groupname == null");
+}
+
+dfs.getFsStatistics().incrementWriteOps(1);
+final Path absPath = dfs.fixRelativePart(p);
+final boolean isAsync = Client.isAsynchronousMode();
+Client.setAsynchronousMode(true);
+try {
+  dfs.getClient().setOwner(dfs.getPathName(absPath), username, groupname);
+  return getReturnValue();
+} finally {
+  Client.setAsynchronousMode(isAsync);
+}
+  }
 }
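
End to end, each async call pairs a void-returning invocation with a Future fetched from the Client. A hedged usage sketch (the accessor name on DistributedFileSystem is assumed from this patch series; dfs and path come from elsewhere):

    AsyncDistributedFileSystem adfs = dfs.getAsyncDistributedFileSystem();
    Future<Void> f = adfs.setPermission(path, new FsPermission((short) 0755));
    f.get();   // blocks until the NameNode acknowledges the change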


[03/50] [abbrv] hadoop git commit: HADOOP-13228. Add delegation token to the connection in DelegationTokenAuthenticator. Contributed by Xiao Chen.

2016-06-07 Thread jianhe
HADOOP-13228. Add delegation token to the connection in 
DelegationTokenAuthenticator. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35356de1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35356de1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35356de1

Branch: refs/heads/YARN-4757
Commit: 35356de1ba1cad0fa469ff546263290109c61b77
Parents: 5870611
Author: Andrew Wang 
Authored: Wed Jun 1 13:13:17 2016 -0700
Committer: Andrew Wang 
Committed: Wed Jun 1 13:13:17 2016 -0700

--
 .../DelegationTokenAuthenticationHandler.java   |   7 ++
 .../web/DelegationTokenAuthenticator.java   |  19 
 .../delegation/web/TestWebDelegationToken.java  | 114 ++-
 3 files changed, 137 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35356de1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
index 3f191de..95a849f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
@@ -51,6 +51,8 @@ import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
@@ -78,6 +80,9 @@ import com.google.common.annotations.VisibleForTesting;
 public abstract class DelegationTokenAuthenticationHandler
 implements AuthenticationHandler {
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(DelegationTokenAuthenticationHandler.class);
+
   protected static final String TYPE_POSTFIX = "-dt";
 
   public static final String PREFIX = "delegation-token.";
@@ -327,6 +332,8 @@ public abstract class DelegationTokenAuthenticationHandler
   throws IOException, AuthenticationException {
 AuthenticationToken token;
 String delegationParam = getDelegationToken(request);
+LOG.debug("Authenticating with delegationParam: {}, query string: {}",
+delegationParam, request.getQueryString());
 if (delegationParam != null) {
   try {
 Token dt = new Token();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35356de1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
index 8a3a57f..46a0b1f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
@@ -121,6 +121,24 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
 return hasDt;
   }
 
+  /**
+   * Append the delegation token to the request header if needed.
+   */
+  private void appendDelegationToken(final AuthenticatedURL.Token token,
+  final Token dToken, final HttpURLConnection conn) throws IOException {
+if (token.isSet()) {
+  LOG.debug("Auth token is set, not appending delegation token.");
+  return;
+}
+if (dToken == null) {
+  LOG.warn("Delegation token is null, cannot set on request header.");
+  return;
+}
+conn.setRequestProperty(
+DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER,
+dToken.encodeToUrlString());
+  }
+
   @Override
   public void authenticate(URL url, AuthenticatedURL.Token token)
   throws IOException, AuthenticationException {
@@ -283,6 +301,7 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
 url = new URL(sb.toString());
 AuthenticatedURL aUrl = new AuthenticatedURL(this, 
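
The excerpt ends mid-statement; the essential behavior is the header write. A hedged sketch of attaching an already-fetched token to a raw connection (dToken is a placeholder for that token):

    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty(
        DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER,
        dToken.encodeToUrlString());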

[34/50] [abbrv] hadoop git commit: Revert "Revert "HDFS-10224. Implement asynchronous rename for DistributedFileSystem. Contributed by Xiaobing Zhou""

2016-06-07 Thread jianhe
Revert "Revert "HDFS-10224. Implement asynchronous rename for 
DistributedFileSystem.  Contributed by Xiaobing Zhou""

This reverts commit 106234d873c60fa52cd0d812fb1cdc0c6b998a6d.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eded3d10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eded3d10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eded3d10

Branch: refs/heads/YARN-4757
Commit: eded3d109e4c5225d8c5cd3c2d82e7ac93841263
Parents: 106234d
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:28:21 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:28:21 2016 +0800

--
 .../java/org/apache/hadoop/fs/FileSystem.java   |   1 -
 .../main/java/org/apache/hadoop/ipc/Client.java |  11 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  34 ++-
 .../org/apache/hadoop/ipc/TestAsyncIPC.java |   2 +-
 .../hadoop/hdfs/AsyncDistributedFileSystem.java | 110 
 .../hadoop/hdfs/DistributedFileSystem.java  |  22 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  45 +++-
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 258 +++
 8 files changed, 463 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eded3d10/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 0ecd8b7..9e13a7a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1252,7 +1252,6 @@ public abstract class FileSystem extends Configured implements Closeable {
   /**
* Renames Path src to Path dst
* 
-   * Fails if src is a file and dst is a directory.
* Fails if src is a directory and dst is a file.
* Fails if the parent of dst does not exist or is a file.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eded3d10/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index f206861..d59aeb89 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -119,7 +119,8 @@ public class Client implements AutoCloseable {
 
   private static final ThreadLocal<Integer> callId = new ThreadLocal<Integer>();
   private static final ThreadLocal<Integer> retryCount = new ThreadLocal<Integer>();
-  private static final ThreadLocal<Future<?>> returnValue = new ThreadLocal<>();
+  private static final ThreadLocal<Future<?>>
+      RETURN_RPC_RESPONSE = new ThreadLocal<>();
   private static final ThreadLocal<Boolean> asynchronousMode =
       new ThreadLocal<Boolean>() {
     @Override
@@ -130,8 +131,8 @@ public class Client implements AutoCloseable {
 
   @SuppressWarnings("unchecked")
   @Unstable
-  public static <T> Future<T> getReturnValue() {
-    return (Future<T>) returnValue.get();
+  public static <T> Future<T> getReturnRpcResponse() {
+    return (Future<T>) RETURN_RPC_RESPONSE.get();
   }
 
   /** Set call id and retry count for the next call. */
@@ -1396,7 +1397,7 @@ public class Client implements AutoCloseable {
 }
   };
 
-  returnValue.set(returnFuture);
+  RETURN_RPC_RESPONSE.set(returnFuture);
   return null;
 } else {
   return getRpcResponse(call, connection);
@@ -1410,7 +1411,7 @@ public class Client implements AutoCloseable {
*  synchronous mode.
*/
   @Unstable
-  static boolean isAsynchronousMode() {
+  public static boolean isAsynchronousMode() {
 return asynchronousMode.get();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eded3d10/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 071e2e8..8fcdb78 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

[40/50] [abbrv] hadoop git commit: Revert "Revert "HDFS-10430. Reuse FileSystem#access in TestAsyncDFS. Contributed by Xiaobing Zhou.""

2016-06-07 Thread jianhe
Revert "Revert "HDFS-10430. Reuse FileSystem#access in TestAsyncDFS. 
Contributed by Xiaobing Zhou.""

This reverts commit 8cf47d8589badfc07ef4bca3328a420c7c68abbd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e7b1ae0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e7b1ae0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e7b1ae0

Branch: refs/heads/YARN-4757
Commit: 7e7b1ae03759da0becfef677e1d5f7a2ed9041c3
Parents: db41e6d
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:38 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:38 2016 +0800

--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 36 +---
 1 file changed, 1 insertion(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e7b1ae0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
index ddcf492..c7615a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
@@ -34,7 +34,6 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -46,19 +45,16 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.TestDFSPermission.PermissionGenerator;
 import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
 import org.apache.hadoop.ipc.AsyncCallLimitExceededException;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -445,7 +441,7 @@ public class TestAsyncDFS {
 for (int i = 0; i < NUM_TESTS; i++) {
   assertTrue(fs.exists(dsts[i]));
   FsPermission fsPerm = new FsPermission(permissions[i]);
-  checkAccessPermissions(fs.getFileStatus(dsts[i]), fsPerm.getUserAction());
+  fs.access(dsts[i], fsPerm.getUserAction());
 }
 
 // test setOwner
@@ -474,34 +470,4 @@ public class TestAsyncDFS {
   assertTrue("group2".equals(fs.getFileStatus(dsts[i]).getGroup()));
 }
   }
-
-  static void checkAccessPermissions(FileStatus stat, FsAction mode)
-  throws IOException {
-checkAccessPermissions(UserGroupInformation.getCurrentUser(), stat, mode);
-  }
-
-  static void checkAccessPermissions(final UserGroupInformation ugi,
-  FileStatus stat, FsAction mode) throws IOException {
-FsPermission perm = stat.getPermission();
-String user = ugi.getShortUserName();
-List groups = Arrays.asList(ugi.getGroupNames());
-
-if (user.equals(stat.getOwner())) {
-  if (perm.getUserAction().implies(mode)) {
-return;
-  }
-} else if (groups.contains(stat.getGroup())) {
-  if (perm.getGroupAction().implies(mode)) {
-return;
-  }
-} else {
-  if (perm.getOtherAction().implies(mode)) {
-return;
-  }
-}
-throw new AccessControlException(String.format(
-"Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat
-.getPath(), stat.getOwner(), stat.getGroup(),
-stat.isDirectory() ? "d" : "-", perm));
-  }
 }
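
FileSystem#access performs the same owner/group/other check on the server and throws on denial, which is why the hand-rolled helper could go. A short sketch:

    // Throws AccessControlException if the current user lacks READ on dst.
    fs.access(dst, FsAction.READ);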





[06/50] [abbrv] hadoop git commit: HDFS-9833. Erasure coding: recomputing block checksum on the fly by reconstructing the missed/corrupt block data. Contributed by Rakesh R.

2016-06-07 Thread jianhe
HDFS-9833. Erasure coding: recomputing block checksum on the fly by 
reconstructing the missed/corrupt block data. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d749cf65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d749cf65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d749cf65

Branch: refs/heads/YARN-4757
Commit: d749cf65e1ab0e0daf5be86931507183f189e855
Parents: 8ceb06e
Author: Kai Zheng 
Authored: Thu Jun 2 12:56:21 2016 +0800
Committer: Kai Zheng 
Committed: Thu Jun 2 12:56:21 2016 +0800

--
 .../apache/hadoop/hdfs/FileChecksumHelper.java  |   3 +-
 .../hadoop/hdfs/protocol/StripedBlockInfo.java  |  10 +-
 .../hdfs/protocol/datatransfer/Sender.java  |   2 +
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  16 ++
 .../src/main/proto/datatransfer.proto   |   1 +
 .../hdfs/protocol/datatransfer/Receiver.java|   1 +
 .../server/datanode/BlockChecksumHelper.java| 172 ++-
 .../erasurecode/ErasureCodingWorker.java|  15 +-
 .../StripedBlockChecksumReconstructor.java  | 129 ++
 .../erasurecode/StripedBlockReconstructor.java  | 119 +
 .../datanode/erasurecode/StripedReader.java |  22 +--
 .../erasurecode/StripedReconstructionInfo.java  |  99 +++
 .../erasurecode/StripedReconstructor.java   | 169 +++---
 .../datanode/erasurecode/StripedWriter.java |  29 ++--
 .../hdfs/TestDecommissionWithStriped.java   |  47 +
 .../apache/hadoop/hdfs/TestFileChecksum.java|  41 -
 16 files changed, 675 insertions(+), 200 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d749cf65/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
index dfd9393..c213fa3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
@@ -460,7 +460,8 @@ final class FileChecksumHelper {
   setRemaining(getRemaining() - block.getNumBytes());
 
   StripedBlockInfo stripedBlockInfo = new StripedBlockInfo(block,
-  blockGroup.getLocations(), blockGroup.getBlockTokens(), ecPolicy);
+  blockGroup.getLocations(), blockGroup.getBlockTokens(),
+  blockGroup.getBlockIndices(), ecPolicy);
   DatanodeInfo[] datanodes = blockGroup.getLocations();
 
   //try each datanode in the block group.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d749cf65/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/StripedBlockInfo.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/StripedBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/StripedBlockInfo.java
index 74e8081..e46fabc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/StripedBlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/StripedBlockInfo.java
@@ -32,14 +32,16 @@ public class StripedBlockInfo {
   private final ExtendedBlock block;
   private final DatanodeInfo[] datanodes;
   private final Token<BlockTokenIdentifier>[] blockTokens;
+  private final byte[] blockIndices;
   private final ErasureCodingPolicy ecPolicy;
 
   public StripedBlockInfo(ExtendedBlock block, DatanodeInfo[] datanodes,
-  Token<BlockTokenIdentifier>[] blockTokens,
-  ErasureCodingPolicy ecPolicy) {
+  Token<BlockTokenIdentifier>[] blockTokens, byte[] blockIndices,
+  ErasureCodingPolicy ecPolicy) {
 this.block = block;
 this.datanodes = datanodes;
 this.blockTokens = blockTokens;
+this.blockIndices = blockIndices;
 this.ecPolicy = ecPolicy;
   }
 
@@ -55,6 +57,10 @@ public class StripedBlockInfo {
 return blockTokens;
   }
 
+  public byte[] getBlockIndices() {
+return blockIndices;
+  }
+
   public ErasureCodingPolicy getErasureCodingPolicy() {
 return ecPolicy;
   }
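
With the extra byte[], the datanode serving the group checksum request can tell which internal block each storage holds and reconstruct a missing one before checksumming. Construction mirrors the FileChecksumHelper hunk above (blockGroup is a LocatedStripedBlock):

    StripedBlockInfo info = new StripedBlockInfo(block,
        blockGroup.getLocations(), blockGroup.getBlockTokens(),
        blockGroup.getBlockIndices(), ecPolicy);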

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d749cf65/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
--
diff --git 

[20/50] [abbrv] hadoop git commit: HADOOP-13109. Add ability to edit existing token file via dtutil -alias flag (Matthew Paduano via aw)

2016-06-07 Thread jianhe
HADOOP-13109. Add ability to edit existing token file via dtutil -alias flag 
(Matthew Paduano via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78b3a038
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78b3a038
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78b3a038

Branch: refs/heads/YARN-4757
Commit: 78b3a038319cb351632250279f171b756c7f24b0
Parents: db54670
Author: Allen Wittenauer 
Authored: Fri Jun 3 15:34:24 2016 -0700
Committer: Allen Wittenauer 
Committed: Fri Jun 3 15:34:39 2016 -0700

--
 .../hadoop/security/token/DtFileOperations.java | 23 ++
 .../hadoop/security/token/DtUtilShell.java  | 47 +---
 .../hadoop/security/token/TestDtUtilShell.java  | 24 ++
 3 files changed, 89 insertions(+), 5 deletions(-)
--
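
Based on the usage string in the DtUtilShell hunk below, an invocation would look roughly like this (the -service filter flag is assumed, not confirmed by this excerpt):

    $ hadoop dtutil edit -service my.old.service -alias my.new.service tokens.dt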


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78b3a038/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
index bdda7c9..1396054 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
@@ -199,6 +199,29 @@ public final class DtFileOperations {
 doFormattedWrite(tokenFile, fileFormat, creds, conf);
   }
 
+  /** Alias a token from a file and save back to file in the local filesystem.
+   *  @param tokenFile a local File object to hold the input and output.
+   *  @param fileFormat a string equal to FORMAT_PB or FORMAT_JAVA, for output
+   *  @param alias overwrite service field of fetched token with this text.
+   *  @param service only apply alias to tokens matching this service text.
+   *  @param conf Configuration object passed along.
+   *  @throws IOException
+   */
+  public static void aliasTokenFile(File tokenFile, String fileFormat,
+  Text alias, Text service, Configuration conf) throws Exception {
+Credentials newCreds = new Credentials();
+Credentials creds = Credentials.readTokenStorageFile(tokenFile, conf);
+for (Token<?> token : creds.getAllTokens()) {
+  newCreds.addToken(token.getService(), token);
+  if (token.getService().equals(service)) {
+Token<?> aliasedToken = token.copyToken();
+aliasedToken.setService(alias);
+newCreds.addToken(alias, aliasedToken);
+  }
+}
+doFormattedWrite(tokenFile, fileFormat, newCreds, conf);
+  }
+
  /** Append tokens from list of files in local filesystem, saving to last file.
   *  @param tokenFiles list of local File objects.  Last file holds the output.
*  @param fileFormat a string equal to FORMAT_PB or FORMAT_JAVA, for output

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78b3a038/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtUtilShell.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtUtilShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtUtilShell.java
index fb74c31..f00e6fd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtUtilShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtUtilShell.java
@@ -41,7 +41,7 @@ public class DtUtilShell extends CommandShell {
   DtFileOperations.FORMAT_PB + ")]";
   public static final String DT_USAGE = "hadoop dtutil " +
   "[-keytab <keytab file> -principal <principal name>] " +
-  "subcommand (help|print|get|append|cancel|remove|renew) " +
+  "subcommand (help|print|get|edit|append|cancel|remove|renew) " +
 FORMAT_SUBSTRING + " [-alias <alias>] filename...";
 
   // command line options
@@ -50,6 +50,7 @@ public class DtUtilShell extends CommandShell {
   private static final String PRINCIPAL = "-principal";
   private static final String PRINT = "print";
   private static final String GET = "get";
+  private static final String EDIT = "edit";
   private static final String APPEND = "append";
   private static final String CANCEL = "cancel";
   private static final String REMOVE = "remove";
@@ -127,6 +128,8 @@ public class DtUtilShell extends CommandShell {
   setSubCommand(new Print());
 } else if (command.equals(GET)) {
   setSubCommand(new Get(args[++i]));
+} else if 
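
For readers wiring this up: a minimal, hypothetical driver for the new aliasTokenFile API. The token file name, alias, and service strings below are illustrative, not taken from the patch; only the method signature and the FORMAT_PB constant come from the diff above.

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.DtFileOperations;

public class AliasTokenExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical local token file: every token whose service field equals
    // "ha-hdfs:mycluster" gets a second entry saved under the alias service.
    File tokenFile = new File("creds.dt");
    DtFileOperations.aliasTokenFile(tokenFile, DtFileOperations.FORMAT_PB,
        new Text("hdfs://nn1:8020"),      // alias written into the copy
        new Text("ha-hdfs:mycluster"),    // service to match
        conf);
  }
}
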

[32/50] [abbrv] hadoop git commit: Revert "HADOOP-12957. Limit the number of outstanding async calls. Contributed by Xiaobing Zhou"

2016-06-07 Thread jianhe
Revert "HADOOP-12957. Limit the number of outstanding async calls.  Contributed 
by Xiaobing Zhou"

This reverts commit 1b9f18623ab55507bea94888317c7d63d0f4a6f2.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d36b221
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d36b221
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d36b221

Branch: refs/heads/YARN-4757
Commit: 4d36b221a24e3b626bb91093b0bb0fd377061cae
Parents: f23d5df
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:18 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:18 2016 -0700

--
 .../hadoop/fs/CommonConfigurationKeys.java  |   3 -
 .../ipc/AsyncCallLimitExceededException.java|  36 ---
 .../main/java/org/apache/hadoop/ipc/Client.java |  66 +
 .../org/apache/hadoop/ipc/TestAsyncIPC.java | 199 ++--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  12 +-
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 238 ++-
 6 files changed, 109 insertions(+), 445 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d36b221/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 06614db..86e1b43 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -324,9 +324,6 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   public static final long HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT =
 4*60*60; // 4 hours
   
-  public static final String  IPC_CLIENT_ASYNC_CALLS_MAX_KEY =
-  "ipc.client.async.calls.max";
-  public static final int IPC_CLIENT_ASYNC_CALLS_MAX_DEFAULT = 100;
   public static final String  IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY = 
"ipc.client.fallback-to-simple-auth-allowed";
   public static final boolean 
IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT = false;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d36b221/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
deleted file mode 100644
index db97b6c..000
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ipc;
-
-import java.io.IOException;
-
-/**
- * Signals that an AsyncCallLimitExceededException has occurred. This class is
- * used to make application code using async RPC aware that limit of max async
- * calls is reached, application code need to retrieve results from response of
- * established async calls to avoid buffer overflow in order for follow-on 
async
- * calls going correctly.
- */
-public class AsyncCallLimitExceededException extends IOException {
-  private static final long serialVersionUID = 1L;
-
-  public AsyncCallLimitExceededException(String message) {
-super(message);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d36b221/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 

[50/50] [abbrv] hadoop git commit: HDFS-10468. HDFS read ends up ignoring an interrupt. Contributed by Jing Zhao

2016-06-07 Thread jianhe
HDFS-10468. HDFS read ends up ignoring an interrupt. Contributed by Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be34e85e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be34e85e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be34e85e

Branch: refs/heads/YARN-4757
Commit: be34e85e682880f46eee0310bf00ecc7d39cd5bd
Parents: c14c1b2
Author: Jing Zhao 
Authored: Tue Jun 7 10:48:21 2016 -0700
Committer: Jing Zhao 
Committed: Tue Jun 7 10:48:21 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 36 ++--
 .../java/org/apache/hadoop/hdfs/TestRead.java   | 87 
 .../server/datanode/SimulatedFSDataset.java |  4 +-
 3 files changed, 119 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be34e85e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 2ed0abd..7f32a56 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs;
 
 import java.io.EOFException;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
+import java.nio.channels.ClosedByInterruptException;
 import java.util.AbstractMap;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -304,7 +306,7 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(waitTime);
 } catch (InterruptedException e) {
-  throw new IOException(
+  throw new InterruptedIOException(
   "Interrupted while getting the last block length.");
 }
   }
@@ -379,6 +381,7 @@ public class DFSInputStream extends FSInputStream
   return n;
 }
   } catch (IOException ioe) {
+checkInterrupted(ioe);
 if (ioe instanceof RemoteException) {
   if (((RemoteException) ioe).unwrapRemoteException() instanceof
   ReplicaNotFoundException) {
@@ -414,7 +417,8 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(500); // delay between retries.
 } catch (InterruptedException e) {
-  throw new IOException("Interrupted while getting the length.");
+  throw new InterruptedIOException(
+  "Interrupted while getting the length.");
 }
   }
 
@@ -660,6 +664,7 @@ public class DFSInputStream extends FSInputStream
 }
 return chosenNode;
   } catch (IOException ex) {
+checkInterrupted(ex);
 if (ex instanceof InvalidEncryptionKeyException && 
refetchEncryptionKey > 0) {
   DFSClient.LOG.info("Will fetch a new encryption key and retry, "
   + "encryption key was invalid when connecting to " + targetAddr
@@ -681,6 +686,15 @@ public class DFSInputStream extends FSInputStream
 }
   }
 
+  private void checkInterrupted(IOException e) throws IOException {
+if (Thread.currentThread().isInterrupted() &&
+(e instanceof ClosedByInterruptException ||
+e instanceof InterruptedIOException)) {
+  DFSClient.LOG.debug("The reading thread has been interrupted.", e);
+  throw e;
+}
+  }
+
   protected BlockReader getBlockReader(LocatedBlock targetBlock,
   long offsetInBlock, long length, InetSocketAddress targetAddr,
   StorageType storageType, DatanodeInfo datanode) throws IOException {
@@ -948,6 +962,7 @@ public class DFSInputStream extends FSInputStream
 } catch (ChecksumException ce) {
   throw ce;
 } catch (IOException e) {
+  checkInterrupted(e);
   if (retries == 1) {
 DFSClient.LOG.warn("DFS Read", e);
   }
@@ -1044,9 +1059,12 @@ public class DFSInputStream extends FSInputStream
   // expanding time window for each failure
   timeWindow * (failures + 1) *
   ThreadLocalRandom.current().nextDouble();
-  DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) + " 
IOException, will wait for " + waitTime + " msec.");
+  DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) +
+  " IOException, will wait for " + waitTime + " msec.");
   Thread.sleep((long)waitTime);
-} catch (InterruptedException ignored) {
+} catch 
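
The pattern behind this patch, reduced to a standalone sketch rather than the HDFS code itself: a retry loop's sleep must convert InterruptedException into InterruptedIOException and restore the interrupt flag, instead of wrapping it in a generic IOException that callers cannot distinguish from an ordinary read failure.

import java.io.IOException;
import java.io.InterruptedIOException;

public class InterruptAwareRetry {
  // Sleep between retries; surface a thread interrupt instead of
  // swallowing it, so the caller's read can stop promptly.
  static void sleepBetweenRetries(long millis) throws IOException {
    try {
      Thread.sleep(millis);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore the interrupt flag
      throw new InterruptedIOException("Interrupted between retries.");
    }
  }
}
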

[43/50] [abbrv] hadoop git commit: HADOOP-12807 S3AFileSystem should read AWS credentials from environment variables. Contributed by Tobin Baker.

2016-06-07 Thread jianhe
HADOOP-12807 S3AFileSystem should read AWS credentials from environment 
variables. Contributed by Tobin Baker.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3f78d8f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3f78d8f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3f78d8f

Branch: refs/heads/YARN-4757
Commit: a3f78d8fa83f07f9183f3546203a191fcf50008c
Parents: 4a1cedc
Author: Steve Loughran 
Authored: Mon Jun 6 23:40:49 2016 +0200
Committer: Steve Loughran 
Committed: Mon Jun 6 23:42:36 2016 +0200

--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java  |  2 ++
 .../src/site/markdown/tools/hadoop-aws/index.md  | 19 +++
 2 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3f78d8f/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index c028544..0281a3a 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 
+import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.AmazonServiceException;
 import com.amazonaws.ClientConfiguration;
@@ -464,6 +465,7 @@ public class S3AFileSystem extends FileSystem {
   new BasicAWSCredentialsProvider(
   creds.getAccessKey(), creds.getAccessSecret()),
   new InstanceProfileCredentialsProvider(),
+  new EnvironmentVariableCredentialsProvider(),
   new AnonymousAWSCredentialsProvider()
   );
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3f78d8f/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 7a5e455..7d63a86 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -202,6 +202,25 @@ credentials in S3AFileSystem.
 For additional reading on the credential provider API see:
 [Credential Provider 
API](../../../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
 
+#### Authenticating via environment variables
+
+S3A supports configuration via [the standard AWS environment 
variables](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-environment).
+
+The core environment variables are for the access key and associated secret:
+
+```
+export AWS_ACCESS_KEY_ID=my.aws.key
+export AWS_SECRET_ACCESS_KEY=my.secret.key
+```
+
+These environment variables can be used to set the authentication credentials
+instead of properties in the Hadoop configuration. *Important:* these
+environment variables are not propagated from client to server when
+YARN applications are launched. That is: having the AWS environment variables
+set when an application is launched will not permit the launched application
+to access S3 resources. The environment variables must (somehow) be set
+on the hosts/processes where the work is executed.
+
 # End to End Steps for Distcp and S3 with Credential Providers
 
 ## provision
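
A minimal sketch of the client side, assuming the two variables above are exported in the process environment and that hadoop-aws plus the AWS SDK are on the classpath; the bucket name is illustrative. No access or secret key appears in the Configuration, so the provider chain falls through to EnvironmentVariableCredentialsProvider.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class S3AEnvAuthExample {
  public static void main(String[] args) throws Exception {
    // No fs.s3a.access.key / fs.s3a.secret.key set here: credentials are
    // read from AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY in the process
    // environment. "my-bucket" is an illustrative name.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("s3a://my-bucket/"), conf);
    System.out.println(fs.exists(new Path("/")));
  }
}
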





[49/50] [abbrv] hadoop git commit: HADOOP-10048. LocalDirAllocator should avoid holding locks while accessing the filesystem. Contributed by Jason Lowe.

2016-06-07 Thread jianhe
HADOOP-10048. LocalDirAllocator should avoid holding locks while accessing the 
filesystem. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c14c1b29
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c14c1b29
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c14c1b29

Branch: refs/heads/YARN-4757
Commit: c14c1b298e29e799f7c8f15ff24d7eba6e0cd39b
Parents: e620530
Author: Junping Du 
Authored: Tue Jun 7 09:18:58 2016 -0700
Committer: Junping Du 
Committed: Tue Jun 7 09:18:58 2016 -0700

--
 .../org/apache/hadoop/fs/LocalDirAllocator.java | 153 ---
 1 file changed, 94 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c14c1b29/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
index 70cf87d..b14e1f0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -20,9 +20,10 @@ package org.apache.hadoop.fs;
 
 import java.io.*;
 import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.commons.logging.*;
-
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -247,74 +248,101 @@ public class LocalDirAllocator {
 private final Log LOG =
   LogFactory.getLog(AllocatorPerContext.class);
 
-private int dirNumLastAccessed;
 private Random dirIndexRandomizer = new Random();
-private FileSystem localFS;
-private DF[] dirDF = new DF[0];
 private String contextCfgItemName;
-private String[] localDirs = new String[0];
-private String savedLocalDirs = "";
+
+// NOTE: the context must be accessed via a local reference as it
+//   may be updated at any time to reference a different context
+private AtomicReference<Context> currentContext;
+
+private static class Context {
+  private AtomicInteger dirNumLastAccessed = new AtomicInteger(0);
+  private FileSystem localFS;
+  private DF[] dirDF;
+  private Path[] localDirs;
+  private String savedLocalDirs;
+
+  public int getAndIncrDirNumLastAccessed() {
+return getAndIncrDirNumLastAccessed(1);
+  }
+
+  public int getAndIncrDirNumLastAccessed(int delta) {
+if (localDirs.length < 2 || delta == 0) {
+  return dirNumLastAccessed.get();
+}
+int oldval, newval;
+do {
+  oldval = dirNumLastAccessed.get();
+  newval = (oldval + delta) % localDirs.length;
+} while (!dirNumLastAccessed.compareAndSet(oldval, newval));
+return oldval;
+  }
+}
 
 public AllocatorPerContext(String contextCfgItemName) {
   this.contextCfgItemName = contextCfgItemName;
+  this.currentContext = new AtomicReference<Context>(new Context());
 }
 
 /** This method gets called everytime before any read/write to make sure
  * that any change to localDirs is reflected immediately.
  */
-private synchronized void confChanged(Configuration conf) 
+private Context confChanged(Configuration conf)
 throws IOException {
+  Context ctx = currentContext.get();
   String newLocalDirs = conf.get(contextCfgItemName);
   if (null == newLocalDirs) {
 throw new IOException(contextCfgItemName + " not configured");
   }
-  if (!newLocalDirs.equals(savedLocalDirs)) {
-localDirs = StringUtils.getTrimmedStrings(newLocalDirs);
-localFS = FileSystem.getLocal(conf);
-int numDirs = localDirs.length;
-ArrayList<String> dirs = new ArrayList<String>(numDirs);
+  if (!newLocalDirs.equals(ctx.savedLocalDirs)) {
+ctx = new Context();
+String[] dirStrings = StringUtils.getTrimmedStrings(newLocalDirs);
+ctx.localFS = FileSystem.getLocal(conf);
+int numDirs = dirStrings.length;
+ArrayList<Path> dirs = new ArrayList<Path>(numDirs);
 ArrayList<DF> dfList = new ArrayList<DF>(numDirs);
 for (int i = 0; i < numDirs; i++) {
   try {
 // filter problematic directories
-Path tmpDir = new Path(localDirs[i]);
-if(localFS.mkdirs(tmpDir)|| localFS.exists(tmpDir)) {
+Path tmpDir = new Path(dirStrings[i]);
+if(ctx.localFS.mkdirs(tmpDir)|| ctx.localFS.exists(tmpDir)) 
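
The concurrency idea in this patch, as a standalone sketch: replace a synchronized round-robin counter with a compareAndSet loop, so no lock is held while the allocator touches the filesystem. This illustrates the technique; it is not the committed class.

import java.util.concurrent.atomic.AtomicInteger;

public class RoundRobinIndex {
  private final AtomicInteger last = new AtomicInteger(0);
  private final int size;

  RoundRobinIndex(int size) { this.size = size; }

  // Advance the shared index without a lock: read, compute the successor,
  // and retry the compareAndSet if another thread won the race.
  int getAndIncrement(int delta) {
    int oldVal, newVal;
    do {
      oldVal = last.get();
      newVal = (oldVal + delta) % size;
    } while (!last.compareAndSet(oldVal, newVal));
    return oldVal;
  }
}
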

[21/50] [abbrv] hadoop git commit: HADOOP-13105. Support timeouts in LDAP queries in LdapGroupsMapping. Contributed by Mingliang Liu.

2016-06-07 Thread jianhe
HADOOP-13105. Support timeouts in LDAP queries in LdapGroupsMapping. 
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d82bc850
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d82bc850
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d82bc850

Branch: refs/heads/YARN-4757
Commit: d82bc8501869be78780fc09752dbf7af918c14af
Parents: 78b3a03
Author: Chris Nauroth 
Authored: Fri Jun 3 16:38:30 2016 -0700
Committer: Chris Nauroth 
Committed: Fri Jun 3 16:38:30 2016 -0700

--
 .../hadoop/security/LdapGroupsMapping.java  |  12 ++
 .../src/main/resources/core-default.xml |  24 
 .../hadoop/security/TestLdapGroupsMapping.java  | 140 +++
 3 files changed, 176 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d82bc850/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index 498b92e..da87369 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -179,6 +179,13 @@ public class LdapGroupsMapping
 LDAP_CONFIG_PREFIX + ".directory.search.timeout";
  public static final int DIRECTORY_SEARCH_TIMEOUT_DEFAULT = 10000; // 10s
 
+  public static final String CONNECTION_TIMEOUT =
+  LDAP_CONFIG_PREFIX + ".connection.timeout.ms";
+  public static final int CONNECTION_TIMEOUT_DEFAULT = 60 * 1000; // 60 seconds
+  public static final String READ_TIMEOUT =
+  LDAP_CONFIG_PREFIX + ".read.timeout.ms";
+  public static final int READ_TIMEOUT_DEFAULT = 60 * 1000; // 60 seconds
+
   private static final Log LOG = LogFactory.getLog(LdapGroupsMapping.class);
 
   private static final SearchControls SEARCH_CONTROLS = new SearchControls();
@@ -432,6 +439,11 @@ public class LdapGroupsMapping
   env.put(Context.SECURITY_PRINCIPAL, bindUser);
   env.put(Context.SECURITY_CREDENTIALS, bindPassword);
 
+  env.put("com.sun.jndi.ldap.connect.timeout", conf.get(CONNECTION_TIMEOUT,
+  String.valueOf(CONNECTION_TIMEOUT_DEFAULT)));
+  env.put("com.sun.jndi.ldap.read.timeout", conf.get(READ_TIMEOUT,
+  String.valueOf(READ_TIMEOUT_DEFAULT)));
+
   ctx = new InitialDirContext(env);
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d82bc850/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index b3f8cd5..a65246b 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -166,6 +166,30 @@
 </property>
 
 <property>
+  <name>hadoop.security.group.mapping.ldap.connection.timeout.ms</name>
+  <value>60000</value>
+  <description>
+    This property is the connection timeout (in milliseconds) for LDAP
+    operations. If the LDAP provider doesn't establish a connection within the
+    specified period, it will abort the connect attempt. Non-positive value
+    means no LDAP connection timeout is specified in which case it waits for
+    the connection to establish until the underlying network times out.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.read.timeout.ms</name>
+  <value>60000</value>
+  <description>
+    This property is the read timeout (in milliseconds) for LDAP
+    operations. If the LDAP provider doesn't get a LDAP response within the
+    specified period, it will abort the read attempt. Non-positive value
+    means no read timeout is specified in which case it waits for the response
+    infinitely.
+  </description>
+</property>
+
+<property>
   <name>hadoop.security.group.mapping.ldap.url</name>
   <value></value>
   <description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d82bc850/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
index 9319016..9f9f994 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
+++ 
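
As a hedged usage sketch, not part of the commit: a deployment that wants to fail faster than the 60-second defaults can set the two new keys directly. The 5s/10s values below are illustrative.

import org.apache.hadoop.conf.Configuration;

public class LdapTimeoutConfig {
  public static Configuration withLdapTimeouts() {
    Configuration conf = new Configuration();
    // Illustrative values: abort a connect after 5s and a read after 10s
    // instead of waiting out the 60s defaults introduced by this patch.
    conf.setInt("hadoop.security.group.mapping.ldap.connection.timeout.ms",
        5000);
    conf.setInt("hadoop.security.group.mapping.ldap.read.timeout.ms", 10000);
    return conf;
  }
}
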

[25/50] [abbrv] hadoop git commit: HDFS-10481. HTTPFS server should correctly impersonate as end user to open file. Contributed by Xiao Chen.

2016-06-07 Thread jianhe
HDFS-10481. HTTPFS server should correctly impersonate as end user to open 
file. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47e0321e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47e0321e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47e0321e

Branch: refs/heads/YARN-4757
Commit: 47e0321ee91149331e6ae72e7caa41d1de078b6c
Parents: 99a771c
Author: Andrew Wang 
Authored: Fri Jun 3 17:21:17 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 17:21:17 2016 -0700

--
 .../hadoop/fs/http/server/HttpFSServer.java | 218 ++-
 1 file changed, 114 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47e0321e/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index b7b63fa..db4692a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -79,6 +79,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 import java.security.AccessControlException;
+import java.security.PrivilegedExceptionAction;
 import java.text.MessageFormat;
 import java.util.EnumSet;
 import java.util.List;
@@ -94,6 +95,7 @@ import java.util.Map;
 @InterfaceAudience.Private
 public class HttpFSServer {
   private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
+  private static final Logger LOG = 
LoggerFactory.getLogger(HttpFSServer.class);
 
   /**
* Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem 
for the effective
@@ -205,115 +207,123 @@ public class HttpFSServer {
 MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
 MDC.put("hostname", request.getRemoteAddr());
 switch (op.value()) {
-  case OPEN: {
-//Invoking the command directly using an unmanaged FileSystem that is
-// released by the FileSystemReleaseFilter
-FSOperations.FSOpen command = new FSOperations.FSOpen(path);
-FileSystem fs = createFileSystem(user);
-InputStream is = command.execute(fs);
-Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
-Long len = params.get(LenParam.NAME, LenParam.class);
-AUDIT_LOG.info("[{}] offset [{}] len [{}]",
-   new Object[]{path, offset, len});
-InputStreamEntity entity = new InputStreamEntity(is, offset, len);
-response =
+case OPEN: {
+  //Invoking the command directly using an unmanaged FileSystem that is
+  // released by the FileSystemReleaseFilter
+  final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
+  final FileSystem fs = createFileSystem(user);
+  InputStream is = null;
+  UserGroupInformation ugi = UserGroupInformation
+  .createProxyUser(user.getShortUserName(),
+  UserGroupInformation.getLoginUser());
+  try {
+is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
+  @Override
+  public InputStream run() throws Exception {
+return command.execute(fs);
+  }
+});
+  } catch (InterruptedException ie) {
+LOG.info("Open interrupted.", ie);
+Thread.currentThread().interrupt();
+  }
+  Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
+  Long len = params.get(LenParam.NAME, LenParam.class);
+  AUDIT_LOG.info("[{}] offset [{}] len [{}]",
+  new Object[] { path, offset, len });
+  InputStreamEntity entity = new InputStreamEntity(is, offset, len);
+  response =
   Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
-break;
-  }
-  case GETFILESTATUS: {
-FSOperations.FSFileStatus command =
-  new FSOperations.FSFileStatus(path);
-Map json = fsExecute(user, command);
-AUDIT_LOG.info("[{}]", path);
-response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-break;
-  }
-  case LISTSTATUS: {
-String filter = params.get(FilterParam.NAME, FilterParam.class);
-FSOperations.FSListStatus command = new FSOperations.FSListStatus(
-  path, filter);
-Map json = fsExecute(user, command);
-AUDIT_LOG.info("[{}] filter [{}]", path,
-   (filter != 
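
Reduced to its essence, the fix applies the standard proxy-user pattern below. This is a simplified sketch of that pattern, not the HttpFSServer code itself; the helper name is illustrative.

import java.io.InputStream;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyOpen {
  // Run the open() as a proxy user so permission checks on the NameNode
  // see the end user, not the service's login identity.
  static InputStream openAs(final String endUser, final FileSystem fs,
      final Path path) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.createProxyUser(
        endUser, UserGroupInformation.getLoginUser());
    return ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
      @Override
      public InputStream run() throws Exception {
        return fs.open(path);
      }
    });
  }
}
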

[44/50] [abbrv] hadoop git commit: HDFS-10458. getFileEncryptionInfo should return quickly for non-encrypted cluster.

2016-06-07 Thread jianhe
HDFS-10458. getFileEncryptionInfo should return quickly for non-encrypted 
cluster.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6de9213d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6de9213d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6de9213d

Branch: refs/heads/YARN-4757
Commit: 6de9213df111a9a4ed875db995d67af72d08a798
Parents: a3f78d8
Author: Zhe Zhang 
Authored: Mon Jun 6 15:52:39 2016 -0700
Committer: Zhe Zhang 
Committed: Mon Jun 6 15:52:39 2016 -0700

--
 .../server/namenode/EncryptionZoneManager.java  | 35 +---
 .../server/namenode/FSDirEncryptionZoneOp.java  |  2 +-
 2 files changed, 31 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6de9213d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 8454c04..41dbb59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -95,7 +95,7 @@ public class EncryptionZoneManager {
 }
   }
 
-  private final TreeMap<Long, EncryptionZoneInt> encryptionZones;
+  private TreeMap<Long, EncryptionZoneInt> encryptionZones = null;
   private final FSDirectory dir;
   private final int maxListEncryptionZonesResponses;
 
@@ -106,7 +106,6 @@ public class EncryptionZoneManager {
*/
   public EncryptionZoneManager(FSDirectory dir, Configuration conf) {
 this.dir = dir;
-encryptionZones = new TreeMap<Long, EncryptionZoneInt>();
 maxListEncryptionZonesResponses = conf.getInt(
 DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
 DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT
@@ -143,6 +142,9 @@ public class EncryptionZoneManager {
   CipherSuite suite, CryptoProtocolVersion version, String keyName) {
 final EncryptionZoneInt ez = new EncryptionZoneInt(
 inodeId, suite, version, keyName);
+if (encryptionZones == null) {
+  encryptionZones = new TreeMap<>();
+}
 encryptionZones.put(inodeId, ez);
   }
 
@@ -153,7 +155,9 @@ public class EncryptionZoneManager {
*/
   void removeEncryptionZone(Long inodeId) {
 assert dir.hasWriteLock();
-encryptionZones.remove(inodeId);
+if (hasCreatedEncryptionZone()) {
+  encryptionZones.remove(inodeId);
+}
   }
 
   /**
@@ -201,6 +205,9 @@ public class EncryptionZoneManager {
   private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) {
 assert dir.hasReadLock();
 Preconditions.checkNotNull(iip);
+if (!hasCreatedEncryptionZone()) {
+  return null;
+}
List<INode> inodes = iip.getReadOnlyINodes();
 for (int i = inodes.size() - 1; i >= 0; i--) {
   final INode inode = inodes.get(i);
@@ -313,7 +320,8 @@ public class EncryptionZoneManager {
   throw new IOException("Attempt to create an encryption zone for a 
file.");
 }
 
-if (encryptionZones.get(srcINode.getId()) != null) {
+if (hasCreatedEncryptionZone() && encryptionZones.
+get(srcINode.getId()) != null) {
   throw new IOException("Directory " + src + " is already an encryption " +
   "zone.");
 }
@@ -340,6 +348,9 @@ public class EncryptionZoneManager {
  BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
   throws IOException {
 assert dir.hasReadLock();
+if (!hasCreatedEncryptionZone()) {
+  return new BatchedListEntries<EncryptionZone>(
+      Lists.<EncryptionZone>newArrayList(), false);
+}
NavigableMap<Long, EncryptionZoneInt> tailMap = encryptionZones.tailMap
(prevId, false);
 final int numResponses = Math.min(maxListEncryptionZonesResponses,
@@ -379,7 +390,18 @@ public class EncryptionZoneManager {
* @return number of encryption zones.
*/
   public int getNumEncryptionZones() {
-return encryptionZones.size();
+return hasCreatedEncryptionZone() ?
+encryptionZones.size() : 0;
+  }
+
+  /**
+   * @return Whether there has been any attempt to create an encryption zone in
+   * the cluster at all. If not, it is safe to quickly return null when
+   * checking the encryption information of any file or directory in the
+   * cluster.
+   */
+  public boolean hasCreatedEncryptionZone() {
+return encryptionZones != null;
   }
 
   /**
@@ -387,6 +409,9 @@ public class 
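
The underlying pattern, as a generic standalone sketch (illustrative, not the committed class): allocate the map only on first write so read paths can short-circuit on a single null check instead of probing an always-empty structure.

import java.util.TreeMap;

public class LazyRegistry<K extends Comparable<K>, V> {
  // Stays null until the first entry is created.
  private TreeMap<K, V> entries = null;

  public void put(K key, V value) {
    if (entries == null) {
      entries = new TreeMap<>();
    }
    entries.put(key, value);
  }

  public V get(K key) {
    return entries == null ? null : entries.get(key); // fast path
  }

  public boolean hasAnyEntry() {
    return entries != null;
  }
}
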

[47/50] [abbrv] hadoop git commit: YARN-5118. Tests fails with localizer port bind exception. Contributed by Brahma Reddy Battula.

2016-06-07 Thread jianhe
YARN-5118. Tests fails with localizer port bind exception. Contributed by 
Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bddea5fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bddea5fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bddea5fe

Branch: refs/heads/YARN-4757
Commit: bddea5fe5fe72eee8e2ecfcec616bd8ceb4d72e7
Parents: 3a154f7
Author: Rohith Sharma K S 
Authored: Tue Jun 7 11:20:15 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Jun 7 11:20:15 2016 +0530

--
 .../apache/hadoop/yarn/server/nodemanager/TestEventFlow.java  | 3 +++
 .../server/nodemanager/TestNodeStatusUpdaterForLabels.java| 7 +++
 .../containermanager/BaseContainerManagerTest.java| 3 +++
 3 files changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddea5fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
index f126080..a9ff83c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
@@ -25,6 +25,7 @@ import java.util.List;
 
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
@@ -91,6 +92,8 @@ public class TestEventFlow {
 conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
 conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, 
 remoteLogDir.getAbsolutePath());
+conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:"
++ ServerSocketUtil.getPort(8040, 10));
 
 ContainerExecutor exec = new DefaultContainerExecutor();
 exec.setConf(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddea5fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
index 563104e..257e18c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
@@ -28,6 +28,7 @@ import java.nio.ByteBuffer;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.service.ServiceOperations;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -233,6 +234,9 @@ public class TestNodeStatusUpdaterForLabels extends 
NodeLabelTestBase {
 
 YarnConfiguration conf = createNMConfigForDistributeNodeLabels();
 conf.setLong(YarnConfiguration.NM_NODE_LABELS_RESYNC_INTERVAL, 2000);
+conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:"
++ ServerSocketUtil.getPort(8040, 10));
+
 nm.init(conf);
 resourceTracker.resetNMHeartbeatReceiveFlag();
 nm.start();
@@ -329,6 +333,9 @@ public class TestNodeStatusUpdaterForLabels extends 
NodeLabelTestBase {
 };
 dummyLabelsProviderRef.setNodeLabels(toNodeLabelSet("P"));
 YarnConfiguration conf = createNMConfigForDistributeNodeLabels();
+conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:"
++ 
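
A condensed sketch of the test-setup idiom this patch introduces. The helper method name is illustrative; ServerSocketUtil.getPort and NM_LOCALIZER_ADDRESS are taken from the patch itself.

import java.io.IOException;
import org.apache.hadoop.net.ServerSocketUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class LocalizerPortSetup {
  // Probe from the default localizer port (8040), retrying up to 10
  // alternative ports, so parallel test JVMs do not race for the same
  // hard-coded bind address.
  static YarnConfiguration withFreeLocalizerPort() throws IOException {
    YarnConfiguration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS,
        "0.0.0.0:" + ServerSocketUtil.getPort(8040, 10));
    return conf;
  }
}
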

[27/50] [abbrv] hadoop git commit: Revert "HDFS-10430. Reuse FileSystem#access in TestAsyncDFS. Contributed by Xiaobing Zhou."

2016-06-07 Thread jianhe
Revert "HDFS-10430. Reuse FileSystem#access in TestAsyncDFS. Contributed by 
Xiaobing Zhou."

This reverts commit 21890c4239b6a82fd6aab3454ce396efe7b5.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cf47d85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cf47d85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cf47d85

Branch: refs/heads/YARN-4757
Commit: 8cf47d8589badfc07ef4bca3328a420c7c68abbd
Parents: 5360da8
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:12 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:12 2016 -0700

--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 36 +++-
 1 file changed, 35 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf47d85/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
index c7615a9..ddcf492 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
@@ -34,6 +34,7 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -45,16 +46,19 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.TestDFSPermission.PermissionGenerator;
 import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
 import org.apache.hadoop.ipc.AsyncCallLimitExceededException;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -441,7 +445,7 @@ public class TestAsyncDFS {
 for (int i = 0; i < NUM_TESTS; i++) {
   assertTrue(fs.exists(dsts[i]));
   FsPermission fsPerm = new FsPermission(permissions[i]);
-  fs.access(dsts[i], fsPerm.getUserAction());
+  checkAccessPermissions(fs.getFileStatus(dsts[i]), 
fsPerm.getUserAction());
 }
 
 // test setOwner
@@ -470,4 +474,34 @@ public class TestAsyncDFS {
   assertTrue("group2".equals(fs.getFileStatus(dsts[i]).getGroup()));
 }
   }
+
+  static void checkAccessPermissions(FileStatus stat, FsAction mode)
+  throws IOException {
+checkAccessPermissions(UserGroupInformation.getCurrentUser(), stat, mode);
+  }
+
+  static void checkAccessPermissions(final UserGroupInformation ugi,
+  FileStatus stat, FsAction mode) throws IOException {
+FsPermission perm = stat.getPermission();
+String user = ugi.getShortUserName();
+List<String> groups = Arrays.asList(ugi.getGroupNames());
+
+if (user.equals(stat.getOwner())) {
+  if (perm.getUserAction().implies(mode)) {
+return;
+  }
+} else if (groups.contains(stat.getGroup())) {
+  if (perm.getGroupAction().implies(mode)) {
+return;
+  }
+} else {
+  if (perm.getOtherAction().implies(mode)) {
+return;
+  }
+}
+throw new AccessControlException(String.format(
+"Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat
+.getPath(), stat.getOwner(), stat.getGroup(),
+stat.isDirectory() ? "d" : "-", perm));
+  }
 }





[29/50] [abbrv] hadoop git commit: Revert "HDFS-10390. Implement asynchronous setAcl/getAclStatus for DistributedFileSystem. Contributed by Xiaobing Zhou"

2016-06-07 Thread jianhe
Revert "HDFS-10390. Implement asynchronous setAcl/getAclStatus for 
DistributedFileSystem.  Contributed by Xiaobing Zhou"

This reverts commit 02d4e478a398c24a5e5e8ea2b0822a5b9d4a97ae.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b82c74b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b82c74b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b82c74b9

Branch: refs/heads/YARN-4757
Commit: b82c74b9102ba95eae776501ed4484be9edd8c96
Parents: 5ee5912
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:14 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:14 2016 -0700

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  59 
 .../hadoop/hdfs/DistributedFileSystem.java  |   3 -
 .../ClientNamenodeProtocolTranslatorPB.java |  30 +-
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 310 ---
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  |  15 +-
 .../hdfs/server/namenode/FSAclBaseTest.java |  12 +-
 6 files changed, 18 insertions(+), 411 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b82c74b9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
index 29bac2a..6bfd71d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
@@ -19,16 +19,12 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
-import java.util.List;
 import java.util.concurrent.Future;
 
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.util.concurrent.AsyncGetFuture;
 import org.apache.hadoop.ipc.Client;
@@ -87,7 +83,6 @@ public class AsyncDistributedFileSystem {
  public Future<Void> rename(Path src, Path dst,
   final Options.Rename... options) throws IOException {
 dfs.getFsStatistics().incrementWriteOps(1);
-dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.RENAME);
 
 final Path absSrc = dfs.fixRelativePart(src);
 final Path absDst = dfs.fixRelativePart(dst);
@@ -116,7 +111,6 @@ public class AsyncDistributedFileSystem {
  public Future<Void> setPermission(Path p, final FsPermission permission)
   throws IOException {
 dfs.getFsStatistics().incrementWriteOps(1);
-dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_PERMISSION);
 final Path absPath = dfs.fixRelativePart(p);
 final boolean isAsync = Client.isAsynchronousMode();
 Client.setAsynchronousMode(true);
@@ -148,7 +142,6 @@ public class AsyncDistributedFileSystem {
 }
 
 dfs.getFsStatistics().incrementWriteOps(1);
-dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_OWNER);
 final Path absPath = dfs.fixRelativePart(p);
 final boolean isAsync = Client.isAsynchronousMode();
 Client.setAsynchronousMode(true);
@@ -159,56 +152,4 @@ public class AsyncDistributedFileSystem {
   Client.setAsynchronousMode(isAsync);
 }
   }
-
-  /**
-   * Fully replaces ACL of files and directories, discarding all existing
-   * entries.
-   *
-   * @param p
-   *  Path to modify
-   * @param aclSpec
-   *  List describing modifications, must include entries for
-   *  user, group, and others for compatibility with permission bits.
-   * @throws IOException
-   *   if an ACL could not be modified
-   * @return an instance of Future, #get of which is invoked to wait for
-   * asynchronous call being finished.
-   */
-  public Future<Void> setAcl(Path p, final List<AclEntry> aclSpec)
-  throws IOException {
-dfs.getFsStatistics().incrementWriteOps(1);
-dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_ACL);
-final Path absPath = dfs.fixRelativePart(p);
-final boolean isAsync = Client.isAsynchronousMode();
-Client.setAsynchronousMode(true);
-try {
-  dfs.getClient().setAcl(dfs.getPathName(absPath), aclSpec);
-  return getReturnValue();
-} finally {
-  

[41/50] [abbrv] hadoop git commit: Revert "Revert "HADOOP-13226 Support async call retry and failover.""

2016-06-07 Thread jianhe
Revert "Revert "HADOOP-13226 Support async call retry and failover.""

This reverts commit 5360da8bd9f720384860f411bee081aef13b4bd4.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35f255b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35f255b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35f255b0

Branch: refs/heads/YARN-4757
Commit: 35f255b03b1bb5c94063ec1818af1d253ceee991
Parents: 7e7b1ae
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:43 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:43 2016 +0800

--
 .../dev-support/findbugsExcludeFile.xml |   8 +-
 .../hadoop/io/retry/AsyncCallHandler.java   | 321 +++
 .../org/apache/hadoop/io/retry/CallReturn.java  |  75 +
 .../hadoop/io/retry/RetryInvocationHandler.java | 134 ++--
 .../apache/hadoop/io/retry/RetryPolicies.java   |   4 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |  25 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  13 +-
 .../apache/hadoop/util/concurrent/AsyncGet.java |  17 +-
 .../org/apache/hadoop/ipc/TestAsyncIPC.java |  10 +-
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |   7 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  42 +--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java|  43 +--
 .../apache/hadoop/hdfs/TestAsyncHDFSWithHA.java | 181 +++
 .../hdfs/server/namenode/ha/HATestUtil.java |   9 +-
 14 files changed, 775 insertions(+), 114 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35f255b0/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml 
b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index ab8673b..a644aa5 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -345,7 +345,13 @@

  
 
- 
+ 
+ 
+   
+   
+   
+ 
+
  



http://git-wip-us.apache.org/repos/asf/hadoop/blob/35f255b0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
new file mode 100644
index 000..5a03b03
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
@@ -0,0 +1,321 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.retry;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.AsyncGet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.Method;
+import java.util.LinkedList;
+import java.util.Queue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+
+/** Handle async calls. */
+@InterfaceAudience.Private
+public class AsyncCallHandler {
+  static final Logger LOG = LoggerFactory.getLogger(AsyncCallHandler.class);
+
+  private static final ThreadLocal<AsyncGet<?, Exception>>
+  LOWER_LAYER_ASYNC_RETURN = new ThreadLocal<>();
+  private static final ThreadLocal<AsyncGet<Object, Throwable>>
+  ASYNC_RETURN = new ThreadLocal<>();
+
+  /** @return the async return value from {@link AsyncCallHandler}. */
+  

[46/50] [abbrv] hadoop git commit: YARN-4525. Fix bug in RLESparseResourceAllocation.getRangeOverlapping(). (Ishai Menache and Carlo Curino via asuresh)

2016-06-07 Thread jianhe
YARN-4525. Fix bug in RLESparseResourceAllocation.getRangeOverlapping(). (Ishai 
Menache and Carlo Curino via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a154f75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a154f75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a154f75

Branch: refs/heads/YARN-4757
Commit: 3a154f75ed85d864b3ffd35818992418f2b6aa59
Parents: 7a9b737
Author: Arun Suresh 
Authored: Mon Jun 6 21:18:32 2016 -0700
Committer: Arun Suresh 
Committed: Mon Jun 6 21:18:32 2016 -0700

--
 .../RLESparseResourceAllocation.java|  6 +-
 .../TestRLESparseResourceAllocation.java| 22 
 2 files changed, 27 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a154f75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
index 63defb5..c18a93e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
@@ -510,7 +510,11 @@ public class RLESparseResourceAllocation {
   long previous = a.floorKey(start);
   a = a.tailMap(previous, true);
 }
-a = a.headMap(end, true);
+
+if (end < a.lastKey()) {
+  a = a.headMap(end, true);
+}
+
   }
   RLESparseResourceAllocation ret =
   new RLESparseResourceAllocation(a, resourceCalculator);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a154f75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
index b526484..f8d2a4a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
@@ -283,6 +283,28 @@ public class TestRLESparseResourceAllocation {
   }
 
   @Test
+  public void testRangeOverlapping() {
+ResourceCalculator resCalc = new DefaultResourceCalculator();
+
+RLESparseResourceAllocation r =
+new RLESparseResourceAllocation(resCalc);
+int[] alloc = {10, 10, 10, 10, 10, 10};
+int start = 100;
+Set<Entry<ReservationInterval, Resource>> inputs =
+generateAllocation(start, alloc, false).entrySet();
+for (Entry<ReservationInterval, Resource> ip : inputs) {
+  r.addInterval(ip.getKey(), ip.getValue());
+}
+long s = r.getEarliestStartTime();
+long d = r.getLatestNonNullTime();
+
+// tries to trigger "out-of-range" bug
+r =  r.getRangeOverlapping(s, d);
+r = r.getRangeOverlapping(s-1, d-1);
+r = r.getRangeOverlapping(s+1, d+1);
+  }
+
+  @Test
   public void testBlocks() {
 ResourceCalculator resCalc = new DefaultResourceCalculator();
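
Why the new guard is needed, in a standalone sketch: NavigableMap views returned by headMap/tailMap are range-checked, so asking a view for a bound outside its range throws IllegalArgumentException rather than clamping. The snippet below reproduces the failure mode with a plain TreeMap; it is an illustration, not YARN code.

import java.util.NavigableMap;
import java.util.TreeMap;

public class SubmapRangeDemo {
  public static void main(String[] args) {
    TreeMap<Long, Integer> map = new TreeMap<>();
    map.put(100L, 10);
    map.put(200L, 10);
    // A view whose range is (-inf, 200].
    NavigableMap<Long, Integer> view = map.headMap(200L, true);
    try {
      view.headMap(201L, true); // 201 lies outside the view's range
    } catch (IllegalArgumentException expected) {
      System.out.println("out of range: " + expected.getMessage());
    }
  }
}
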
 





[12/50] [abbrv] hadoop git commit: HDFS-10471. DFSAdmin#SetQuotaCommand's help msg is not correct. Contributed by Yiqun Lin.

2016-06-07 Thread jianhe
HDFS-10471. DFSAdmin#SetQuotaCommand's help msg is not correct. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1df6f573
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1df6f573
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1df6f573

Branch: refs/heads/YARN-4757
Commit: 1df6f5735c9d85e644d99d3ebfc4459490657004
Parents: ead61c4
Author: Akira Ajisaka 
Authored: Fri Jun 3 04:10:32 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Jun 3 04:10:32 2016 +0900

--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 42 ++--
 .../src/test/resources/testHDFSConf.xml |  2 +-
 2 files changed, 22 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1df6f573/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 08d3da5..45c4952 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -165,15 +165,15 @@ public class DFSAdmin extends FsShell {
 private static final String USAGE =
   "-"+NAME+"  ...";
 private static final String DESCRIPTION = 
-  "-setQuota  ...: " +
-  "Set the quota  for each directory .\n" + 
-  "\t\tThe directory quota is a long integer that puts a hard limit\n" +
-  "\t\ton the number of names in the directory tree\n" +
-  "\t\tFor each directory, attempt to set the quota. An error will be 
reported if\n" +
-  "\t\t1. N is not a positive integer, or\n" +
-  "\t\t2. User is not an administrator, or\n" +
-  "\t\t3. The directory does not exist or is a file.\n" +
-  "\t\tNote: A quota of 1 would force the directory to remain empty.\n";
+"-setQuota  ...: " +
+"Set the quota  for each directory .\n" +
+"\t\tThe directory quota is a long integer that puts a hard limit\n" +
+"\t\ton the number of names in the directory tree\n" +
+"\t\tFor each directory, attempt to set the quota. An error will be 
reported if\n" +
+"\t\t1. quota is not a positive integer, or\n" +
+"\t\t2. User is not an administrator, or\n" +
+"\t\t3. The directory does not exist or is a file.\n" +
+"\t\tNote: A quota of 1 would force the directory to remain empty.\n";
 
 private final long quota; // the quota to be set
 
@@ -263,18 +263,18 @@ public class DFSAdmin extends FsShell {
 private static final String USAGE =
   "-"+NAME+"  [-storageType ] ...";
 private static final String DESCRIPTION = USAGE + ": " +
-  "Set the space quota  for each directory .\n" +
-  "\t\tThe space quota is a long integer that puts a hard limit\n" +
-  "\t\ton the total size of all the files under the directory tree.\n" +
-  "\t\tThe extra space required for replication is also counted. E.g.\n" +
-  "\t\ta 1GB file with replication of 3 consumes 3GB of the quota.\n\n" +
-  "\t\tQuota can also be specified with a binary prefix for terabytes,\n" +
-  "\t\tpetabytes etc (e.g. 50t is 50TB, 5m is 5MB, 3p is 3PB).\n" + 
-  "\t\tFor each directory, attempt to set the quota. An error will be 
reported if\n" +
-  "\t\t1. N is not a positive integer, or\n" +
-  "\t\t2. user is not an administrator, or\n" +
-  "\t\t3. the directory does not exist or is a file.\n" +
-  "\t\tThe storage type specific quota is set when -storageType option is 
specified.\n";
+"Set the space quota  for each directory .\n" +
+"\t\tThe space quota is a long integer that puts a hard limit\n" +
+"\t\ton the total size of all the files under the directory tree.\n" +
+"\t\tThe extra space required for replication is also counted. E.g.\n" 
+
+"\t\ta 1GB file with replication of 3 consumes 3GB of the quota.\n\n" +
+"\t\tQuota can also be specified with a binary prefix for 
terabytes,\n" +
+"\t\tpetabytes etc (e.g. 50t is 50TB, 5m is 5MB, 3p is 3PB).\n" +
+"\t\tFor each directory, attempt to set the quota. An error will be 
reported if\n" +
+"\t\t1. quota is not a positive integer or zero, or\n" +
+"\t\t2. user is not an administrator, or\n" +
+"\t\t3. the directory does not exist or is a file.\n" +
+"\t\tThe storage type specific quota is set when -storageType option 
is specified.\n";
 
 private long quota; // the quota to 
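The quota semantics spelled out in this help text can also be exercised programmatically. Below is a minimal sketch, not part of this patch, using the public DistributedFileSystem#setQuota(Path, long, long) API; the path and quota values are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class QuotaSketch {
  public static void main(String[] args) throws Exception {
    try (FileSystem fs = FileSystem.get(new Configuration())) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dir = new Path("/user/example");   // illustrative path

      // Name quota: hard limit on the number of names in the directory
      // tree; a quota of 1 forces the directory to remain empty.
      dfs.setQuota(dir, 1000L, HdfsConstants.QUOTA_DONT_SET);

      // Space quota: hard limit on bytes, with replication counted, so a
      // 1GB file at replication 3 consumes 3GB of this 10GB quota.
      dfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET,
          10L * 1024 * 1024 * 1024);
    }
  }
}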

[09/50] [abbrv] hadoop git commit: HDFS-10367. TestDFSShell.testMoveWithTargetPortEmpty fails with Address bind exception. Contributed by Brahma Reddy Battula.

2016-06-07 Thread jianhe
HDFS-10367. TestDFSShell.testMoveWithTargetPortEmpty fails with Address bind exception. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aadb77e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aadb77e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aadb77e4

Branch: refs/heads/YARN-4757
Commit: aadb77e412ab9d4ad05a0bd8b37d547ba5adad03
Parents: 99675e0
Author: Masatake Iwasaki 
Authored: Thu Jun 2 20:55:04 2016 +0900
Committer: Masatake Iwasaki 
Committed: Thu Jun 2 20:55:04 2016 +0900

--
 .../org/apache/hadoop/net/ServerSocketUtil.java | 39 
 .../org/apache/hadoop/hdfs/TestDFSShell.java|  4 +-
 2 files changed, 42 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aadb77e4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
index a3e1fff..023c1ed 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
@@ -63,4 +63,43 @@ public class ServerSocketUtil {
 }
   }
 
+  /**
+   * Check whether a port is available for binding.
+   *
+   * @param port given port
+   * @return true if the port could be bound, false otherwise
+   */
+  private static boolean isPortAvailable(int port) {
+try (ServerSocket s = new ServerSocket(port)) {
+  return true;
+} catch (IOException e) {
+  return false;
+}
+  }
+
+  /**
+   * Wait till the port is available.
+   *
+   * @param port given port
+   * @param retries number of retries for given port
+   * @return the port, once it is available for binding
+   * @throws InterruptedException if interrupted while sleeping between retries
+   * @throws IOException if the port is still in use after all retries
+   */
+  public static int waitForPort(int port, int retries)
+  throws InterruptedException, IOException {
+int tries = 0;
+while (true) {
+  if (isPortAvailable(port)) {
+return port;
+  } else {
+tries++;
+if (tries >= retries) {
+  throw new IOException(
+  "Port is already in use; giving up after " + tries + " times.");
+}
+Thread.sleep(1000);
+  }
+}
+  }
 }
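To make the intended use of the new helper concrete before the TestDFSShell change below: since waitForPort sleeps one second between attempts, the retries argument roughly bounds the wait in seconds. A minimal usage sketch (the port constant and retry count mirror the test change that follows):

// Block until the default NameNode RPC port can be bound, retrying up to
// 10 times (~10 seconds) before giving up with an IOException.
int port = ServerSocketUtil.waitForPort(
    HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, 10);
// 'port' was bindable a moment ago; a race with another process grabbing
// it is still possible, just far less likely than binding blindly.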

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aadb77e4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index fc3de75..e31de13 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.compress.BZip2Codec;
 import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -567,7 +568,8 @@ public class TestDFSShell {
   cluster = new MiniDFSCluster.Builder(conf)
   .format(true)
   .numDataNodes(2)
-  .nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
+  .nameNodePort(ServerSocketUtil.waitForPort(
+  HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, 10))
   .waitSafeMode(true)
   .build();
   FileSystem srcFs = cluster.getFileSystem();


[38/50] [abbrv] hadoop git commit: Revert "Revert "HDFS-10390. Implement asynchronous setAcl/getAclStatus for DistributedFileSystem. Contributed by Xiaobing Zhou""

2016-06-07 Thread jianhe
Revert "Revert "HDFS-10390. Implement asynchronous setAcl/getAclStatus for 
DistributedFileSystem.  Contributed by Xiaobing Zhou""

This reverts commit b82c74b9102ba95eae776501ed4484be9edd8c96.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3d81f38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3d81f38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3d81f38

Branch: refs/heads/YARN-4757
Commit: b3d81f38da5d3d913e7b7ed498198c899c1e68b7
Parents: 574dcd3
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:30 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:30 2016 +0800

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  59 
 .../hadoop/hdfs/DistributedFileSystem.java  |   3 +
 .../ClientNamenodeProtocolTranslatorPB.java |  30 +-
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 310 +++
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  |  15 +-
 .../hdfs/server/namenode/FSAclBaseTest.java |  12 +-
 6 files changed, 411 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3d81f38/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
index 6bfd71d..29bac2a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
@@ -19,12 +19,16 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.concurrent.Future;
 
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.util.concurrent.AsyncGetFuture;
 import org.apache.hadoop.ipc.Client;
@@ -83,6 +87,7 @@ public class AsyncDistributedFileSystem {
   public Future<Void> rename(Path src, Path dst,
   final Options.Rename... options) throws IOException {
 dfs.getFsStatistics().incrementWriteOps(1);
+dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.RENAME);
 
 final Path absSrc = dfs.fixRelativePart(src);
 final Path absDst = dfs.fixRelativePart(dst);
@@ -111,6 +116,7 @@ public class AsyncDistributedFileSystem {
   public Future<Void> setPermission(Path p, final FsPermission permission)
   throws IOException {
 dfs.getFsStatistics().incrementWriteOps(1);
+dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_PERMISSION);
 final Path absPath = dfs.fixRelativePart(p);
 final boolean isAsync = Client.isAsynchronousMode();
 Client.setAsynchronousMode(true);
@@ -142,6 +148,7 @@ public class AsyncDistributedFileSystem {
 }
 
 dfs.getFsStatistics().incrementWriteOps(1);
+dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_OWNER);
 final Path absPath = dfs.fixRelativePart(p);
 final boolean isAsync = Client.isAsynchronousMode();
 Client.setAsynchronousMode(true);
@@ -152,4 +159,56 @@ public class AsyncDistributedFileSystem {
   Client.setAsynchronousMode(isAsync);
 }
   }
+
+  /**
+   * Fully replaces ACL of files and directories, discarding all existing
+   * entries.
+   *
+   * @param p
+   *  Path to modify
+   * @param aclSpec
+   *  List describing modifications, must include entries for
+   *  user, group, and others for compatibility with permission bits.
+   * @throws IOException
+   *   if an ACL could not be modified
+   * @return an instance of Future, #get of which is invoked to wait for
+   * asynchronous call being finished.
+   */
+  public Future<Void> setAcl(Path p, final List<AclEntry> aclSpec)
+  throws IOException {
+dfs.getFsStatistics().incrementWriteOps(1);
+dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_ACL);
+final Path absPath = dfs.fixRelativePart(p);
+final boolean isAsync = Client.isAsynchronousMode();
+Client.setAsynchronousMode(true);
+try {
+  dfs.getClient().setAcl(dfs.getPathName(absPath), aclSpec);
+  return getReturnValue();
+} 

[26/50] [abbrv] hadoop git commit: Revert "HADOOP-13226 Support async call retry and failover."

2016-06-07 Thread jianhe
Revert "HADOOP-13226 Support async call retry and failover."

This reverts commit 83f2f78c118a7e52aba5104bd97b0acedc96be7b.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5360da8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5360da8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5360da8b

Branch: refs/heads/YARN-4757
Commit: 5360da8bd9f720384860f411bee081aef13b4bd4
Parents: 47e0321
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:09 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:09 2016 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   8 +-
 .../hadoop/io/retry/AsyncCallHandler.java   | 321 ---
 .../org/apache/hadoop/io/retry/CallReturn.java  |  75 -
 .../hadoop/io/retry/RetryInvocationHandler.java | 134 ++--
 .../apache/hadoop/io/retry/RetryPolicies.java   |   4 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |  25 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  13 +-
 .../apache/hadoop/util/concurrent/AsyncGet.java |  17 +-
 .../org/apache/hadoop/ipc/TestAsyncIPC.java |  10 +-
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |   7 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  42 +--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java|  43 ++-
 .../apache/hadoop/hdfs/TestAsyncHDFSWithHA.java | 181 ---
 .../hdfs/server/namenode/ha/HATestUtil.java |   9 +-
 14 files changed, 114 insertions(+), 775 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5360da8b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml 
b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index a644aa5..ab8673b 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -345,13 +345,7 @@

  
 
- 
- 
-   
-   
-   
- 
-
+ 
  



http://git-wip-us.apache.org/repos/asf/hadoop/blob/5360da8b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
deleted file mode 100644
index 5a03b03..000
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.retry;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.concurrent.AsyncGet;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.reflect.Method;
-import java.util.LinkedList;
-import java.util.Queue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReference;
-
-/** Handle async calls. */
-@InterfaceAudience.Private
-public class AsyncCallHandler {
-  static final Logger LOG = LoggerFactory.getLogger(AsyncCallHandler.class);
-
-  private static final ThreadLocal
-  LOWER_LAYER_ASYNC_RETURN = new ThreadLocal<>();
-  private static final ThreadLocal>
-  ASYNC_RETURN = new ThreadLocal<>();
-
-  /** @return the async return value from {@link AsyncCallHandler}. */
-  @InterfaceStability.Unstable
-  

[18/50] [abbrv] hadoop git commit: YARN-5098. Fixed ResourceManager's DelegationTokenRenewer to replace expiring system-tokens if RM stops and only restarts after a long time. Contributed by Jian He.

2016-06-07 Thread jianhe
YARN-5098. Fixed ResourceManager's DelegationTokenRenewer to replace expiring system-tokens if RM stops and only restarts after a long time. Contributed by Jian He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f10ebc67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f10ebc67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f10ebc67

Branch: refs/heads/YARN-4757
Commit: f10ebc67f57a4a2e3cc916c41154ab9b6a4635c9
Parents: 99cc439
Author: Vinod Kumar Vavilapalli 
Authored: Fri Jun 3 13:00:07 2016 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Fri Jun 3 13:00:07 2016 -0700

--
 .../security/DelegationTokenRenewer.java| 27 --
 .../security/TestDelegationTokenRenewer.java| 98 
 2 files changed, 118 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f10ebc67/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index fd12f11..4177ee2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.service.AbstractService;
@@ -459,6 +460,18 @@ public class DelegationTokenRenewer extends AbstractService {
   try {
 renewToken(dttr);
   } catch (IOException ioe) {
+if (ioe instanceof SecretManager.InvalidToken
+&& dttr.maxDate < Time.now()
+&& evt instanceof DelegationTokenRenewerAppRecoverEvent
+&& token.getKind().equals(HDFS_DELEGATION_KIND)) {
+  LOG.info("Failed to renew hdfs token " + dttr
+  + " on recovery as it expired, requesting new hdfs token for 
"
+  + applicationId + ", user=" + evt.getUser(), ioe);
+  requestNewHdfsDelegationTokenAsProxyUser(
+  Arrays.asList(applicationId), evt.getUser(),
+  evt.shouldCancelAtEnd());
+  continue;
+}
 throw new IOException("Failed to renew token: " + dttr.token, ioe);
   }
 }
@@ -485,7 +498,8 @@ public class DelegationTokenRenewer extends AbstractService {
 }
 
 if (!hasHdfsToken) {
-  requestNewHdfsDelegationToken(Arrays.asList(applicationId), evt.getUser(),
+  requestNewHdfsDelegationTokenAsProxyUser(Arrays.asList(applicationId),
+  evt.getUser(),
 shouldCancelAtEnd);
 }
   }
@@ -586,8 +600,7 @@ public class DelegationTokenRenewer extends AbstractService {
 } catch (InterruptedException e) {
   throw new IOException(e);
 }
-LOG.info("Renewed delegation-token= [" + dttr + "], for "
-+ dttr.referringAppIds);
+LOG.info("Renewed delegation-token= [" + dttr + "]");
   }
 
   // Request new hdfs token if the token is about to expire, and remove the old
@@ -625,12 +638,12 @@ public class DelegationTokenRenewer extends AbstractService {
 }
   }
   LOG.info("Token= (" + dttr + ") is expiring, request new token.");
-  requestNewHdfsDelegationToken(applicationIds, dttr.user,
+  requestNewHdfsDelegationTokenAsProxyUser(applicationIds, dttr.user,
   dttr.shouldCancelAtEnd);
 }
   }
 
-  private void requestNewHdfsDelegationToken(
+  private void requestNewHdfsDelegationTokenAsProxyUser(
   Collection referringAppIds,
   String user, boolean shouldCancelAtEnd) throws IOException,
   InterruptedException {
@@ -912,8 +925,8 @@ public class DelegationTokenRenewer extends AbstractService {
   // 

[39/50] [abbrv] hadoop git commit: Revert "Revert "HDFS-10431 Refactor and speedup TestAsyncDFSRename. Contributed by Xiaobing Zhou""

2016-06-07 Thread jianhe
Revert "Revert "HDFS-10431 Refactor and speedup TestAsyncDFSRename.  
Contributed by Xiaobing Zhou""

This reverts commit 5ee5912ebd541d5b4c33ecd46dfdebe1e23b56c3.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db41e6d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db41e6d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db41e6d2

Branch: refs/heads/YARN-4757
Commit: db41e6d285a3b425ffd7c11c7baa8253c7929439
Parents: b3d81f3
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:34 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:34 2016 +0800

--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 233 +++-
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 563 ---
 2 files changed, 313 insertions(+), 483 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db41e6d2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
index 67262dd..ddcf492 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
@@ -29,13 +29,16 @@ import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Random;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 
@@ -43,15 +46,21 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.TestDFSPermission.PermissionGenerator;
 import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
 import org.apache.hadoop.ipc.AsyncCallLimitExceededException;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -63,21 +72,28 @@ import com.google.common.collect.Lists;
  * */
 public class TestAsyncDFS {
   public static final Log LOG = LogFactory.getLog(TestAsyncDFS.class);
-  private static final int NUM_TESTS = 1000;
+  private final short replFactor = 1;
+  private final long blockSize = 512;
+  private long fileLen = blockSize * 3;
+  private final long seed = Time.now();
+  private final Random r = new Random(seed);
+  private final PermissionGenerator permGenerator = new PermissionGenerator(r);
+  private static final int NUM_TESTS = 50;
   private static final int NUM_NN_HANDLER = 10;
-  private static final int ASYNC_CALL_LIMIT = 100;
+  private static final int ASYNC_CALL_LIMIT = 1000;
 
   private Configuration conf;
   private MiniDFSCluster cluster;
   private FileSystem fs;
+  private AsyncDistributedFileSystem adfs;
 
   @Before
   public void setup() throws IOException {
 conf = new HdfsConfiguration();
 // explicitly turn on acl
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
-// explicitly turn on ACL
-conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+// explicitly turn on permission checking
+conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
 // set the limit of max async calls
 conf.setInt(CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
 ASYNC_CALL_LIMIT);
@@ -86,6 +102,7 @@ public class TestAsyncDFS {
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
 cluster.waitActive();
 fs = FileSystem.get(conf);
+adfs = cluster.getFileSystem().getAsyncDistributedFileSystem();

[24/50] [abbrv] hadoop git commit: HDFS-7767. Use the noredirect flag in WebHDFS to allow web browsers to upload files via the NN UI (Ravi Prakash via aw)

2016-06-07 Thread jianhe
HDFS-7767. Use the noredirect flag in WebHDFS to allow web browsers to upload files via the NN UI (Ravi Prakash via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99a771cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99a771cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99a771cd

Branch: refs/heads/YARN-4757
Commit: 99a771cd7a3f792a76ac89c406b82a983c059d28
Parents: 15f0184
Author: Allen Wittenauer 
Authored: Fri Jun 3 17:07:39 2016 -0700
Committer: Allen Wittenauer 
Committed: Fri Jun 3 17:07:39 2016 -0700

--
 .../src/main/webapps/hdfs/explorer.html | 25 +-
 .../src/main/webapps/hdfs/explorer.js   | 51 
 2 files changed, 74 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99a771cd/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 5106006..51f72e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -119,6 +119,23 @@
   
 
 
+
+  
+ 
+   
+ Upload File
+   
+   
+ 
+   
+   
+ Close
+ Upload
+   
+ 
+  
+
   
 
   
@@ -142,7 +159,7 @@
   
 
   
-  
+  
 
   
 
@@ -152,12 +169,16 @@
   
 
   
-  
+  
 
 
 
+
+
+
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99a771cd/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index adb83a8..6fa5f19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -366,5 +366,56 @@
 });
   })
 
+  $('#modal-upload-file-button').click(function() {
+$(this).prop('disabled', true);
+$(this).button('complete');
+var files = []
+var numCompleted = 0
+
+for(var i = 0; i < $('#modal-upload-file-input').prop('files').length; i++) {
+  (function() {
+var file = $('#modal-upload-file-input').prop('files')[i];
+var url = '/webhdfs/v1' + current_directory;
+url = encode_path(append_path(url, file.name));
+url += '?op=CREATE&noredirect=true';
+files.push( { file: file } )
+files[i].request = $.ajax({
+  type: 'PUT',
+  url: url,
+  processData: false,
+  crossDomain: true
+});
+  })()
+ }
+for(var f in files) {
+  (function() {
+var file = files[f];
+file.request.done(function(data) {
+  var url = data['Location'];
+  $.ajax({
+type: 'PUT',
+url: url,
+data: file.file,
+processData: false,
+crossDomain: true
+  }).complete(function(data) {
+numCompleted++;
+if(numCompleted == files.length) {
+  $('#modal-upload-file').modal('hide');
+  $('#modal-upload-file-button').button('reset');
+  browse_directory(current_directory);
+}
+  }).error(function(jqXHR, textStatus, errorThrown) {
+numCompleted++;
+show_err_msg("Couldn't upload the file " + file.file.name + ". "+ 
errorThrown);
+  });
+}).error(function(jqXHR, textStatus, errorThrown) {
+  numCompleted++;
+  show_err_msg("Couldn't find datanode to write file. " + errorThrown);
+});
+  })();
+}
+  });
+
   init();
 })();
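For readers following the protocol rather than the jQuery: with noredirect=true the NameNode answers the CREATE call with a JSON body containing the datanode Location instead of an HTTP 307 redirect, and the client then PUTs the bytes to that location. A rough Java sketch of the same two-step exchange follows; the host, port, path, user, and the Java 11 readAllBytes call are assumptions for illustration.

import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class WebHdfsUploadSketch {
  public static void main(String[] args) throws Exception {
    // Step 1: ask the NameNode where to write; noredirect=true makes the
    // datanode URL come back in the JSON body rather than as a redirect.
    URL create = new URL("http://namenode:50070/webhdfs/v1/tmp/hello.txt"
        + "?op=CREATE&noredirect=true&user.name=hdfs");
    HttpURLConnection nn = (HttpURLConnection) create.openConnection();
    nn.setRequestMethod("PUT");
    String json = new String(nn.getInputStream().readAllBytes(),
        StandardCharsets.UTF_8);
    // A real client would use a JSON parser; a regex keeps the sketch short.
    String location = json.replaceAll(
        ".*\"Location\"\\s*:\\s*\"([^\"]+)\".*", "$1");

    // Step 2: PUT the actual bytes to the datanode URL.
    HttpURLConnection dn = (HttpURLConnection) new URL(location)
        .openConnection();
    dn.setRequestMethod("PUT");
    dn.setDoOutput(true);
    dn.getOutputStream().write("hello".getBytes(StandardCharsets.UTF_8));
    System.out.println("upload status: " + dn.getResponseCode());
  }
}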


[08/50] [abbrv] hadoop git commit: HADOOP-13232. Typo in exception in ValueQueue.java. Contributed by Jiayi Zhou.

2016-06-07 Thread jianhe
HADOOP-13232. Typo in exception in ValueQueue.java. Contributed by Jiayi Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99675e00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99675e00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99675e00

Branch: refs/heads/YARN-4757
Commit: 99675e00df65e84dcc02082b795f1f93c90b794e
Parents: 69555fc
Author: Akira Ajisaka 
Authored: Thu Jun 2 19:14:53 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Jun 2 19:14:53 2016 +0900

--
 .../src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99675e00/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
index f38a6b3..4af 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
@@ -307,7 +307,7 @@ public class ValueQueue <E> {
 ekvs.add(val);
   }
 } catch (Exception e) {
-  throw new IOException("Exeption while contacting value generator ", e);
+  throw new IOException("Exception while contacting value generator ", e);
 }
 return ekvs;
   }


[19/50] [abbrv] hadoop git commit: YARN-5165. Fix NoOvercommitPolicy to take advantage of RLE representation of plan. (Carlo Curino via asuresh)

2016-06-07 Thread jianhe
YARN-5165. Fix NoOvercommitPolicy to take advantage of RLE representation of plan. (Carlo Curino via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db54670e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db54670e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db54670e

Branch: refs/heads/YARN-4757
Commit: db54670e83a84c1d7deff2c225725687cf9e5f14
Parents: f10ebc6
Author: Arun Suresh 
Authored: Fri Jun 3 14:49:32 2016 -0700
Committer: Arun Suresh 
Committed: Fri Jun 3 14:49:32 2016 -0700

--
 .../reservation/NoOverCommitPolicy.java | 38 
 .../planning/TestSimpleCapacityReplanner.java   |  2 +-
 2 files changed, 15 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db54670e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java
index 119520b..814d4b5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java
@@ -21,11 +21,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation;
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ReservationId;
-import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.MismatchedUserException;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.ResourceOverCommitException;
-import org.apache.hadoop.yarn.util.resource.Resources;
 
 /**
  * This policy enforces simple physical cluster capacity constraints, by
@@ -52,29 +50,21 @@ public class NoOverCommitPolicy implements SharingPolicy {
   + oldReservation.getUser() + " != " + reservation.getUser());
 }
 
-long startTime = reservation.getStartTime();
-long endTime = reservation.getEndTime();
-long step = plan.getStep();
+RLESparseResourceAllocation available = plan.getAvailableResourceOverTime(
+reservation.getUser(), reservation.getReservationId(),
+reservation.getStartTime(), reservation.getEndTime());
 
-// for every instant in time, check we are respecting cluster capacity
-for (long t = startTime; t < endTime; t += step) {
-  Resource currExistingAllocTot = plan.getTotalCommittedResources(t);
-  Resource currNewAlloc = reservation.getResourcesAtTime(t);
-  Resource currOldAlloc = Resource.newInstance(0, 0);
-  if (oldReservation != null) {
-oldReservation.getResourcesAtTime(t);
-  }
-  // check the cluster is never over committed
-  // currExistingAllocTot + currNewAlloc - currOldAlloc >
-  // capPlan.getTotalCapacity()
-  if (Resources.greaterThan(plan.getResourceCalculator(), plan
-  .getTotalCapacity(), Resources.subtract(
-  Resources.add(currExistingAllocTot, currNewAlloc), currOldAlloc),
-  plan.getTotalCapacity())) {
-throw new ResourceOverCommitException("Resources at time " + t
-+ " would be overcommitted by " + "accepting reservation: "
-+ reservation.getReservationId());
-  }
+// test the reservation does not exceed what is available
+try {
+  RLESparseResourceAllocation
+  .merge(plan.getResourceCalculator(), plan.getTotalCapacity(),
+  available, reservation.getResourcesOverTime(),
+  RLESparseResourceAllocation.RLEOperator.subtractTestNonNegative,
+  reservation.getStartTime(), reservation.getEndTime());
+} catch (PlanningException p) {
+  throw new ResourceOverCommitException(
+  "Resources at time " + " would be overcommitted by "
+  + "accepting reservation: " + reservation.getReservationId());

[22/50] [abbrv] hadoop git commit: HADOOP-13155. Implement TokenRenewer to renew and cancel delegation tokens in KMS. Contributed by Xiao Chen.

2016-06-07 Thread jianhe
HADOOP-13155. Implement TokenRenewer to renew and cancel delegation tokens in KMS. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/713cb718
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/713cb718
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/713cb718

Branch: refs/heads/YARN-4757
Commit: 713cb71820ad94a5436f35824d07aa12fcba5cc6
Parents: d82bc85
Author: Andrew Wang 
Authored: Fri Jun 3 16:48:54 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 16:48:54 2016 -0700

--
 .../KeyProviderDelegationTokenExtension.java|  30 +++-
 .../crypto/key/kms/KMSClientProvider.java   | 158 +--
 .../key/kms/LoadBalancingKMSClientProvider.java |  21 +++
 .../java/org/apache/hadoop/util/KMSUtil.java|  76 +
 ...rg.apache.hadoop.security.token.TokenRenewer |  14 ++
 .../key/kms/server/KMSAuthenticationFilter.java |   2 +-
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 127 +--
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  38 ++---
 8 files changed, 410 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/713cb718/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
index 2f237c6..9212cbc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
@@ -34,7 +34,7 @@ public class KeyProviderDelegationTokenExtension extends
   new DefaultDelegationTokenExtension();
 
   /**
-   * DelegationTokenExtension is a type of Extension that exposes methods to 
+   * DelegationTokenExtension is a type of Extension that exposes methods
* needed to work with Delegation Tokens.
*/  
   public interface DelegationTokenExtension extends 
@@ -49,8 +49,23 @@ public class KeyProviderDelegationTokenExtension extends
  * @return list of new delegation tokens
  * @throws IOException thrown if IOException if an IO error occurs.
  */
-public Token<?>[] addDelegationTokens(final String renewer,
+Token<?>[] addDelegationTokens(final String renewer,
 Credentials credentials) throws IOException;
+
+/**
+ * Renews the given token.
+ * @param token The token to be renewed.
+ * @return The token's lifetime after renewal, or 0 if it can't be renewed.
+ * @throws IOException
+ */
+long renewDelegationToken(final Token<?> token) throws IOException;
+
+/**
+ * Cancels the given token.
+ * @param token The token to be cancelled.
+ * @throws IOException
+ */
+Void cancelDelegationToken(final Token<?> token) throws IOException;
   }
   
   /**
@@ -65,7 +80,16 @@ public class KeyProviderDelegationTokenExtension extends
 Credentials credentials) {
   return null;
 }
-
+
+@Override
+public long renewDelegationToken(final Token<?> token) throws IOException {
+  return 0;
+}
+
+@Override
+public Void cancelDelegationToken(final Token<?> token) throws IOException {
+  return null;
+}
   }
 
   private KeyProviderDelegationTokenExtension(KeyProvider keyProvider,
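A hypothetical caller sketch for the new hooks, assuming the KeyProviderDelegationTokenExtension wrapper forwards them the same way it forwards addDelegationTokens; kp (a KMS-backed KeyProvider) and the "yarn" renewer are illustrative assumptions:

KeyProviderDelegationTokenExtension ext =
    KeyProviderDelegationTokenExtension
        .createKeyProviderDelegationTokenExtension(kp);
Credentials creds = new Credentials();
Token<?>[] tokens = ext.addDelegationTokens("yarn", creds);

// Renewal returns the new expiry time, or 0 if the token is not renewable.
long newExpiry = ext.renewDelegationToken(tokens[0]);
ext.cancelDelegationToken(tokens[0]);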

http://git-wip-us.apache.org/repos/asf/hadoop/blob/713cb718/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 32ef09c..f4103b4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenRenewer;
+import 

[02/50] [abbrv] hadoop git commit: HADOOP-13162. Consider reducing number of getFileStatus calls in S3AFileSystem.mkdirs. (Rajesh Balamohan via stevel)

2016-06-07 Thread jianhe
HADOOP-13162. Consider reducing number of getFileStatus calls in S3AFileSystem.mkdirs. (Rajesh Balamohan via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58706110
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58706110
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58706110

Branch: refs/heads/YARN-4757
Commit: 587061103097160d8aceb60dbef6958cafdd30ae
Parents: d749cf6
Author: Steve Loughran 
Authored: Wed Jun 1 14:17:18 2016 +0100
Committer: Steve Loughran 
Committed: Wed Jun 1 14:18:20 2016 +0100

--
 .../fs/FileContextCreateMkdirBaseTest.java  | 73 +++-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  3 +
 2 files changed, 73 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58706110/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
index d91091f..c1de27a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
@@ -20,14 +20,15 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
-import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
-import org.apache.commons.logging.impl.Log4JLogger;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
+
 import org.apache.hadoop.test.GenericTestUtils;
 
 /**
@@ -116,7 +117,73 @@ public abstract class FileContextCreateMkdirBaseTest {
 fc.mkdir(f, FileContext.DEFAULT_PERM, true);
 Assert.assertTrue(isDir(fc, f));
   }
- 
+
+  @Test
+  public void testMkdirsRecursiveWithExistingDir() throws IOException {
+Path f = getTestRootPath(fc, "aDir/bDir/cDir");
+fc.mkdir(f, FileContext.DEFAULT_PERM, true);
+assertIsDirectory(fc.getFileStatus(f));
+assertIsDirectory(fc.getFileStatus(f.getParent()));
+assertIsDirectory(fc.getFileStatus(f.getParent().getParent()));
+  }
+
+  @Test
+  public void testMkdirRecursiveWithExistingFile() throws IOException {
+Path f = getTestRootPath(fc, "NonExistant3/aDir");
+fc.mkdir(f, FileContext.DEFAULT_PERM, true);
+assertIsDirectory(fc.getFileStatus(f));
+assertIsDirectory(fc.getFileStatus(f.getParent()));
+
+// create a sample file
+Path filePath = new Path(f.getParent(), "test.txt");
+createFile(fc, filePath);
+assertIsFile(filePath, fc.getFileStatus(filePath));
+
+// try creating another folder which conflicts with filePath
+Path dirPath = new Path(filePath, "bDir/cDir");
+try {
+  fc.mkdir(dirPath, FileContext.DEFAULT_PERM, true);
+  Assert.fail("Mkdir for " + dirPath
+  + " should have failed as a file was present");
+} catch(IOException e) {
+  // failed as expected
+}
+  }
+
+  @Test
+  public void testWithRename() throws IOException, InterruptedException {
+Path root = getTestRootPath(fc);
+Path f = new Path(root, "d1/d2/d3");
+fc.mkdir(f, FileContext.DEFAULT_PERM, true);
+assertIsDirectory(fc.getFileStatus(new Path(root, "d1")));
+assertIsDirectory(fc.getFileStatus(new Path(root, "d1/d2")));
+assertIsDirectory(fc.getFileStatus(new Path(root, "d1/d2/d3")));
+
+// create a sample file f.txt
+Path fPath = new Path(root, "d1/d2/f.txt");
+createFile(fc, fPath);
+assertIsFile(fPath, fc.getFileStatus(fPath));
+assertIsDirectory(fc.getFileStatus(new Path(root, "d1")));
+assertIsDirectory(fc.getFileStatus(new Path(root, "d1/d2")));
+assertIsDirectory(fc.getFileStatus(new Path(root, "d1/d2/d3")));
+
+// create a sample file f2.txt
+Path f2Path = new Path(getTestRootPath(fc), "d1/d2/d3/f2.txt");
+createFile(fc, f2Path);
+assertIsFile(f2Path, fc.getFileStatus(f2Path));
+assertIsDirectory(fc.getFileStatus(new Path(root, "d1")));
+assertIsDirectory(fc.getFileStatus(new Path(root, "d1/d2")));
+assertIsDirectory(fc.getFileStatus(new Path(root, "d1/d2/d3")));
+
+//rename d1/d2/d3 d1/d4
+fc.rename(new Path(root, "d1/d2/d3"), new Path(root, "d1/d4"));
+assertIsDirectory(fc.getFileStatus(new Path(root, "d1")));
+ 

[28/50] [abbrv] hadoop git commit: Revert "HDFS-10431 Refactor and speedup TestAsyncDFSRename. Contributed by Xiaobing Zhou"

2016-06-07 Thread jianhe
Revert "HDFS-10431 Refactor and speedup TestAsyncDFSRename.  Contributed by 
Xiaobing Zhou"

This reverts commit f4b9bcd87c66a39f0c93983431630e9d1b6e36d3.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ee5912e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ee5912e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ee5912e

Branch: refs/heads/YARN-4757
Commit: 5ee5912ebd541d5b4c33ecd46dfdebe1e23b56c3
Parents: 8cf47d8
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:13 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:13 2016 -0700

--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 233 +---
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 563 +++
 2 files changed, 483 insertions(+), 313 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ee5912e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
index ddcf492..67262dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
@@ -29,16 +29,13 @@ import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 
@@ -46,21 +43,15 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.TestDFSPermission.PermissionGenerator;
 import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
 import org.apache.hadoop.ipc.AsyncCallLimitExceededException;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -72,28 +63,21 @@ import com.google.common.collect.Lists;
  * */
 public class TestAsyncDFS {
   public static final Log LOG = LogFactory.getLog(TestAsyncDFS.class);
-  private final short replFactor = 1;
-  private final long blockSize = 512;
-  private long fileLen = blockSize * 3;
-  private final long seed = Time.now();
-  private final Random r = new Random(seed);
-  private final PermissionGenerator permGenerator = new PermissionGenerator(r);
-  private static final int NUM_TESTS = 50;
+  private static final int NUM_TESTS = 1000;
   private static final int NUM_NN_HANDLER = 10;
-  private static final int ASYNC_CALL_LIMIT = 1000;
+  private static final int ASYNC_CALL_LIMIT = 100;
 
   private Configuration conf;
   private MiniDFSCluster cluster;
   private FileSystem fs;
-  private AsyncDistributedFileSystem adfs;
 
   @Before
   public void setup() throws IOException {
 conf = new HdfsConfiguration();
 // explicitly turn on acl
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
-// explicitly turn on permission checking
-conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+// explicitly turn on ACL
+conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
 // set the limit of max async calls
 conf.setInt(CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
 ASYNC_CALL_LIMIT);
@@ -102,7 +86,6 @@ public class TestAsyncDFS {
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
 cluster.waitActive();
 fs = FileSystem.get(conf);
-adfs = cluster.getFileSystem().getAsyncDistributedFileSystem();
   }
 
   

[14/50] [abbrv] hadoop git commit: HADOOP-13171. Add StorageStatistics to S3A; instrument some more operations. Contributed by Steve Loughran.

2016-06-07 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58a59f7/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
new file mode 100644
index 000..d29cb2f
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+/**
+ * Statistics which are collected in S3A.
+ * These statistics are available at a low level in {@link S3AStorageStatistics}
+ * and as metrics in {@link S3AInstrumentation}.
+ */
+public enum Statistic {
+
+  DIRECTORIES_CREATED("directories_created",
+  "Total number of directories created through the object store."),
+  DIRECTORIES_DELETED("directories_deleted",
+  "Total number of directories deleted through the object store."),
+  FILES_COPIED("files_copied",
+  "Total number of files copied within the object store."),
+  FILES_COPIED_BYTES("files_copied_bytes",
+  "Total number of bytes copied within the object store."),
+  FILES_CREATED("files_created",
+  "Total number of files created through the object store."),
+  FILES_DELETED("files_deleted",
+  "Total number of files deleted from the object store."),
+  IGNORED_ERRORS("ignored_errors", "Errors caught and ignored"),
+  INVOCATION_COPY_FROM_LOCAL_FILE("invocations_copyfromlocalfile",
+  "Calls of copyFromLocalFile()"),
+  INVOCATION_EXISTS("invocations_exists",
+  "Calls of exists()"),
+  INVOCATION_GET_FILE_STATUS("invocations_getfilestatus",
+  "Calls of getFileStatus()"),
+  INVOCATION_GLOB_STATUS("invocations_globstatus",
+  "Calls of globStatus()"),
+  INVOCATION_IS_DIRECTORY("invocations_is_directory",
+  "Calls of isDirectory()"),
+  INVOCATION_IS_FILE("invocations_is_file",
+  "Calls of isFile()"),
+  INVOCATION_LIST_FILES("invocations_listfiles",
+  "Calls of listFiles()"),
+  INVOCATION_LIST_LOCATED_STATUS("invocations_listlocatedstatus",
+  "Calls of listLocatedStatus()"),
+  INVOCATION_LIST_STATUS("invocations_liststatus",
+  "Calls of listStatus()"),
+  INVOCATION_MKDIRS("invocations_mdkirs",
+  "Calls of mkdirs()"),
+  INVOCATION_RENAME("invocations_rename",
+  "Calls of rename()"),
+  OBJECT_COPY_REQUESTS("object_copy_requests", "Object copy requests"),
+  OBJECT_DELETE_REQUESTS("object_delete_requests", "Object delete requests"),
+  OBJECT_LIST_REQUESTS("object_list_requests",
+  "Number of object listings made"),
+  OBJECT_METADATA_REQUESTS("object_metadata_requests",
+  "Number of requests for object metadata"),
+  OBJECT_MULTIPART_UPLOAD_ABORTED("object_multipart_aborted",
+  "Object multipart upload aborted"),
+  OBJECT_PUT_REQUESTS("object_put_requests",
+  "Object put/multipart upload count"),
+  OBJECT_PUT_BYTES("object_put_bytes", "number of bytes uploaded"),
+  STREAM_ABORTED("streamAborted",
+  "Count of times the TCP stream was aborted"),
+  STREAM_BACKWARD_SEEK_OPERATIONS("streamBackwardSeekOperations",
+  "Number of executed seek operations which went backwards in a stream"),
+  STREAM_CLOSED("streamClosed", "Count of times the TCP stream was closed"),
+  STREAM_CLOSE_OPERATIONS("streamCloseOperations",
+  "Total count of times an attempt to close a data stream was made"),
+  STREAM_FORWARD_SEEK_OPERATIONS("streamForwardSeekOperations",
+  "Number of executed seek operations which went forward in a stream"),
+  STREAM_OPENED("streamOpened",
+  "Total count of times an input stream to object store was opened"),
+  STREAM_READ_EXCEPTIONS("streamReadExceptions",
+  "Number of seek operations invoked on input streams"),
+  STREAM_READ_FULLY_OPERATIONS("streamReadFullyOperations",
+  "count of readFully() operations in streams"),
+  STREAM_READ_OPERATIONS("streamReadOperations",
+  "Count of read() operations in streams"),
+  STREAM_READ_OPERATIONS_INCOMPLETE("streamReadOperationsIncomplete",
+  "Count of incomplete 

[31/50] [abbrv] hadoop git commit: Revert "HDFS-10346. Implement asynchronous setPermission/setOwner for DistributedFileSystem. Contributed by Xiaobing Zhou"

2016-06-07 Thread jianhe
Revert "HDFS-10346. Implement asynchronous setPermission/setOwner for 
DistributedFileSystem.  Contributed by  Xiaobing Zhou"

This reverts commit 7251bb922b20dae49c8c6854864095fb16d8cbd5.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f23d5dfc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f23d5dfc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f23d5dfc

Branch: refs/heads/YARN-4757
Commit: f23d5dfc60a017187ae57f3667ac0e688877c2dd
Parents: e4450d4
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:17 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:17 2016 -0700

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  59 
 .../ClientNamenodeProtocolTranslatorPB.java |  39 +--
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 267 ++-
 .../apache/hadoop/hdfs/TestDFSPermission.java   |  29 +-
 4 files changed, 43 insertions(+), 351 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f23d5dfc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
index 4fe0861..356ae3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
@@ -27,7 +27,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.ipc.Client;
 
@@ -38,9 +37,6 @@ import com.google.common.util.concurrent.AbstractFuture;
  * This instance of this class is the way end-user code interacts
  * with a Hadoop DistributedFileSystem in an asynchronous manner.
  *
- * This class is unstable, so no guarantee is provided as to reliability,
- * stability or compatibility across any level of release granularity.
- *
  */
 @Unstable
 public class AsyncDistributedFileSystem {
@@ -115,59 +111,4 @@ public class AsyncDistributedFileSystem {
   Client.setAsynchronousMode(isAsync);
 }
   }
-
-  /**
-   * Set permission of a path.
-   *
-   * @param p
-   *  the path the permission is set to
-   * @param permission
-   *  the permission that is set to a path.
-   * @return an instance of Future, #get of which is invoked to wait for
-   * asynchronous call being finished.
-   */
-  public Future<Void> setPermission(Path p, final FsPermission permission)
-  throws IOException {
-dfs.getFsStatistics().incrementWriteOps(1);
-final Path absPath = dfs.fixRelativePart(p);
-final boolean isAsync = Client.isAsynchronousMode();
-Client.setAsynchronousMode(true);
-try {
-  dfs.getClient().setPermission(dfs.getPathName(absPath), permission);
-  return getReturnValue();
-} finally {
-  Client.setAsynchronousMode(isAsync);
-}
-  }
-
-  /**
-   * Set owner of a path (i.e. a file or a directory). The parameters username
-   * and groupname cannot both be null.
-   *
-   * @param p
-   *  The path
-   * @param username
-   *  If it is null, the original username remains unchanged.
-   * @param groupname
-   *  If it is null, the original groupname remains unchanged.
-   * @return an instance of Future, #get of which is invoked to wait for
-   * asynchronous call being finished.
-   */
-  public Future<Void> setOwner(Path p, String username, String groupname)
-  throws IOException {
-if (username == null && groupname == null) {
-  throw new IOException("username == null && groupname == null");
-}
-
-dfs.getFsStatistics().incrementWriteOps(1);
-final Path absPath = dfs.fixRelativePart(p);
-final boolean isAsync = Client.isAsynchronousMode();
-Client.setAsynchronousMode(true);
-try {
-  dfs.getClient().setOwner(dfs.getPathName(absPath), username, groupname);
-  return getReturnValue();
-} finally {
-  Client.setAsynchronousMode(isAsync);
-}
-  }
 }
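For context, the removed API was driven from client code roughly as follows. This is a minimal sketch only: fs is assumed to be an already-initialized DistributedFileSystem, the accessor name is taken from the async-DFS work this revert targets, and exception handling is elided.

    // Hedged sketch of the deleted async API (see the removed methods above).
    AsyncDistributedFileSystem adfs = fs.getAsyncDistributedFileSystem();
    Future<Void> permDone = adfs.setPermission(
        new Path("/tmp/f"), new FsPermission((short) 0644)); // returns immediately
    Future<Void> ownerDone = adfs.setOwner(new Path("/tmp/f"), "hdfs", null);
    permDone.get();   // block only when the caller needs completion
    ownerDone.get();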

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f23d5dfc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

[3/7] hadoop git commit: YARN-4844. Add getMemorySize/getVirtualCoresSize to o.a.h.y.api.records.Resource. (wangda)

2016-06-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19e57887/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 383088a..9572bf5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -324,7 +324,7 @@ public class TestLeafQueue {
 a.assignContainers(clusterResource, node_0, new ResourceLimits(
 clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
 assertEquals(
-(int)(node_0.getTotalResource().getMemory() * a.getCapacity()) - (1*GB),
+(int)(node_0.getTotalResource().getMemorySize() * a.getCapacity()) - (1*GB),
 a.getMetrics().getAvailableMB());
   }
 
@@ -406,7 +406,7 @@ public class TestLeafQueue {
 assertEquals(1, a.getMetrics().getAppsSubmitted());
 assertEquals(1, a.getMetrics().getAppsPending());
 assertEquals(1, a.getUser(user_0).getActiveApplications());
-assertEquals(app_1.getAMResource().getMemory(), a.getMetrics()
+assertEquals(app_1.getAMResource().getMemorySize(), a.getMetrics()
 .getUsedAMResourceMB());
 assertEquals(app_1.getAMResource().getVirtualCores(), a.getMetrics()
 .getUsedAMResourceVCores());
@@ -514,9 +514,9 @@ public class TestLeafQueue {
 // Only 1 container
 a.assignContainers(clusterResource, node_0,
 new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
-assertEquals(1*GB, a.getUsedResources().getMemory());
-assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
-assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+assertEquals(1*GB, a.getUsedResources().getMemorySize());
+assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize());
+assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize());
 assertEquals(0*GB, a.getMetrics().getReservedMB());
 assertEquals(1*GB, a.getMetrics().getAllocatedMB());
 assertEquals(0*GB, a.getMetrics().getAvailableMB());
@@ -525,18 +525,18 @@ public class TestLeafQueue {
 // you can get one container more than user-limit
 a.assignContainers(clusterResource, node_0,
 new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
-assertEquals(2*GB, a.getUsedResources().getMemory());
-assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
-assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+assertEquals(2*GB, a.getUsedResources().getMemorySize());
+assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize());
+assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize());
 assertEquals(0*GB, a.getMetrics().getReservedMB());
 assertEquals(2*GB, a.getMetrics().getAllocatedMB());
 
 // Can't allocate 3rd due to user-limit
 a.assignContainers(clusterResource, node_0,
 new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
-assertEquals(2*GB, a.getUsedResources().getMemory());
-assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
-assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+assertEquals(2*GB, a.getUsedResources().getMemorySize());
+assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize());
+assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize());
 assertEquals(0*GB, a.getMetrics().getReservedMB());
 assertEquals(2*GB, a.getMetrics().getAllocatedMB());
 
@@ -544,18 +544,18 @@ public class TestLeafQueue {
 a.setUserLimitFactor(10);
 a.assignContainers(clusterResource, node_0,
 new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
-assertEquals(3*GB, a.getUsedResources().getMemory());
-assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
-assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+assertEquals(3*GB, a.getUsedResources().getMemorySize());
+assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize());
+assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize());
 assertEquals(0*GB, a.getMetrics().getReservedMB());

[7/7] hadoop git commit: YARN-4844. Add getMemorySize/getVirtualCoresSize to o.a.h.y.api.records.Resource. (wangda)

2016-06-07 Thread wangda
YARN-4844. Add getMemorySize/getVirtualCoresSize to o.a.h.y.api.records.Resource. (wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19e57887
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19e57887
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19e57887

Branch: refs/heads/branch-2.8
Commit: 19e578870d3245354dbd812186d39a86717a407f
Parents: 333490b
Author: Wangda Tan 
Authored: Tue Jun 7 12:41:50 2016 -0700
Committer: Wangda Tan 
Committed: Tue Jun 7 12:41:50 2016 -0700

--
 .../jobhistory/JobHistoryEventHandler.java  |   4 +-
 .../v2/app/job/impl/TaskAttemptImpl.java|   1 +
 .../v2/app/rm/RMContainerAllocator.java |  16 +-
 .../v2/app/rm/ResourceCalculatorUtils.java  |  17 +-
 .../hadoop/mapreduce/v2/app/TestRecovery.java   |   2 +-
 .../v2/app/job/impl/TestTaskAttempt.java|  19 +-
 .../v2/app/rm/TestRMContainerAllocator.java |   2 +-
 .../v2/app/rm/TestResourceCalculatorUtils.java  |   4 +-
 .../apache/hadoop/mapreduce/TypeConverter.java  |   6 +-
 .../org/apache/hadoop/mapreduce/JobStatus.java  |  18 +-
 .../jobhistory/NormalizedResourceEvent.java |   6 +-
 .../org/apache/hadoop/mapreduce/tools/CLI.java  |   7 +-
 .../apache/hadoop/mapred/JobClientUnitTest.java |   6 +-
 .../sls/scheduler/FairSchedulerMetrics.java |  60 +--
 .../sls/scheduler/ResourceSchedulerWrapper.java |  36 +-
 .../hadoop/yarn/api/records/Resource.java   |  34 +-
 .../src/main/proto/yarn_protos.proto|   4 +-
 .../distributedshell/ApplicationMaster.java |   6 +-
 .../applications/distributedshell/Client.java   |   4 +-
 .../yarn/client/api/impl/AMRMClientImpl.java|  16 +-
 .../apache/hadoop/yarn/client/cli/NodeCLI.java  |   4 +-
 .../apache/hadoop/yarn/client/cli/TopCLI.java   |   4 +-
 .../yarn/client/api/impl/TestYarnClient.java|   2 +-
 .../api/records/impl/pb/ResourcePBImpl.java |  32 +-
 .../resource/DefaultResourceCalculator.java |  36 +-
 .../resource/DominantResourceCalculator.java|  46 +-
 .../yarn/util/resource/ResourceCalculator.java  |  10 +-
 .../hadoop/yarn/util/resource/Resources.java|  64 ++-
 .../yarn/util/resource/TestResources.java   |   8 +-
 .../hadoop/yarn/server/utils/BuilderUtils.java  |   2 +-
 .../hadoop/yarn/server/webapp/dao/AppInfo.java  |  10 +-
 .../yarn/server/webapp/dao/ContainerInfo.java   |  10 +-
 .../hadoop/yarn/TestYarnServerApiClasses.java   |   6 +-
 .../server/nodemanager/ContainerExecutor.java   |   2 +-
 .../container/ContainerImpl.java|   4 +-
 .../monitor/ContainersMonitorImpl.java  |   4 +-
 .../nodemanager/metrics/NodeManagerMetrics.java |  12 +-
 .../nodemanager/webapp/dao/ContainerInfo.java   |   2 +-
 .../nodemanager/TestNodeStatusUpdater.java  |   4 +-
 .../monitor/TestContainersMonitor.java  |   2 +-
 .../yarn/server/resourcemanager/RMNMInfo.java   |   4 +-
 .../server/resourcemanager/RMServerUtils.java   |  14 +-
 .../resourcemanager/ResourceTrackerService.java |   2 +-
 .../metrics/SystemMetricsPublisher.java |   2 +-
 .../ProportionalCapacityPreemptionPolicy.java   |  12 +-
 .../AbstractSchedulerPlanFollower.java  |   2 +-
 .../reservation/CapacityOverTimePolicy.java |   6 +-
 .../reservation/ReservationSystemUtil.java  |   2 +-
 .../planning/StageEarliestStartByDemand.java|   2 +-
 .../rmcontainer/RMContainerImpl.java|   2 +-
 .../scheduler/AbstractYarnScheduler.java|  14 +-
 .../resourcemanager/scheduler/QueueMetrics.java |  61 +--
 .../scheduler/SchedulerApplicationAttempt.java  |   6 +-
 .../scheduler/SchedulerUtils.java   |   8 +-
 .../scheduler/capacity/CSQueueMetrics.java  |  24 +-
 .../capacity/CapacityHeadroomProvider.java  |   2 +-
 .../CapacitySchedulerConfiguration.java |   6 +-
 .../scheduler/capacity/LeafQueue.java   |   2 +-
 .../allocator/RegularContainerAllocator.java|   2 +-
 .../scheduler/fair/FSParentQueue.java   |   2 +-
 .../resourcemanager/scheduler/fair/FSQueue.java |  12 +-
 .../scheduler/fair/FSQueueMetrics.java  |  43 +-
 .../scheduler/fair/FairScheduler.java   |   6 +-
 .../fair/policies/ComputeFairShares.java|  18 +-
 .../DominantResourceFairnessPolicy.java |  10 +-
 .../fair/policies/FairSharePolicy.java  |  20 +-
 .../scheduler/fair/policies/FifoPolicy.java |   8 +-
 .../scheduler/fifo/FifoScheduler.java   |  21 +-
 .../scheduler/policy/FairOrderingPolicy.java|   5 +-
 .../webapp/DefaultSchedulerPage.java|   4 +-
 .../webapp/FairSchedulerAppsBlock.java  |   2 +-
 .../resourcemanager/webapp/RMWebServices.java   |   8 +-
 .../resourcemanager/webapp/dao/AppInfo.java |  20 +-
 .../webapp/dao/FairSchedulerInfo.java   |   4 +-
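The point of widening these accessors from int to long is integer overflow: aggregate memory in MB across a large cluster can exceed Integer.MAX_VALUE. A self-contained illustration with hypothetical numbers:

    public class MemoryOverflowDemo {
      public static void main(String[] args) {
        long perNodeMb = 256L * 1024;         // 256 GB per node, expressed in MB
        long nodes = 10_000;                  // hypothetical cluster size
        long clusterMb = perNodeMb * nodes;   // 2,621,440,000 MB total
        System.out.println((int) clusterMb);  // old int getMemory(): -1673527296
        System.out.println(clusterMb);        // new long getMemorySize(): 2621440000
      }
    }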
 

[5/7] hadoop git commit: YARN-4844. Add getMemorySize/getVirtualCoresSize to o.a.h.y.api.records.Resource. (wangda)

2016-06-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19e57887/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index a0e14111..a6093b3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -142,11 +142,11 @@ public class FifoScheduler extends
   QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
   queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
   queueInfo.setCapacity(1.0f);
-  if (clusterResource.getMemory() == 0) {
+  if (clusterResource.getMemorySize() == 0) {
 queueInfo.setCurrentCapacity(0.0f);
   } else {
-queueInfo.setCurrentCapacity((float) usedResource.getMemory()
-/ clusterResource.getMemory());
+queueInfo.setCurrentCapacity((float) usedResource.getMemorySize()
+/ clusterResource.getMemorySize());
   }
   queueInfo.setMaximumCapacity(1.0f);
   queueInfo.setChildQueues(new ArrayList<QueueInfo>());
@@ -671,7 +671,7 @@ public class FifoScheduler extends
 return assignedContainers;
   }
 
-  private int assignContainer(FiCaSchedulerNode node, FiCaSchedulerApp application, 
+  private int assignContainer(FiCaSchedulerNode node, FiCaSchedulerApp application,
   Priority priority, int assignableContainers, 
   ResourceRequest request, NodeType type) {
 LOG.debug("assignContainers:" +
@@ -682,14 +682,11 @@ public class FifoScheduler extends
 " request=" + request + " type=" + type);
 Resource capability = request.getCapability();
 
-int availableContainers = 
-  node.getAvailableResource().getMemory() / capability.getMemory(); // TODO: A buggy
-// application with this zero would crash the scheduler.
-int assignedContainers = 
+int availableContainers =
+(int) (node.getAvailableResource().getMemorySize() / capability
+.getMemorySize());
+// TODO: A buggy application with this zero would crash the scheduler.
+int assignedContainers =
   Math.min(assignableContainers, availableContainers);
 
 if (assignedContainers > 0) {
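The retained TODO still applies: a request whose memory capability is zero makes the division above throw ArithmeticException. A guarded variant, purely illustrative and not part of this patch (node and request are assumed to be in scope as in the surrounding method):

    long availMb = node.getAvailableResource().getMemorySize();
    long capMb = request.getCapability().getMemorySize();
    // Guard the division the TODO warns about instead of crashing the scheduler.
    int availableContainers = (capMb == 0) ? 0 : (int) (availMb / capMb);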

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19e57887/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java
index 04cd53a..3cfcd7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java
@@ -23,7 +23,6 @@ import java.util.*;
 import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.*;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 
 /**
@@ -67,10 +66,10 @@ public class FairOrderingPolicy<S extends SchedulableEntity> extends AbstractComparatorOrderingPolicy<S> {
 
   private double getMagnitude(SchedulableEntity r) {
 double mag = r.getSchedulingResourceUsage().getCachedUsed(
-  

[4/7] hadoop git commit: YARN-4844. Add getMemorySize/getVirtualCoresSize to o.a.h.y.api.records.Resource. (wangda)

2016-06-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19e57887/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
index 6c0d95f..43dd7ba 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
@@ -290,12 +290,12 @@ public class TestApplicationLimits {
 
 // Assert in metrics
 assertEquals(queue.getMetrics().getAMResourceLimitMB(),
-amResourceLimit.getMemory());
+amResourceLimit.getMemorySize());
 assertEquals(queue.getMetrics().getAMResourceLimitVCores(),
 amResourceLimit.getVirtualCores());
 
 assertEquals(
-(int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),
+(int)(clusterResource.getMemorySize() * queue.getAbsoluteCapacity()),
 queue.getMetrics().getAvailableMB()
 );
 
@@ -310,7 +310,7 @@ public class TestApplicationLimits {
   Resource.newInstance(96*GB, 1));
 
 assertEquals(
-(int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),
+(int)(clusterResource.getMemorySize() * queue.getAbsoluteCapacity()),
 queue.getMetrics().getAvailableMB()
 );
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19e57887/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
index 1569a12..2a1c642 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
@@ -171,13 +171,13 @@ public class TestApplicationPriority {
 7, 2 * GB, nm1);
 
 Assert.assertEquals(7, allocated1.size());
-Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
+Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemorySize());
 
 // check node report, 15 GB used (1 AM and 7 containers) and 1 GB available
 SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
 nm1.getNodeId());
-Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemory());
-Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemory());
+Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemorySize());
+Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemorySize());
 
 // Submit the second app App2 with priority 8 (Higher than App1)
 Priority appPriority2 = Priority.newInstance(8);
@@ -189,8 +189,8 @@ public class TestApplicationPriority {
 
 // check node report, 16 GB used and 0 GB available
 report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
-Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory());
-Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
+Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemorySize());
+Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize());
 
 // get scheduler
 CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
@@ -210,8 +210,8 @@ public class TestApplicationPriority {
 
 // check node report, 12 GB used and 4 GB available
 report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
-

[6/7] hadoop git commit: YARN-4844. Add getMemorySize/getVirtualCoresSize to o.a.h.y.api.records.Resource. (wangda)

2016-06-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19e57887/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
index cad3b2e..ff4519d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
@@ -59,8 +59,8 @@ public class AppInfo {
   protected long elapsedTime;
   protected String applicationTags;
   protected int priority;
-  private int allocatedCpuVcores;
-  private int allocatedMemoryMB;
+  private long allocatedCpuVcores;
+  private long allocatedMemoryMB;
   protected boolean unmanagedApplication;
   private String appNodeLabelExpression;
   private String amNodeLabelExpression;
@@ -100,7 +100,7 @@ public class AppInfo {
 allocatedCpuVcores = app.getApplicationResourceUsageReport()
 .getUsedResources().getVirtualCores();
 allocatedMemoryMB = app.getApplicationResourceUsageReport()
-.getUsedResources().getMemory();
+.getUsedResources().getMemorySize();
   }
 }
 progress = app.getProgress() * 100; // in percent
@@ -152,11 +152,11 @@ public class AppInfo {
 return runningContainers;
   }
 
-  public int getAllocatedCpuVcores() {
+  public long getAllocatedCpuVcores() {
 return allocatedCpuVcores;
   }
 
-  public int getAllocatedMemoryMB() {
+  public long getAllocatedMemoryMB() {
 return allocatedMemoryMB;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19e57887/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
index f127f9c..1a5ee85 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
@@ -36,8 +36,8 @@ import org.apache.hadoop.yarn.util.Times;
 public class ContainerInfo {
 
   protected String containerId;
-  protected int allocatedMB;
-  protected int allocatedVCores;
+  protected long allocatedMB;
+  protected long allocatedVCores;
   protected String assignedNodeId;
   protected int priority;
   protected long startedTime;
@@ -57,7 +57,7 @@ public class ContainerInfo {
   public ContainerInfo(ContainerReport container) {
 containerId = container.getContainerId().toString();
 if (container.getAllocatedResource() != null) {
-  allocatedMB = container.getAllocatedResource().getMemory();
+  allocatedMB = container.getAllocatedResource().getMemorySize();
   allocatedVCores = container.getAllocatedResource().getVirtualCores();
 }
 if (container.getAssignedNode() != null) {
@@ -79,11 +79,11 @@ public class ContainerInfo {
 return containerId;
   }
 
-  public int getAllocatedMB() {
+  public long getAllocatedMB() {
 return allocatedMB;
   }
 
-  public int getAllocatedVCores() {
+  public long getAllocatedVCores() {
 return allocatedVCores;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19e57887/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
index c9427dd..e25547d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
+++ 

[2/7] hadoop git commit: YARN-4844. Add getMemorySize/getVirtualCoresSize to o.a.h.y.api.records.Resource. (wangda)

2016-06-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19e57887/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
index db28d2d..547a55d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
@@ -263,88 +263,88 @@ public class TestReservations {
 // Only AM
 a.assignContainers(clusterResource, node_0,
 new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
-assertEquals(2 * GB, a.getUsedResources().getMemory());
-assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory());
+assertEquals(2 * GB, a.getUsedResources().getMemorySize());
+assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
 assertEquals(0 * GB, a.getMetrics().getReservedMB());
 assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
 assertEquals(22 * GB, a.getMetrics().getAvailableMB());
-assertEquals(2 * GB, node_0.getUsedResource().getMemory());
-assertEquals(0 * GB, node_1.getUsedResource().getMemory());
-assertEquals(0 * GB, node_2.getUsedResource().getMemory());
+assertEquals(2 * GB, node_0.getUsedResource().getMemorySize());
+assertEquals(0 * GB, node_1.getUsedResource().getMemorySize());
+assertEquals(0 * GB, node_2.getUsedResource().getMemorySize());
 
 // Only 1 map - simulating reduce
 a.assignContainers(clusterResource, node_0,
 new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
-assertEquals(5 * GB, a.getUsedResources().getMemory());
-assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory());
+assertEquals(5 * GB, a.getUsedResources().getMemorySize());
+assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize());
 assertEquals(0 * GB, a.getMetrics().getReservedMB());
 assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
 assertEquals(19 * GB, a.getMetrics().getAvailableMB());
-assertEquals(5 * GB, node_0.getUsedResource().getMemory());
-assertEquals(0 * GB, node_1.getUsedResource().getMemory());
-assertEquals(0 * GB, node_2.getUsedResource().getMemory());
+assertEquals(5 * GB, node_0.getUsedResource().getMemorySize());
+assertEquals(0 * GB, node_1.getUsedResource().getMemorySize());
+assertEquals(0 * GB, node_2.getUsedResource().getMemorySize());
 
 // Only 1 map to other node - simulating reduce
 a.assignContainers(clusterResource, node_1,
 new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
-assertEquals(8 * GB, a.getUsedResources().getMemory());
-assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+assertEquals(8 * GB, a.getUsedResources().getMemorySize());
+assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
 assertEquals(0 * GB, a.getMetrics().getReservedMB());
 assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
 assertEquals(16 * GB, a.getMetrics().getAvailableMB());
-assertEquals(16 * GB, app_0.getHeadroom().getMemory());
+assertEquals(16 * GB, app_0.getHeadroom().getMemorySize());
 assertEquals(null, node_0.getReservedContainer());
-assertEquals(5 * GB, node_0.getUsedResource().getMemory());
-assertEquals(3 * GB, node_1.getUsedResource().getMemory());
-assertEquals(0 * GB, node_2.getUsedResource().getMemory());
+assertEquals(5 * GB, node_0.getUsedResource().getMemorySize());
+assertEquals(3 * GB, node_1.getUsedResource().getMemorySize());
+assertEquals(0 * GB, node_2.getUsedResource().getMemorySize());
 assertEquals(2, app_0.getTotalRequiredResources(priorityReduce));
 
 // try to assign reducer (5G on node 0 and should reserve)
 a.assignContainers(clusterResource, node_0,
 new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
-assertEquals(13 * GB, a.getUsedResources().getMemory());
-assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+assertEquals(13 * GB, a.getUsedResources().getMemorySize());
+assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());

[1/7] hadoop git commit: YARN-4844. Add getMemorySize/getVirtualCoresSize to o.a.h.y.api.records.Resource. (wangda)

2016-06-07 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 333490ba4 -> 19e578870


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19e57887/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 469d97b..06bbfe9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -228,10 +228,10 @@ public class TestFairScheduler extends FairSchedulerTestBase {
 Assert.assertEquals(10, scheduler.continuousSchedulingSleepMs);
 Assert.assertEquals(5000, scheduler.nodeLocalityDelayMs);
 Assert.assertEquals(5000, scheduler.rackLocalityDelayMs);
-Assert.assertEquals(1024, scheduler.getMaximumResourceCapability().getMemory());
-Assert.assertEquals(512, scheduler.getMinimumResourceCapability().getMemory());
+Assert.assertEquals(1024, scheduler.getMaximumResourceCapability().getMemorySize());
+Assert.assertEquals(512, scheduler.getMinimumResourceCapability().getMemorySize());
 Assert.assertEquals(128, 
-  scheduler.getIncrementResourceCapability().getMemory());
+  scheduler.getIncrementResourceCapability().getMemorySize());
   }
   
   @Test  
@@ -246,9 +246,9 @@ public class TestFairScheduler extends FairSchedulerTestBase {
   FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2);
 scheduler.init(conf);
 scheduler.reinitialize(conf, null);
-Assert.assertEquals(256, scheduler.getMinimumResourceCapability().getMemory());
+Assert.assertEquals(256, scheduler.getMinimumResourceCapability().getMemorySize());
 Assert.assertEquals(1, scheduler.getMinimumResourceCapability().getVirtualCores());
-Assert.assertEquals(512, scheduler.getIncrementResourceCapability().getMemory());
+Assert.assertEquals(512, scheduler.getIncrementResourceCapability().getMemorySize());
 Assert.assertEquals(2, scheduler.getIncrementResourceCapability().getVirtualCores());
   }  
   
@@ -264,9 +264,9 @@ public class TestFairScheduler extends FairSchedulerTestBase {
   FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2);
 scheduler.init(conf);
 scheduler.reinitialize(conf, null);
-Assert.assertEquals(0, scheduler.getMinimumResourceCapability().getMemory());
+Assert.assertEquals(0, scheduler.getMinimumResourceCapability().getMemorySize());
 Assert.assertEquals(0, scheduler.getMinimumResourceCapability().getVirtualCores());
-Assert.assertEquals(512, scheduler.getIncrementResourceCapability().getMemory());
+Assert.assertEquals(512, scheduler.getIncrementResourceCapability().getMemorySize());
 Assert.assertEquals(2, scheduler.getIncrementResourceCapability().getVirtualCores());
   }  
   
@@ -282,19 +282,19 @@ public class TestFairScheduler extends FairSchedulerTestBase {
 .newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1");
 NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
 scheduler.handle(nodeEvent1);
-assertEquals(1024, scheduler.getClusterResource().getMemory());
+assertEquals(1024, scheduler.getClusterResource().getMemorySize());
 
 // Add another node
 RMNode node2 =
 MockNodes.newNodeInfo(1, Resources.createResource(512), 2, "127.0.0.2");
 NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
 scheduler.handle(nodeEvent2);
-assertEquals(1536, scheduler.getClusterResource().getMemory());
+assertEquals(1536, scheduler.getClusterResource().getMemorySize());
 
 // Remove the first node
 NodeRemovedSchedulerEvent nodeEvent3 = new NodeRemovedSchedulerEvent(node1);
 scheduler.handle(nodeEvent3);
-assertEquals(512, scheduler.getClusterResource().getMemory());
+assertEquals(512, scheduler.getClusterResource().getMemorySize());
   }
 
   @Test
@@ -325,9 +325,9 @@ public class TestFairScheduler extends FairSchedulerTestBase {
 
 // Divided three ways - between the two queues and the default queue
 for (FSLeafQueue p : queues) {
-  assertEquals(3414, p.getFairShare().getMemory());
+  assertEquals(3414, p.getFairShare().getMemorySize());

hadoop git commit: HDFS-10468. HDFS read ends up ignoring an interrupt. Contributed by Jing Zhao

2016-06-07 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 19eb997f6 -> 8b34040cb


HDFS-10468. HDFS read ends up ignoring an interrupt. Contributed by Jing Zhao

(cherry picked from commit be34e85e682880f46eee0310bf00ecc7d39cd5bd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b34040c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b34040c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b34040c

Branch: refs/heads/branch-2
Commit: 8b34040cb905a3244e8d688e3c5713da557139f1
Parents: 19eb997
Author: Jing Zhao 
Authored: Tue Jun 7 10:48:21 2016 -0700
Committer: Jing Zhao 
Committed: Tue Jun 7 10:52:33 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 36 ++--
 .../java/org/apache/hadoop/hdfs/TestRead.java   | 87 
 .../server/datanode/SimulatedFSDataset.java |  4 +-
 3 files changed, 119 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b34040c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 4a83a53..fb8d207 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs;
 
 import java.io.EOFException;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
+import java.nio.channels.ClosedByInterruptException;
 import java.util.AbstractMap;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -307,7 +309,7 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(waitTime);
 } catch (InterruptedException e) {
-  throw new IOException(
+  throw new InterruptedIOException(
   "Interrupted while getting the last block length.");
 }
   }
@@ -382,6 +384,7 @@ public class DFSInputStream extends FSInputStream
   return n;
 }
   } catch (IOException ioe) {
+checkInterrupted(ioe);
 if (ioe instanceof RemoteException) {
   if (((RemoteException) ioe).unwrapRemoteException() instanceof
   ReplicaNotFoundException) {
@@ -417,7 +420,8 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(500); // delay between retries.
 } catch (InterruptedException e) {
-  throw new IOException("Interrupted while getting the length.");
+  throw new InterruptedIOException(
+  "Interrupted while getting the length.");
 }
   }
 
@@ -663,6 +667,7 @@ public class DFSInputStream extends FSInputStream
 }
 return chosenNode;
   } catch (IOException ex) {
+checkInterrupted(ex);
 if (ex instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
   DFSClient.LOG.info("Will fetch a new encryption key and retry, "
   + "encryption key was invalid when connecting to " + targetAddr
@@ -684,6 +689,15 @@ public class DFSInputStream extends FSInputStream
 }
   }
 
+  private void checkInterrupted(IOException e) throws IOException {
+if (Thread.currentThread().isInterrupted() &&
+(e instanceof ClosedByInterruptException ||
+e instanceof InterruptedIOException)) {
+  DFSClient.LOG.debug("The reading thread has been interrupted.", e);
+  throw e;
+}
+  }
+
   protected BlockReader getBlockReader(LocatedBlock targetBlock,
   long offsetInBlock, long length, InetSocketAddress targetAddr,
   StorageType storageType, DatanodeInfo datanode) throws IOException {
@@ -950,6 +964,7 @@ public class DFSInputStream extends FSInputStream
 } catch (ChecksumException ce) {
   throw ce;
 } catch (IOException e) {
+  checkInterrupted(e);
   if (retries == 1) {
 DFSClient.LOG.warn("DFS Read", e);
   }
@@ -1064,9 +1079,12 @@ public class DFSInputStream extends FSInputStream
   // expanding time window for each failure
   timeWindow * (failures + 1) *
   ThreadLocalRandom.current().nextDouble();
-  DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) + " 
IOException, will wait for " + waitTime + " msec.");
+  DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) +
+  " IOException, 

hadoop git commit: HDFS-10468. HDFS read ends up ignoring an interrupt. Contributed by Jing Zhao

2016-06-07 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk c14c1b298 -> be34e85e6


HDFS-10468. HDFS read ends up ignoring an interrupt. Contributed by Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be34e85e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be34e85e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be34e85e

Branch: refs/heads/trunk
Commit: be34e85e682880f46eee0310bf00ecc7d39cd5bd
Parents: c14c1b2
Author: Jing Zhao 
Authored: Tue Jun 7 10:48:21 2016 -0700
Committer: Jing Zhao 
Committed: Tue Jun 7 10:48:21 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 36 ++--
 .../java/org/apache/hadoop/hdfs/TestRead.java   | 87 
 .../server/datanode/SimulatedFSDataset.java |  4 +-
 3 files changed, 119 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be34e85e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 2ed0abd..7f32a56 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs;
 
 import java.io.EOFException;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
+import java.nio.channels.ClosedByInterruptException;
 import java.util.AbstractMap;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -304,7 +306,7 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(waitTime);
 } catch (InterruptedException e) {
-  throw new IOException(
+  throw new InterruptedIOException(
   "Interrupted while getting the last block length.");
 }
   }
@@ -379,6 +381,7 @@ public class DFSInputStream extends FSInputStream
   return n;
 }
   } catch (IOException ioe) {
+checkInterrupted(ioe);
 if (ioe instanceof RemoteException) {
   if (((RemoteException) ioe).unwrapRemoteException() instanceof
   ReplicaNotFoundException) {
@@ -414,7 +417,8 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(500); // delay between retries.
 } catch (InterruptedException e) {
-  throw new IOException("Interrupted while getting the length.");
+  throw new InterruptedIOException(
+  "Interrupted while getting the length.");
 }
   }
 
@@ -660,6 +664,7 @@ public class DFSInputStream extends FSInputStream
 }
 return chosenNode;
   } catch (IOException ex) {
+checkInterrupted(ex);
 if (ex instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
   DFSClient.LOG.info("Will fetch a new encryption key and retry, "
   + "encryption key was invalid when connecting to " + targetAddr
@@ -681,6 +686,15 @@ public class DFSInputStream extends FSInputStream
 }
   }
 
+  private void checkInterrupted(IOException e) throws IOException {
+if (Thread.currentThread().isInterrupted() &&
+(e instanceof ClosedByInterruptException ||
+e instanceof InterruptedIOException)) {
+  DFSClient.LOG.debug("The reading thread has been interrupted.", e);
+  throw e;
+}
+  }
+
   protected BlockReader getBlockReader(LocatedBlock targetBlock,
   long offsetInBlock, long length, InetSocketAddress targetAddr,
   StorageType storageType, DatanodeInfo datanode) throws IOException {
@@ -948,6 +962,7 @@ public class DFSInputStream extends FSInputStream
 } catch (ChecksumException ce) {
   throw ce;
 } catch (IOException e) {
+  checkInterrupted(e);
   if (retries == 1) {
 DFSClient.LOG.warn("DFS Read", e);
   }
@@ -1044,9 +1059,12 @@ public class DFSInputStream extends FSInputStream
   // expanding time window for each failure
   timeWindow * (failures + 1) *
   ThreadLocalRandom.current().nextDouble();
-  DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) + " 
IOException, will wait for " + waitTime + " msec.");
+  DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) +
+  " IOException, will wait for " + waitTime + " msec.");
   

hadoop git commit: HDFS-10458. getFileEncryptionInfo should return quickly for non-encrypted cluster.

2016-06-07 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 7fe08d762 -> 1b00cebcd


HDFS-10458. getFileEncryptionInfo should return quickly for non-encrypted cluster.

(cherry picked from commit 9b68eda2746702246c7f4b4abe412c974ba56339)
(cherry picked from commit 6de9213df111a9a4ed875db995d67af72d08a798)
(cherry picked from commit 06e38c835d0ad9619d4bea662f2dd7d0f62007a9)
(cherry picked from commit 0274636529cfac4d64582a7c21631aebbf4deb1b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b00cebc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b00cebc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b00cebc

Branch: refs/heads/branch-2.6
Commit: 1b00cebcdb378ab510e81dc85c62674af764da53
Parents: 7fe08d7
Author: Zhe Zhang 
Authored: Tue Jun 7 10:42:19 2016 -0700
Committer: Zhe Zhang 
Committed: Tue Jun 7 10:42:19 2016 -0700

--
 .../server/namenode/EncryptionZoneManager.java  | 27 +---
 .../hdfs/server/namenode/FSDirectory.java   |  2 +-
 2 files changed, 25 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b00cebc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 90b99ce..16ff864 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -97,7 +97,7 @@ public class EncryptionZoneManager {
 }
   }
 
-  private final TreeMap<Long, EncryptionZoneInt> encryptionZones;
+  private TreeMap<Long, EncryptionZoneInt> encryptionZones = null;
   private final FSDirectory dir;
   private final int maxListEncryptionZonesResponses;
 
@@ -108,7 +108,6 @@ public class EncryptionZoneManager {
*/
   public EncryptionZoneManager(FSDirectory dir, Configuration conf) {
 this.dir = dir;
-encryptionZones = new TreeMap<Long, EncryptionZoneInt>();
 maxListEncryptionZonesResponses = conf.getInt(
 DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
 DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT
@@ -145,6 +144,9 @@ public class EncryptionZoneManager {
   CipherSuite suite, CryptoProtocolVersion version, String keyName) {
 final EncryptionZoneInt ez = new EncryptionZoneInt(
 inodeId, suite, version, keyName);
+if (encryptionZones == null) {
+  encryptionZones = new TreeMap<Long, EncryptionZoneInt>();
+}
 encryptionZones.put(inodeId, ez);
   }
 
@@ -155,7 +157,9 @@ public class EncryptionZoneManager {
*/
   void removeEncryptionZone(Long inodeId) {
 assert dir.hasWriteLock();
-encryptionZones.remove(inodeId);
+if (hasCreatedEncryptionZone()) {
+  encryptionZones.remove(inodeId);
+}
   }
 
   /**
@@ -203,6 +207,9 @@ public class EncryptionZoneManager {
   private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) {
 assert dir.hasReadLock();
 Preconditions.checkNotNull(iip);
+if (!hasCreatedEncryptionZone()) {
+  return null;
+}
 final INode[] inodes = iip.getINodes();
 for (int i = inodes.length - 1; i >= 0; i--) {
   final INode inode = inodes[i];
@@ -337,6 +344,10 @@ public class EncryptionZoneManager {
   BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
   throws IOException {
 assert dir.hasReadLock();
+if (!hasCreatedEncryptionZone()) {
+  final List<EncryptionZone> emptyZones = Lists.newArrayList();
+  return new BatchedListEntries<EncryptionZone>(emptyZones, false);
+}
 NavigableMap<Long, EncryptionZoneInt> tailMap = encryptionZones.tailMap
 (prevId, false);
 final int numResponses = Math.min(maxListEncryptionZonesResponses,
@@ -371,4 +382,14 @@ public class EncryptionZoneManager {
 final boolean hasMore = (numResponses < tailMap.size());
 return new BatchedListEntries<EncryptionZone>(zones, hasMore);
   }
+
+  /**
+   * @return Whether there has been any attempt to create an encryption zone in
+   * the cluster at all. If not, it is safe to quickly return null when
+   * checking the encryption information of any file or directory in the
+   * cluster.
+   */
+  public boolean hasCreatedEncryptionZone() {
+return encryptionZones != null;
+  }
 }
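The same pattern, reduced to a self-contained sketch with hypothetical names: the map stays null until the first zone is created, so the per-file lookup path (hit on every getFileEncryptionInfo call) bails out with one null check instead of walking the inode ancestry:

    import java.util.TreeMap;

    class ZoneRegistry {
      private TreeMap<Long, String> zones = null;  // allocated only on first use

      void addZone(long inodeId, String keyName) {
        if (zones == null) {
          zones = new TreeMap<>();
        }
        zones.put(inodeId, keyName);
      }

      boolean hasCreatedZone() {
        return zones != null;
      }

      String zoneFor(long inodeId) {
        if (!hasCreatedZone()) {
          return null;  // fast path: no encryption zone was ever created
        }
        return zones.get(inodeId);
      }
    }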


[31/47] hadoop git commit: Revert "Revert "HDFS-10224. Implement asynchronous rename for DistributedFileSystem. Contributed by Xiaobing Zhou""

2016-06-07 Thread aengineer
Revert "Revert "HDFS-10224. Implement asynchronous rename for 
DistributedFileSystem.  Contributed by Xiaobing Zhou""

This reverts commit 106234d873c60fa52cd0d812fb1cdc0c6b998a6d.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eded3d10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eded3d10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eded3d10

Branch: refs/heads/HDFS-1312
Commit: eded3d109e4c5225d8c5cd3c2d82e7ac93841263
Parents: 106234d
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:28:21 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:28:21 2016 +0800

--
 .../java/org/apache/hadoop/fs/FileSystem.java   |   1 -
 .../main/java/org/apache/hadoop/ipc/Client.java |  11 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  34 ++-
 .../org/apache/hadoop/ipc/TestAsyncIPC.java |   2 +-
 .../hadoop/hdfs/AsyncDistributedFileSystem.java | 110 
 .../hadoop/hdfs/DistributedFileSystem.java  |  22 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  45 +++-
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 258 +++
 8 files changed, 463 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eded3d10/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 0ecd8b7..9e13a7a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1252,7 +1252,6 @@ public abstract class FileSystem extends Configured implements Closeable {
   /**
* Renames Path src to Path dst
* 
-   * Fails if src is a file and dst is a directory.
* Fails if src is a directory and dst is a file.
* Fails if the parent of dst does not exist or is a file.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eded3d10/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index f206861..d59aeb89 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -119,7 +119,8 @@ public class Client implements AutoCloseable {
 
  private static final ThreadLocal<Integer> callId = new ThreadLocal<Integer>();
  private static final ThreadLocal<Integer> retryCount = new ThreadLocal<Integer>();
-  private static final ThreadLocal<Future<?>> returnValue = new ThreadLocal<>();
+  private static final ThreadLocal<Future<?>>
+  RETURN_RPC_RESPONSE = new ThreadLocal<>();
  private static final ThreadLocal<Boolean> asynchronousMode =
  new ThreadLocal<Boolean>() {
 @Override
@@ -130,8 +131,8 @@ public class Client implements AutoCloseable {
 
   @SuppressWarnings("unchecked")
   @Unstable
-  public static <T> Future<T> getReturnValue() {
-return (Future<T>) returnValue.get();
+  public static <T> Future<T> getReturnRpcResponse() {
+return (Future<T>) RETURN_RPC_RESPONSE.get();
   }
 
   /** Set call id and retry count for the next call. */
@@ -1396,7 +1397,7 @@ public class Client implements AutoCloseable {
 }
   };
 
-  returnValue.set(returnFuture);
+  RETURN_RPC_RESPONSE.set(returnFuture);
   return null;
 } else {
   return getRpcResponse(call, connection);
@@ -1410,7 +1411,7 @@ public class Client implements AutoCloseable {
*  synchronous mode.
*/
   @Unstable
-  static boolean isAsynchronousMode() {
+  public static boolean isAsynchronousMode() {
 return asynchronousMode.get();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eded3d10/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 071e2e8..8fcdb78 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 

[06/47] hadoop git commit: HDFS-10367. TestDFSShell.testMoveWithTargetPortEmpty fails with Address bind exception. Contributed by Brahma Reddy Battula.

2016-06-07 Thread aengineer
HDFS-10367. TestDFSShell.testMoveWithTargetPortEmpty fails with Address bind exception. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aadb77e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aadb77e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aadb77e4

Branch: refs/heads/HDFS-1312
Commit: aadb77e412ab9d4ad05a0bd8b37d547ba5adad03
Parents: 99675e0
Author: Masatake Iwasaki 
Authored: Thu Jun 2 20:55:04 2016 +0900
Committer: Masatake Iwasaki 
Committed: Thu Jun 2 20:55:04 2016 +0900

--
 .../org/apache/hadoop/net/ServerSocketUtil.java | 39 
 .../org/apache/hadoop/hdfs/TestDFSShell.java|  4 +-
 2 files changed, 42 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aadb77e4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
index a3e1fff..023c1ed 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
@@ -63,4 +63,43 @@ public class ServerSocketUtil {
 }
   }
 
+  /**
+   * Check whether port is available or not.
+   *
+   * @param port given port
+   * @return
+   */
+  private static boolean isPortAvailable(int port) {
+try (ServerSocket s = new ServerSocket(port)) {
+  return true;
+} catch (IOException e) {
+  return false;
+}
+  }
+
+  /**
+   * Wait till the port available.
+   *
+   * @param port given port
+   * @param retries number of retries for given port
+   * @return
+   * @throws InterruptedException
+   * @throws IOException
+   */
+  public static int waitForPort(int port, int retries)
+  throws InterruptedException, IOException {
+int tries = 0;
+while (true) {
+  if (isPortAvailable(port)) {
+return port;
+  } else {
+tries++;
+if (tries >= retries) {
+  throw new IOException(
+  "Port is already in use; giving up after " + tries + " times.");
+}
+Thread.sleep(1000);
+  }
+}
+  }
 }
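A short usage note for the new helper (the TestDFSShell hunk below applies exactly this call): waitForPort polls once per second until the port can be bound, which de-flakes tests that hard-code well-known ports.

    // Block until the default NN RPC port is bindable; give up after 10 tries.
    int port = ServerSocketUtil.waitForPort(
        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, 10);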

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aadb77e4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index fc3de75..e31de13 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.compress.BZip2Codec;
 import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -567,7 +568,8 @@ public class TestDFSShell {
   cluster = new MiniDFSCluster.Builder(conf)
   .format(true)
   .numDataNodes(2)
-  .nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
+  .nameNodePort(ServerSocketUtil.waitForPort(
+  HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, 10))
   .waitSafeMode(true)
   .build();
   FileSystem srcFs = cluster.getFileSystem();





[23/47] hadoop git commit: Revert "HADOOP-13226 Support async call retry and failover."

2016-06-07 Thread aengineer
Revert "HADOOP-13226 Support async call retry and failover."

This reverts commit 83f2f78c118a7e52aba5104bd97b0acedc96be7b.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5360da8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5360da8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5360da8b

Branch: refs/heads/HDFS-1312
Commit: 5360da8bd9f720384860f411bee081aef13b4bd4
Parents: 47e0321
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:09 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:09 2016 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   8 +-
 .../hadoop/io/retry/AsyncCallHandler.java   | 321 ---
 .../org/apache/hadoop/io/retry/CallReturn.java  |  75 -
 .../hadoop/io/retry/RetryInvocationHandler.java | 134 ++--
 .../apache/hadoop/io/retry/RetryPolicies.java   |   4 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |  25 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  13 +-
 .../apache/hadoop/util/concurrent/AsyncGet.java |  17 +-
 .../org/apache/hadoop/ipc/TestAsyncIPC.java |  10 +-
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |   7 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  42 +--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java|  43 ++-
 .../apache/hadoop/hdfs/TestAsyncHDFSWithHA.java | 181 ---
 .../hdfs/server/namenode/ha/HATestUtil.java |   9 +-
 14 files changed, 114 insertions(+), 775 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5360da8b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
--
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index a644aa5..ab8673b 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -345,13 +345,7 @@

  
 
- 
- 
-   
-   
-   
- 
-
+ 
  



http://git-wip-us.apache.org/repos/asf/hadoop/blob/5360da8b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
deleted file mode 100644
index 5a03b03..000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.retry;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.concurrent.AsyncGet;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.reflect.Method;
-import java.util.LinkedList;
-import java.util.Queue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReference;
-
-/** Handle async calls. */
-@InterfaceAudience.Private
-public class AsyncCallHandler {
-  static final Logger LOG = LoggerFactory.getLogger(AsyncCallHandler.class);
-
-private static final ThreadLocal<AsyncGet<?, ?>>
-  LOWER_LAYER_ASYNC_RETURN = new ThreadLocal<>();
-private static final ThreadLocal<AsyncGet<Object, Throwable>>
-  ASYNC_RETURN = new ThreadLocal<>();
-
-  /** @return the async return value from {@link AsyncCallHandler}. */
-  @InterfaceStability.Unstable
-  

[22/47] hadoop git commit: HDFS-10481. HTTPFS server should correctly impersonate as end user to open file. Contributed by Xiao Chen.

2016-06-07 Thread aengineer
HDFS-10481. HTTPFS server should correctly impersonate as end user to open 
file. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47e0321e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47e0321e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47e0321e

Branch: refs/heads/HDFS-1312
Commit: 47e0321ee91149331e6ae72e7caa41d1de078b6c
Parents: 99a771c
Author: Andrew Wang 
Authored: Fri Jun 3 17:21:17 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 17:21:17 2016 -0700

--
 .../hadoop/fs/http/server/HttpFSServer.java | 218 ++-
 1 file changed, 114 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47e0321e/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index b7b63fa..db4692a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -79,6 +79,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 import java.security.AccessControlException;
+import java.security.PrivilegedExceptionAction;
 import java.text.MessageFormat;
 import java.util.EnumSet;
 import java.util.List;
@@ -94,6 +95,7 @@ import java.util.Map;
 @InterfaceAudience.Private
 public class HttpFSServer {
   private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
+  private static final Logger LOG = LoggerFactory.getLogger(HttpFSServer.class);
 
   /**
* Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem for the effective
@@ -205,115 +207,123 @@ public class HttpFSServer {
 MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
 MDC.put("hostname", request.getRemoteAddr());
 switch (op.value()) {
-  case OPEN: {
-//Invoking the command directly using an unmanaged FileSystem that is
-// released by the FileSystemReleaseFilter
-FSOperations.FSOpen command = new FSOperations.FSOpen(path);
-FileSystem fs = createFileSystem(user);
-InputStream is = command.execute(fs);
-Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
-Long len = params.get(LenParam.NAME, LenParam.class);
-AUDIT_LOG.info("[{}] offset [{}] len [{}]",
-   new Object[]{path, offset, len});
-InputStreamEntity entity = new InputStreamEntity(is, offset, len);
-response =
+case OPEN: {
+  //Invoking the command directly using an unmanaged FileSystem that is
+  // released by the FileSystemReleaseFilter
+  final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
+  final FileSystem fs = createFileSystem(user);
+  InputStream is = null;
+  UserGroupInformation ugi = UserGroupInformation
+  .createProxyUser(user.getShortUserName(),
+  UserGroupInformation.getLoginUser());
+  try {
+is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
+  @Override
+  public InputStream run() throws Exception {
+return command.execute(fs);
+  }
+});
+  } catch (InterruptedException ie) {
+LOG.info("Open interrupted.", ie);
+Thread.currentThread().interrupt();
+  }
+  Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
+  Long len = params.get(LenParam.NAME, LenParam.class);
+  AUDIT_LOG.info("[{}] offset [{}] len [{}]",
+  new Object[] { path, offset, len });
+  InputStreamEntity entity = new InputStreamEntity(is, offset, len);
+  response =
   Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
-break;
-  }
-  case GETFILESTATUS: {
-FSOperations.FSFileStatus command =
-  new FSOperations.FSFileStatus(path);
-Map json = fsExecute(user, command);
-AUDIT_LOG.info("[{}]", path);
-response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-break;
-  }
-  case LISTSTATUS: {
-String filter = params.get(FilterParam.NAME, FilterParam.class);
-FSOperations.FSListStatus command = new FSOperations.FSListStatus(
-  path, filter);
-Map json = fsExecute(user, command);
-AUDIT_LOG.info("[{}] filter [{}]", path,
-   (filter != 
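
The core of this fix is the switch from calling command.execute(fs) directly, as the HttpFS service user, to running it inside a doAs block so the NameNode sees the effective end user. Condensed from the hunk above (error handling trimmed):

    // Build a proxy UGI for the end user on top of the HttpFS login user,
    // then execute the open with the end user's credentials.
    UserGroupInformation ugi = UserGroupInformation.createProxyUser(
        user.getShortUserName(), UserGroupInformation.getLoginUser());
    InputStream is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
      @Override
      public InputStream run() throws Exception {
        return command.execute(fs);
      }
    });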

[09/47] hadoop git commit: HDFS-10471. DFSAdmin#SetQuotaCommand's help msg is not correct. Contributed by Yiqun Lin.

2016-06-07 Thread aengineer
HDFS-10471. DFSAdmin#SetQuotaCommand's help msg is not correct. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1df6f573
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1df6f573
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1df6f573

Branch: refs/heads/HDFS-1312
Commit: 1df6f5735c9d85e644d99d3ebfc4459490657004
Parents: ead61c4
Author: Akira Ajisaka 
Authored: Fri Jun 3 04:10:32 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Jun 3 04:10:32 2016 +0900

--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 42 ++--
 .../src/test/resources/testHDFSConf.xml |  2 +-
 2 files changed, 22 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1df6f573/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 08d3da5..45c4952 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -165,15 +165,15 @@ public class DFSAdmin extends FsShell {
 private static final String USAGE =
   "-"+NAME+" <quota> <dirname>...<dirname>";
 private static final String DESCRIPTION = 
-  "-setQuota <quota> <dirname>...<dirname>: " +
-  "Set the quota <quota> for each directory <dirName>.\n" + 
-  "\t\tThe directory quota is a long integer that puts a hard limit\n" +
-  "\t\ton the number of names in the directory tree\n" +
-  "\t\tFor each directory, attempt to set the quota. An error will be reported if\n" +
-  "\t\t1. N is not a positive integer, or\n" +
-  "\t\t2. User is not an administrator, or\n" +
-  "\t\t3. The directory does not exist or is a file.\n" +
-  "\t\tNote: A quota of 1 would force the directory to remain empty.\n";
+"-setQuota <quota> <dirname>...<dirname>: " +
+"Set the quota <quota> for each directory <dirName>.\n" +
+"\t\tThe directory quota is a long integer that puts a hard limit\n" +
+"\t\ton the number of names in the directory tree\n" +
+"\t\tFor each directory, attempt to set the quota. An error will be reported if\n" +
+"\t\t1. quota is not a positive integer, or\n" +
+"\t\t2. User is not an administrator, or\n" +
+"\t\t3. The directory does not exist or is a file.\n" +
+"\t\tNote: A quota of 1 would force the directory to remain empty.\n";
 
 private final long quota; // the quota to be set
 
@@ -263,18 +263,18 @@ public class DFSAdmin extends FsShell {
 private static final String USAGE =
   "-"+NAME+" <quota> [-storageType <storagetype>] <dirname>...<dirname>";
 private static final String DESCRIPTION = USAGE + ": " +
-  "Set the space quota <quota> for each directory <dirName>.\n" +
-  "\t\tThe space quota is a long integer that puts a hard limit\n" +
-  "\t\ton the total size of all the files under the directory tree.\n" +
-  "\t\tThe extra space required for replication is also counted. E.g.\n" +
-  "\t\ta 1GB file with replication of 3 consumes 3GB of the quota.\n\n" +
-  "\t\tQuota can also be specified with a binary prefix for terabytes,\n" +
-  "\t\tpetabytes etc (e.g. 50t is 50TB, 5m is 5MB, 3p is 3PB).\n" + 
-  "\t\tFor each directory, attempt to set the quota. An error will be reported if\n" +
-  "\t\t1. N is not a positive integer, or\n" +
-  "\t\t2. user is not an administrator, or\n" +
-  "\t\t3. the directory does not exist or is a file.\n" +
-  "\t\tThe storage type specific quota is set when -storageType option is specified.\n";
+"Set the space quota <quota> for each directory <dirName>.\n" +
+"\t\tThe space quota is a long integer that puts a hard limit\n" +
+"\t\ton the total size of all the files under the directory tree.\n" +
+"\t\tThe extra space required for replication is also counted. E.g.\n" +
+"\t\ta 1GB file with replication of 3 consumes 3GB of the quota.\n\n" +
+"\t\tQuota can also be specified with a binary prefix for terabytes,\n" +
+"\t\tpetabytes etc (e.g. 50t is 50TB, 5m is 5MB, 3p is 3PB).\n" +
+"\t\tFor each directory, attempt to set the quota. An error will be reported if\n" +
+"\t\t1. quota is not a positive integer or zero, or\n" +
+"\t\t2. user is not an administrator, or\n" +
+"\t\t3. the directory does not exist or is a file.\n" +
+"\t\tThe storage type specific quota is set when -storageType option is specified.\n";
 
 private long quota; // the quota to 
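
For reference, the commands whose help text is corrected here are invoked like this (paths and values are illustrative; binary prefixes such as "t" follow the help text above):

    hdfs dfsadmin -setQuota 100 /user/alice
    hdfs dfsadmin -setSpaceQuota 10t /user/alice
    hdfs dfsadmin -setSpaceQuota 5t -storageType SSD /user/alice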

[07/47] hadoop git commit: YARN-5180. Allow ResourceRequest to specify an enforceExecutionType flag. (asuresh)

2016-06-07 Thread aengineer
YARN-5180. Allow ResourceRequest to specify an enforceExecutionType flag. 
(asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc26601d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc26601d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc26601d

Branch: refs/heads/HDFS-1312
Commit: dc26601d8fe27a4223a50601bf7522cc42e8e2f3
Parents: aadb77e
Author: Arun Suresh 
Authored: Thu Jun 2 05:18:01 2016 -0700
Committer: Arun Suresh 
Committed: Thu Jun 2 09:01:02 2016 -0700

--
 .../v2/app/rm/RMContainerRequestor.java |   4 +-
 .../yarn/api/records/ExecutionTypeRequest.java  | 124 +++
 .../yarn/api/records/ResourceRequest.java   |  34 ++---
 .../src/main/proto/yarn_protos.proto|   7 +-
 .../api/impl/TestDistributedScheduling.java |   9 +-
 .../impl/pb/ExecutionTypeRequestPBImpl.java |  93 ++
 .../yarn/api/records/impl/pb/ProtoUtils.java|  15 +++
 .../records/impl/pb/ResourceRequestPBImpl.java  |  52 
 .../hadoop/yarn/api/TestPBImplRecords.java  |   2 +
 .../nodemanager/scheduler/LocalScheduler.java   |   3 +-
 .../scheduler/TestLocalScheduler.java   |  11 +-
 .../TestDistributedSchedulingService.java   |  17 ++-
 12 files changed, 323 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc26601d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
index 7030712..f4579ab 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -462,7 +463,8 @@ public abstract class RMContainerRequestor extends RMCommunicator {
   remoteRequest.setCapability(capability);
   remoteRequest.setNumContainers(0);
   remoteRequest.setNodeLabelExpression(nodeLabelExpression);
-  remoteRequest.setExecutionType(executionType);
+  remoteRequest.setExecutionTypeRequest(
+  ExecutionTypeRequest.newInstance(executionType, true));
   reqMap.put(capability, remoteRequest);
 }
 remoteRequest.setNumContainers(remoteRequest.getNumContainers() + 1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc26601d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ExecutionTypeRequest.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ExecutionTypeRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ExecutionTypeRequest.java
new file mode 100644
index 000..f553a44
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ExecutionTypeRequest.java
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * 
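
Putting the new API together, a hedged sketch of what a client can now express (the request construction is illustrative; the enforced flag and setter come from the RMContainerRequestor hunk above):

    // Ask for one OPPORTUNISTIC container and insist that the execution type
    // be honored exactly (enforceExecutionType = true), as RMContainerRequestor
    // now does for its own requests.
    ResourceRequest req = ResourceRequest.newInstance(
        Priority.newInstance(0), ResourceRequest.ANY,
        Resource.newInstance(1024, 1), 1);
    req.setExecutionTypeRequest(
        ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true));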

[16/47] hadoop git commit: YARN-5165. Fix NoOvercommitPolicy to take advantage of RLE representation of plan. (Carlo Curino via asuresh)

2016-06-07 Thread aengineer
YARN-5165. Fix NoOvercommitPolicy to take advantage of RLE representation of 
plan. (Carlo Curino via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db54670e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db54670e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db54670e

Branch: refs/heads/HDFS-1312
Commit: db54670e83a84c1d7deff2c225725687cf9e5f14
Parents: f10ebc6
Author: Arun Suresh 
Authored: Fri Jun 3 14:49:32 2016 -0700
Committer: Arun Suresh 
Committed: Fri Jun 3 14:49:32 2016 -0700

--
 .../reservation/NoOverCommitPolicy.java | 38 
 .../planning/TestSimpleCapacityReplanner.java   |  2 +-
 2 files changed, 15 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db54670e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java
index 119520b..814d4b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/NoOverCommitPolicy.java
@@ -21,11 +21,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation;
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ReservationId;
-import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.MismatchedUserException;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.ResourceOverCommitException;
-import org.apache.hadoop.yarn.util.resource.Resources;
 
 /**
  * This policy enforce a simple physical cluster capacity constraints, by
@@ -52,29 +50,21 @@ public class NoOverCommitPolicy implements SharingPolicy {
   + oldReservation.getUser() + " != " + reservation.getUser());
 }
 
-long startTime = reservation.getStartTime();
-long endTime = reservation.getEndTime();
-long step = plan.getStep();
+RLESparseResourceAllocation available = plan.getAvailableResourceOverTime(
+reservation.getUser(), reservation.getReservationId(),
+reservation.getStartTime(), reservation.getEndTime());
 
-// for every instant in time, check we are respecting cluster capacity
-for (long t = startTime; t < endTime; t += step) {
-  Resource currExistingAllocTot = plan.getTotalCommittedResources(t);
-  Resource currNewAlloc = reservation.getResourcesAtTime(t);
-  Resource currOldAlloc = Resource.newInstance(0, 0);
-  if (oldReservation != null) {
-oldReservation.getResourcesAtTime(t);
-  }
-  // check the cluster is never over committed
-  // currExistingAllocTot + currNewAlloc - currOldAlloc >
-  // capPlan.getTotalCapacity()
-  if (Resources.greaterThan(plan.getResourceCalculator(), plan
-  .getTotalCapacity(), Resources.subtract(
-  Resources.add(currExistingAllocTot, currNewAlloc), currOldAlloc),
-  plan.getTotalCapacity())) {
-throw new ResourceOverCommitException("Resources at time " + t
-+ " would be overcommitted by " + "accepting reservation: "
-+ reservation.getReservationId());
-  }
+// test the reservation does not exceed what is available
+try {
+  RLESparseResourceAllocation
+  .merge(plan.getResourceCalculator(), plan.getTotalCapacity(),
+  available, reservation.getResourcesOverTime(),
+  RLESparseResourceAllocation.RLEOperator.subtractTestNonNegative,
+  reservation.getStartTime(), reservation.getEndTime());
+} catch (PlanningException p) {
+  throw new ResourceOverCommitException(
+  "Resources at time " + " would be overcommitted by "
+  + "accepting reservation: " + reservation.getReservationId());

[19/47] hadoop git commit: HADOOP-13155. Implement TokenRenewer to renew and cancel delegation tokens in KMS. Contributed by Xiao Chen.

2016-06-07 Thread aengineer
HADOOP-13155. Implement TokenRenewer to renew and cancel delegation tokens in 
KMS. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/713cb718
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/713cb718
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/713cb718

Branch: refs/heads/HDFS-1312
Commit: 713cb71820ad94a5436f35824d07aa12fcba5cc6
Parents: d82bc85
Author: Andrew Wang 
Authored: Fri Jun 3 16:48:54 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 16:48:54 2016 -0700

--
 .../KeyProviderDelegationTokenExtension.java|  30 +++-
 .../crypto/key/kms/KMSClientProvider.java   | 158 +--
 .../key/kms/LoadBalancingKMSClientProvider.java |  21 +++
 .../java/org/apache/hadoop/util/KMSUtil.java|  76 +
 ...rg.apache.hadoop.security.token.TokenRenewer |  14 ++
 .../key/kms/server/KMSAuthenticationFilter.java |   2 +-
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 127 +--
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  38 ++---
 8 files changed, 410 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/713cb718/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
index 2f237c6..9212cbc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
@@ -34,7 +34,7 @@ public class KeyProviderDelegationTokenExtension extends
   new DefaultDelegationTokenExtension();
 
   /**
-   * DelegationTokenExtension is a type of Extension that exposes methods to 
+   * DelegationTokenExtension is a type of Extension that exposes methods
* needed to work with Delegation Tokens.
*/  
   public interface DelegationTokenExtension extends 
@@ -49,8 +49,23 @@ public class KeyProviderDelegationTokenExtension extends
  * @return list of new delegation tokens
  * @throws IOException thrown if IOException if an IO error occurs.
  */
-public Token<?>[] addDelegationTokens(final String renewer, 
+Token<?>[] addDelegationTokens(final String renewer,
 Credentials credentials) throws IOException;
+
+/**
+ * Renews the given token.
+ * @param token The token to be renewed.
+ * @return The token's lifetime after renewal, or 0 if it can't be renewed.
+ * @throws IOException
+ */
+long renewDelegationToken(final Token<?> token) throws IOException;
+
+/**
+ * Cancels the given token.
+ * @param token The token to be cancelled.
+ * @throws IOException
+ */
+Void cancelDelegationToken(final Token<?> token) throws IOException;
   }
   
   /**
@@ -65,7 +80,16 @@ public class KeyProviderDelegationTokenExtension extends
 Credentials credentials) {
   return null;
 }
-
+
+@Override
+public long renewDelegationToken(final Token<?> token) throws IOException {
+  return 0;
+}
+
+@Override
+public Void cancelDelegationToken(final Token<?> token) throws IOException {
+  return null;
+}
   }
 
   private KeyProviderDelegationTokenExtension(KeyProvider keyProvider,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/713cb718/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 32ef09c..f4103b4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -38,8 +38,11 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenRenewer;
+import 
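
A hedged sketch of what the new extension methods enable for a KMS client (the factory method exists on KeyProviderDelegationTokenExtension; the token and provider wiring below are illustrative):

    // Wrap a KeyProvider so its delegation tokens can be renewed/cancelled.
    KeyProviderDelegationTokenExtension ext =
        KeyProviderDelegationTokenExtension
            .createKeyProviderDelegationTokenExtension(keyProvider);
    long expiry = ext.renewDelegationToken(token);  // 0 if it can't be renewed
    ext.cancelDelegationToken(token);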

[47/47] hadoop git commit: Merge branch 'trunk' into HDFS-1312

2016-06-07 Thread aengineer
Merge branch 'trunk' into HDFS-1312


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76a1391d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76a1391d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76a1391d

Branch: refs/heads/HDFS-1312
Commit: 76a1391d53373aa99251f669aab35d9e54e866f4
Parents: 7c79136 c14c1b2
Author: Anu Engineer 
Authored: Tue Jun 7 10:31:57 2016 -0700
Committer: Anu Engineer 
Committed: Tue Jun 7 10:31:57 2016 -0700

--
 .../KeyProviderDelegationTokenExtension.java|  30 +-
 .../crypto/key/kms/KMSClientProvider.java   | 158 +-
 .../key/kms/LoadBalancingKMSClientProvider.java |  21 +
 .../hadoop/crypto/key/kms/ValueQueue.java   |   2 +-
 .../org/apache/hadoop/fs/LocalDirAllocator.java | 153 +++---
 .../hadoop/metrics2/impl/MetricsSystemImpl.java |   1 +
 .../metrics2/lib/DefaultMetricsSystem.java  |   9 +
 .../hadoop/security/LdapGroupsMapping.java  |  12 +
 .../hadoop/security/token/DtFileOperations.java |  23 +
 .../hadoop/security/token/DtUtilShell.java  |  47 +-
 .../DelegationTokenAuthenticationHandler.java   |   7 +
 .../web/DelegationTokenAuthenticator.java   |  19 +
 .../java/org/apache/hadoop/util/KMSUtil.java|  76 +++
 ...rg.apache.hadoop.security.token.TokenRenewer |  14 +
 .../src/main/resources/core-default.xml |  24 +
 .../hadoop-common/src/site/markdown/Metrics.md  |   1 +
 .../src/site/markdown/SingleCluster.md.vm   |   3 +-
 .../hadoop/fs/contract/ContractTestUtils.java   | 420 +++
 .../org/apache/hadoop/net/ServerSocketUtil.java |  39 ++
 .../hadoop/security/TestLdapGroupsMapping.java  | 140 +
 .../hadoop/security/token/TestDtUtilShell.java  |  24 +
 .../delegation/web/TestWebDelegationToken.java  | 114 +++-
 .../key/kms/server/KMSAuthenticationFilter.java |   2 +-
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 127 -
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  38 +-
 .../hadoop/fs/http/server/HttpFSServer.java | 218 
 .../server/blockmanagement/BlockManager.java|   4 +
 .../PendingReconstructionBlocks.java|  16 +-
 .../server/namenode/EncryptionZoneManager.java  |  35 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  |   2 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  11 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   5 +
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  42 +-
 .../src/main/webapps/hdfs/explorer.html |  25 +-
 .../src/main/webapps/hdfs/explorer.js   |  53 +-
 .../org/apache/hadoop/hdfs/TestDFSShell.java|   4 +-
 .../hadoop/hdfs/TestDFSUpgradeFromImage.java|  16 +-
 .../TestPendingReconstruction.java  |  20 +-
 .../src/test/resources/testHDFSConf.xml |   2 +-
 .../hadoop/mapred/LocalContainerLauncher.java   |  28 +
 .../v2/app/job/impl/TaskAttemptImpl.java|   5 +-
 .../v2/app/launcher/ContainerLauncherEvent.java |  21 +-
 .../v2/app/launcher/ContainerLauncherImpl.java  |  19 +-
 .../v2/app/rm/RMContainerRequestor.java |   4 +-
 .../v2/app/launcher/TestContainerLauncher.java  |  10 +-
 .../app/launcher/TestContainerLauncherImpl.java |   8 +
 .../hadoop/mapred/ResourceMgrDelegate.java  |   5 +-
 .../hadoop/mapred/TestClientRedirect.java   |   2 +-
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  | 119 +
 .../org/apache/hadoop/fs/s3a/Constants.java |  11 +-
 .../fs/s3a/ProgressableProgressListener.java|  94 
 .../hadoop/fs/s3a/S3AFastOutputStream.java  |  65 +--
 .../org/apache/hadoop/fs/s3a/S3AFileStatus.java |   7 +
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 529 ++-
 .../hadoop/fs/s3a/S3AInstrumentation.java   | 218 +---
 .../apache/hadoop/fs/s3a/S3AOutputStream.java   |  98 +---
 .../hadoop/fs/s3a/S3AStorageStatistics.java | 104 
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  48 ++
 .../org/apache/hadoop/fs/s3a/Statistic.java | 143 +
 .../src/site/markdown/tools/hadoop-aws/index.md |  53 +-
 .../hadoop/fs/s3a/AbstractS3ATestBase.java  | 103 
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  |  76 +++
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  | 226 +++-
 .../apache/hadoop/fs/s3a/TestS3AEncryption.java | 104 
 .../TestS3AEncryptionAlgorithmPropagation.java  |  82 +++
 .../s3a/TestS3AEncryptionFastOutputStream.java  |  35 ++
 .../hadoop/fs/s3a/TestS3AFileOperationCost.java | 191 +++
 .../fs/s3a/TestS3AFileSystemContract.java   |   1 -
 .../hadoop/fs/s3a/scale/S3AScaleTestBase.java   | 130 +
 .../fs/s3a/scale/TestS3ADeleteManyFiles.java|  10 +-
 .../s3a/scale/TestS3ADirectoryPerformance.java  | 189 +++
 .../scale/TestS3AInputStreamPerformance.java|   6 +-
 .../src/test/resources/log4j.properties |   4 +-
 

[36/47] hadoop git commit: Revert "Revert "HDFS-10431 Refactor and speedup TestAsyncDFSRename. Contributed by Xiaobing Zhou""

2016-06-07 Thread aengineer
Revert "Revert "HDFS-10431 Refactor and speedup TestAsyncDFSRename.  
Contributed by Xiaobing Zhou""

This reverts commit 5ee5912ebd541d5b4c33ecd46dfdebe1e23b56c3.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db41e6d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db41e6d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db41e6d2

Branch: refs/heads/HDFS-1312
Commit: db41e6d285a3b425ffd7c11c7baa8253c7929439
Parents: b3d81f3
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:34 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:34 2016 +0800

--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 233 +++-
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 563 ---
 2 files changed, 313 insertions(+), 483 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db41e6d2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
index 67262dd..ddcf492 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
@@ -29,13 +29,16 @@ import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Random;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 
@@ -43,15 +46,21 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.TestDFSPermission.PermissionGenerator;
 import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
 import org.apache.hadoop.ipc.AsyncCallLimitExceededException;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -63,21 +72,28 @@ import com.google.common.collect.Lists;
  * */
 public class TestAsyncDFS {
   public static final Log LOG = LogFactory.getLog(TestAsyncDFS.class);
-  private static final int NUM_TESTS = 1000;
+  private final short replFactor = 1;
+  private final long blockSize = 512;
+  private long fileLen = blockSize * 3;
+  private final long seed = Time.now();
+  private final Random r = new Random(seed);
+  private final PermissionGenerator permGenerator = new PermissionGenerator(r);
+  private static final int NUM_TESTS = 50;
   private static final int NUM_NN_HANDLER = 10;
-  private static final int ASYNC_CALL_LIMIT = 100;
+  private static final int ASYNC_CALL_LIMIT = 1000;
 
   private Configuration conf;
   private MiniDFSCluster cluster;
   private FileSystem fs;
+  private AsyncDistributedFileSystem adfs;
 
   @Before
   public void setup() throws IOException {
 conf = new HdfsConfiguration();
 // explicitly turn on acl
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
-// explicitly turn on ACL
-conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+// explicitly turn on permission checking
+conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
 // set the limit of max async calls
 conf.setInt(CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
 ASYNC_CALL_LIMIT);
@@ -86,6 +102,7 @@ public class TestAsyncDFS {
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
 cluster.waitActive();
 fs = FileSystem.get(conf);
+adfs = 
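
The speedup in this refactor is visible in the constants: NUM_TESTS drops from 1000 to 50, while the limit set through IPC_CLIENT_ASYNC_CALLS_MAX_KEY rises from 100 to 1000, so the remaining async calls can be issued in larger batches before AsyncCallLimitExceededException forces the test to back off and wait.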

[15/47] hadoop git commit: YARN-5098. Fixed ResourceManager's DelegationTokenRenewer to replace expiring system-tokens if RM stops and only restarts after a long time. Contributed by Jian He.

2016-06-07 Thread aengineer
YARN-5098. Fixed ResourceManager's DelegationTokenRenewer to replace expiring 
system-tokens if RM stops and only restarts after a long time. Contributed by 
Jian He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f10ebc67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f10ebc67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f10ebc67

Branch: refs/heads/HDFS-1312
Commit: f10ebc67f57a4a2e3cc916c41154ab9b6a4635c9
Parents: 99cc439
Author: Vinod Kumar Vavilapalli 
Authored: Fri Jun 3 13:00:07 2016 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Fri Jun 3 13:00:07 2016 -0700

--
 .../security/DelegationTokenRenewer.java| 27 --
 .../security/TestDelegationTokenRenewer.java| 98 
 2 files changed, 118 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f10ebc67/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index fd12f11..4177ee2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.service.AbstractService;
@@ -459,6 +460,18 @@ public class DelegationTokenRenewer extends AbstractService {
   try {
 renewToken(dttr);
   } catch (IOException ioe) {
+if (ioe instanceof SecretManager.InvalidToken
+&& dttr.maxDate < Time.now()
+&& evt instanceof DelegationTokenRenewerAppRecoverEvent
+&& token.getKind().equals(HDFS_DELEGATION_KIND)) {
+  LOG.info("Failed to renew hdfs token " + dttr
+  + " on recovery as it expired, requesting new hdfs token for 
"
+  + applicationId + ", user=" + evt.getUser(), ioe);
+  requestNewHdfsDelegationTokenAsProxyUser(
+  Arrays.asList(applicationId), evt.getUser(),
+  evt.shouldCancelAtEnd());
+  continue;
+}
 throw new IOException("Failed to renew token: " + dttr.token, ioe);
   }
 }
@@ -485,7 +498,8 @@ public class DelegationTokenRenewer extends AbstractService {
 }
 
 if (!hasHdfsToken) {
-  requestNewHdfsDelegationToken(Arrays.asList(applicationId), evt.getUser(),
+  requestNewHdfsDelegationTokenAsProxyUser(Arrays.asList(applicationId),
+  evt.getUser(),
 shouldCancelAtEnd);
 }
   }
@@ -586,8 +600,7 @@ public class DelegationTokenRenewer extends AbstractService {
 } catch (InterruptedException e) {
   throw new IOException(e);
 }
-LOG.info("Renewed delegation-token= [" + dttr + "], for "
-+ dttr.referringAppIds);
+LOG.info("Renewed delegation-token= [" + dttr + "]");
   }
 
   // Request new hdfs token if the token is about to expire, and remove the old
@@ -625,12 +638,12 @@ public class DelegationTokenRenewer extends AbstractService {
 }
   }
   LOG.info("Token= (" + dttr + ") is expiring, request new token.");
-  requestNewHdfsDelegationToken(applicationIds, dttr.user,
+  requestNewHdfsDelegationTokenAsProxyUser(applicationIds, dttr.user,
   dttr.shouldCancelAtEnd);
 }
   }
 
-  private void requestNewHdfsDelegationToken(
+  private void requestNewHdfsDelegationTokenAsProxyUser(
   Collection referringAppIds,
   String user, boolean shouldCancelAtEnd) throws IOException,
   InterruptedException {
@@ -912,8 +925,8 @@ public class DelegationTokenRenewer extends AbstractService {
   // 
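
In short: when an application is recovered after a long RM outage, renewToken can fail with SecretManager.InvalidToken because the stored HDFS token's maxDate has already passed. The renewer now detects exactly that combination on a DelegationTokenRenewerAppRecoverEvent for a token of HDFS_DELEGATION_KIND and requests a fresh token via requestNewHdfsDelegationTokenAsProxyUser instead of failing the recovery.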

[34/47] hadoop git commit: Revert "Revert "HADOOP-13168. Support Future.get with timeout in ipc async calls.""

2016-06-07 Thread aengineer
Revert "Revert "HADOOP-13168. Support Future.get with timeout in ipc async 
calls.""

This reverts commit e4450d47f19131818e1c040b6bd8d85ae8250475.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/574dcd34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/574dcd34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/574dcd34

Branch: refs/heads/HDFS-1312
Commit: 574dcd34c0da1903d25e37dc5757642a584dc3d0
Parents: cba9a01
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:23 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:23 2016 +0800

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 119 --
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  62 +-
 .../apache/hadoop/util/concurrent/AsyncGet.java |  60 +
 .../hadoop/util/concurrent/AsyncGetFuture.java  |  73 +++
 .../org/apache/hadoop/ipc/TestAsyncIPC.java | 124 +++
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  24 +---
 .../ClientNamenodeProtocolTranslatorPB.java |  33 ++---
 7 files changed, 310 insertions(+), 185 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/574dcd34/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 9be4649..d1d5b17 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -18,46 +18,10 @@
 
 package org.apache.hadoop.ipc;
 
-import static org.apache.hadoop.ipc.RpcConstants.*;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.EOFException;
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.Hashtable;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import javax.net.SocketFactory;
-import javax.security.sasl.Sasl;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.protobuf.CodedOutputStream;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -93,14 +57,25 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.AsyncGet;
+import org.apache.hadoop.util.concurrent.AsyncGetFuture;
 import org.apache.htrace.core.Span;
 import org.apache.htrace.core.Tracer;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AbstractFuture;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.CodedOutputStream;
+import javax.net.SocketFactory;
+import javax.security.sasl.Sasl;
+import java.io.*;
+import java.net.*;
+import java.security.PrivilegedExceptionAction;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.hadoop.ipc.RpcConstants.CONNECTION_CONTEXT_CALL_ID;
+import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
 
 /** A client for an IPC 
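
What this restores for callers: an async RPC's result can now be awaited with a deadline. A hedged sketch (AsyncDistributedFileSystem appears in the diffstat above; the exact accessor and rename signature are assumptions):

    // Issue an async rename, then bound the wait instead of blocking forever;
    // get(long, TimeUnit) throws TimeoutException if the call is not yet done.
    AsyncDistributedFileSystem adfs = dfs.getAsyncDistributedFileSystem();
    Future<Void> future = adfs.rename(src, dst, Options.Rename.NONE);
    future.get(2, TimeUnit.SECONDS);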

[38/47] hadoop git commit: Revert "Revert "HADOOP-13226 Support async call retry and failover.""

2016-06-07 Thread aengineer
Revert "Revert "HADOOP-13226 Support async call retry and failover.""

This reverts commit 5360da8bd9f720384860f411bee081aef13b4bd4.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35f255b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35f255b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35f255b0

Branch: refs/heads/HDFS-1312
Commit: 35f255b03b1bb5c94063ec1818af1d253ceee991
Parents: 7e7b1ae
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:43 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:43 2016 +0800

--
 .../dev-support/findbugsExcludeFile.xml |   8 +-
 .../hadoop/io/retry/AsyncCallHandler.java   | 321 +++
 .../org/apache/hadoop/io/retry/CallReturn.java  |  75 +
 .../hadoop/io/retry/RetryInvocationHandler.java | 134 ++--
 .../apache/hadoop/io/retry/RetryPolicies.java   |   4 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |  25 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  13 +-
 .../apache/hadoop/util/concurrent/AsyncGet.java |  17 +-
 .../org/apache/hadoop/ipc/TestAsyncIPC.java |  10 +-
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |   7 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  42 +--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java|  43 +--
 .../apache/hadoop/hdfs/TestAsyncHDFSWithHA.java | 181 +++
 .../hdfs/server/namenode/ha/HATestUtil.java |   9 +-
 14 files changed, 775 insertions(+), 114 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35f255b0/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
--
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index ab8673b..a644aa5 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -345,7 +345,13 @@

  
 
- 
+ 
+ 
+   
+   
+   
+ 
+
  



http://git-wip-us.apache.org/repos/asf/hadoop/blob/35f255b0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
new file mode 100644
index 000..5a03b03
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
@@ -0,0 +1,321 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.retry;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.AsyncGet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.Method;
+import java.util.LinkedList;
+import java.util.Queue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+
+/** Handle async calls. */
+@InterfaceAudience.Private
+public class AsyncCallHandler {
+  static final Logger LOG = LoggerFactory.getLogger(AsyncCallHandler.class);
+
+private static final ThreadLocal<AsyncGet<?, ?>>
+  LOWER_LAYER_ASYNC_RETURN = new ThreadLocal<>();
+private static final ThreadLocal<AsyncGet<Object, Throwable>>
+  ASYNC_RETURN = new ThreadLocal<>();
+
+  /** @return the async return value from {@link AsyncCallHandler}. */
+  

[12/47] hadoop git commit: HADOOP-13171. Add StorageStatistics to S3A; instrument some more operations. Contributed by Steve Loughran.

2016-06-07 Thread aengineer
HADOOP-13171. Add StorageStatistics to S3A; instrument some more operations. 
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c58a59f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c58a59f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c58a59f7

Branch: refs/heads/HDFS-1312
Commit: c58a59f7081d55dd2108545ebf9ee48cf43ca944
Parents: 97e2449
Author: Chris Nauroth 
Authored: Fri Jun 3 08:55:33 2016 -0700
Committer: Chris Nauroth 
Committed: Fri Jun 3 08:55:33 2016 -0700

--
 .../hadoop/fs/contract/ContractTestUtils.java   | 420 +++
 .../fs/s3a/ProgressableProgressListener.java|  94 
 .../hadoop/fs/s3a/S3AFastOutputStream.java  |  65 +--
 .../org/apache/hadoop/fs/s3a/S3AFileStatus.java |   7 +
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 507 ++-
 .../hadoop/fs/s3a/S3AInstrumentation.java   | 218 +---
 .../apache/hadoop/fs/s3a/S3AOutputStream.java   |  98 +---
 .../hadoop/fs/s3a/S3AStorageStatistics.java | 104 
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  48 ++
 .../org/apache/hadoop/fs/s3a/Statistic.java | 143 ++
 .../src/site/markdown/tools/hadoop-aws/index.md |  12 +-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  | 153 ++
 .../hadoop/fs/s3a/TestS3AFileOperationCost.java | 191 +++
 .../hadoop/fs/s3a/scale/S3AScaleTestBase.java   | 154 ++
 .../fs/s3a/scale/TestS3ADeleteManyFiles.java|  10 +-
 .../s3a/scale/TestS3ADirectoryPerformance.java  | 189 +++
 .../scale/TestS3AInputStreamPerformance.java|   6 +-
 .../src/test/resources/log4j.properties |   4 +-
 18 files changed, 1984 insertions(+), 439 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58a59f7/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index 6343d40..20ba075 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -22,7 +22,9 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Assert;
 import org.junit.internal.AssumptionViolatedException;
@@ -34,8 +36,14 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
 import java.util.Properties;
+import java.util.Set;
 import java.util.UUID;
 
 /**
@@ -892,4 +900,416 @@ public class ContractTestUtils extends Assert {
   fs.delete(objectPath, false);
 }
   }
+
+  /**
+   * Make times more readable, by adding a "," every three digits.
+   * @param nanos nanos or other large number
+   * @return a string for logging
+   */
+  public static String toHuman(long nanos) {
+return String.format(Locale.ENGLISH, "%,d", nanos);
+  }
+
+  /**
+   * Log the bandwidth of a timer as inferred from the number of
+   * bytes processed.
+   * @param timer timer
+   * @param bytes bytes processed in the time period
+   */
+  public static void bandwidth(NanoTimer timer, long bytes) {
+LOG.info("Bandwidth = {}  MB/S",
+timer.bandwidthDescription(bytes));
+  }
+
+  /**
+   * Work out the bandwidth in MB/s.
+   * @param bytes bytes
+   * @param durationNS duration in nanos
+   * @return the number of megabytes/second of the recorded operation
+   */
+  public static double bandwidthMBs(long bytes, long durationNS) {
+return (bytes * 1000.0) / durationNS;
+  }
+
+  /**
+   * Recursively create a directory tree.
+   * Return the details about the created tree. The files and directories
+   * are those created under the path, not the base directory created. That
+   * is retrievable via {@link TreeScanResults#getBasePath()}.
+   * @param fs filesystem
+   * @param current parent dir
+   * @param depth depth of directory tree
+   * @param width width: subdirs per entry
+   * @param files number of files per entry
+   * 
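
As a quick check on the units of the new bandwidth helper (a worked example, not from the patch): bandwidthMBs(bytes, durationNS) = bytes * 1000.0 / durationNS, so moving 512 MB in 4 seconds gives 512_000_000 * 1000 / 4_000_000_000 = 128 MB/s.

    // Hedged usage sketch of the helpers added above.
    double mbs = ContractTestUtils.bandwidthMBs(512_000_000L, 4_000_000_000L);
    System.out.println("Bandwidth = " + mbs + " MB/s");            // 128.0
    System.out.println(ContractTestUtils.toHuman(4_000_000_000L)); // 4,000,000,000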

[24/47] hadoop git commit: Revert "HDFS-10430. Reuse FileSystem#access in TestAsyncDFS. Contributed by Xiaobing Zhou."

2016-06-07 Thread aengineer
Revert "HDFS-10430. Reuse FileSystem#access in TestAsyncDFS. Contributed by 
Xiaobing Zhou."

This reverts commit 21890c4239b6a82fd6aab3454ce396efe7b5.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cf47d85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cf47d85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cf47d85

Branch: refs/heads/HDFS-1312
Commit: 8cf47d8589badfc07ef4bca3328a420c7c68abbd
Parents: 5360da8
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:12 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:12 2016 -0700

--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 36 +++-
 1 file changed, 35 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf47d85/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
index c7615a9..ddcf492 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
@@ -34,6 +34,7 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -45,16 +46,19 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.TestDFSPermission.PermissionGenerator;
 import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
 import org.apache.hadoop.ipc.AsyncCallLimitExceededException;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -441,7 +445,7 @@ public class TestAsyncDFS {
 for (int i = 0; i < NUM_TESTS; i++) {
   assertTrue(fs.exists(dsts[i]));
   FsPermission fsPerm = new FsPermission(permissions[i]);
-  fs.access(dsts[i], fsPerm.getUserAction());
+  checkAccessPermissions(fs.getFileStatus(dsts[i]), 
fsPerm.getUserAction());
 }
 
 // test setOwner
@@ -470,4 +474,34 @@ public class TestAsyncDFS {
   assertTrue("group2".equals(fs.getFileStatus(dsts[i]).getGroup()));
 }
   }
+
+  static void checkAccessPermissions(FileStatus stat, FsAction mode)
+  throws IOException {
+checkAccessPermissions(UserGroupInformation.getCurrentUser(), stat, mode);
+  }
+
+  static void checkAccessPermissions(final UserGroupInformation ugi,
+  FileStatus stat, FsAction mode) throws IOException {
+FsPermission perm = stat.getPermission();
+String user = ugi.getShortUserName();
+List<String> groups = Arrays.asList(ugi.getGroupNames());
+
+if (user.equals(stat.getOwner())) {
+  if (perm.getUserAction().implies(mode)) {
+return;
+  }
+} else if (groups.contains(stat.getGroup())) {
+  if (perm.getGroupAction().implies(mode)) {
+return;
+  }
+} else {
+  if (perm.getOtherAction().implies(mode)) {
+return;
+  }
+}
+throw new AccessControlException(String.format(
+"Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat
+.getPath(), stat.getOwner(), stat.getGroup(),
+stat.isDirectory() ? "d" : "-", perm));
+  }
 }
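
The helper above walks owner, then group, then other, and relies on FsAction.implies() for each bit triplet. A minimal sketch of those building blocks in isolation (the demo class is hypothetical; FsPermission and FsAction are the real APIs the helper uses):

```java
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class ImpliesDemo {
  public static void main(String[] args) {
    FsPermission perm = new FsPermission((short) 0640);  // rw-r-----
    // Owner asking for READ_WRITE: user bits rw- imply rw- -> allowed
    System.out.println(perm.getUserAction().implies(FsAction.READ_WRITE)); // true
    // Group member asking for WRITE: group bits r-- do not imply -w- -> denied
    System.out.println(perm.getGroupAction().implies(FsAction.WRITE));     // false
    // Everyone else asking for READ: other bits --- deny everything
    System.out.println(perm.getOtherAction().implies(FsAction.READ));      // false
  }
}
```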





[40/47] hadoop git commit: HADOOP-12807 S3AFileSystem should read AWS credentials from environment variables. Contributed by Tobin Baker.

2016-06-07 Thread aengineer
HADOOP-12807 S3AFileSystem should read AWS credentials from environment 
variables. Contributed by Tobin Baker.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3f78d8f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3f78d8f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3f78d8f

Branch: refs/heads/HDFS-1312
Commit: a3f78d8fa83f07f9183f3546203a191fcf50008c
Parents: 4a1cedc
Author: Steve Loughran 
Authored: Mon Jun 6 23:40:49 2016 +0200
Committer: Steve Loughran 
Committed: Mon Jun 6 23:42:36 2016 +0200

--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java  |  2 ++
 .../src/site/markdown/tools/hadoop-aws/index.md  | 19 +++
 2 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3f78d8f/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index c028544..0281a3a 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 
+import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.AmazonServiceException;
 import com.amazonaws.ClientConfiguration;
@@ -464,6 +465,7 @@ public class S3AFileSystem extends FileSystem {
   new BasicAWSCredentialsProvider(
   creds.getAccessKey(), creds.getAccessSecret()),
   new InstanceProfileCredentialsProvider(),
+  new EnvironmentVariableCredentialsProvider(),
   new AnonymousAWSCredentialsProvider()
   );
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3f78d8f/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 7a5e455..7d63a86 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -202,6 +202,25 @@ credentials in S3AFileSystem.
 For additional reading on the credential provider API see:
 [Credential Provider 
API](../../../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
 
+ Authenticating via environment variables
+
+S3A supports configuration via [the standard AWS environment 
variables](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-environment).
+
+The core environment variables are for the access key and associated secret:
+
+```
+export AWS_ACCESS_KEY_ID=my.aws.key
+export AWS_SECRET_ACCESS_KEY=my.secret.key
+```
+
+These environment variables can be used to set the authentication credentials
+instead of properties in the Hadoop configuration. *Important:* these
+environment variables are not propagated from client to server when
+YARN applications are launched. That is: having the AWS environment variables
+set when an application is launched will not permit the launched application
+to access S3 resources. The environment variables must (somehow) be set
+on the hosts/processes where the work is executed.
+
 # End to End Steps for Distcp and S3 with Credential Providers
 
 ## provision
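
For comparison, the same credentials can be supplied through the Hadoop configuration rather than the environment; unlike the AWS_* variables, configuration properties travel with the job. A minimal sketch with placeholder key values and bucket name (fs.s3a.access.key and fs.s3a.secret.key are the standard S3A properties):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class S3AConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder values; in practice use a credential provider, not literals.
    conf.set("fs.s3a.access.key", "my.aws.key");
    conf.set("fs.s3a.secret.key", "my.secret.key");
    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    System.out.println(fs.exists(new Path("/")));
  }
}
```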





[43/47] hadoop git commit: YARN-4525. Fix bug in RLESparseResourceAllocation.getRangeOverlapping(). (Ishai Menache and Carlo Curino via asuresh)

2016-06-07 Thread aengineer
YARN-4525. Fix bug in RLESparseResourceAllocation.getRangeOverlapping(). (Ishai 
Menache and Carlo Curino via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a154f75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a154f75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a154f75

Branch: refs/heads/HDFS-1312
Commit: 3a154f75ed85d864b3ffd35818992418f2b6aa59
Parents: 7a9b737
Author: Arun Suresh 
Authored: Mon Jun 6 21:18:32 2016 -0700
Committer: Arun Suresh 
Committed: Mon Jun 6 21:18:32 2016 -0700

--
 .../RLESparseResourceAllocation.java|  6 +-
 .../TestRLESparseResourceAllocation.java| 22 
 2 files changed, 27 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a154f75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
index 63defb5..c18a93e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
@@ -510,7 +510,11 @@ public class RLESparseResourceAllocation {
   long previous = a.floorKey(start);
   a = a.tailMap(previous, true);
 }
-a = a.headMap(end, true);
+
+if (end < a.lastKey()) {
+  a = a.headMap(end, true);
+}
+
   }
   RLESparseResourceAllocation ret =
   new RLESparseResourceAllocation(a, resourceCalculator);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a154f75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
index b526484..f8d2a4a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
@@ -283,6 +283,28 @@ public class TestRLESparseResourceAllocation {
   }
 
   @Test
+  public void testRangeOverlapping() {
+ResourceCalculator resCalc = new DefaultResourceCalculator();
+
+RLESparseResourceAllocation r =
+new RLESparseResourceAllocation(resCalc);
+int[] alloc = {10, 10, 10, 10, 10, 10};
+int start = 100;
+Set<Entry<ReservationInterval, Resource>> inputs =
+generateAllocation(start, alloc, false).entrySet();
+for (Entry<ReservationInterval, Resource> ip : inputs) {
+  r.addInterval(ip.getKey(), ip.getValue());
+}
+long s = r.getEarliestStartTime();
+long d = r.getLatestNonNullTime();
+
+// tries to trigger "out-of-range" bug
+r =  r.getRangeOverlapping(s, d);
+r = r.getRangeOverlapping(s-1, d-1);
+r = r.getRangeOverlapping(s+1, d+1);
+  }
+
+  @Test
   public void testBlocks() {
 ResourceCalculator resCalc = new DefaultResourceCalculator();
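
The guard in the fix reflects NavigableMap's contract: on a bounded view (such as one produced by subMap/tailMap), headMap() throws IllegalArgumentException if the bound lies outside the view's range. A self-contained illustration of that behavior, assuming `a` can be such a bounded view:

```java
import java.util.NavigableMap;
import java.util.TreeMap;

public class HeadMapRangeDemo {
  public static void main(String[] args) {
    TreeMap<Long, Integer> alloc = new TreeMap<>();
    for (long t = 100; t < 160; t += 10) {
      alloc.put(t, 10);                               // allocations at t=100..150
    }
    NavigableMap<Long, Integer> view = alloc.subMap(100L, true, 150L, true);
    System.out.println(view.headMap(140L, true));     // fine: toKey inside the view
    try {
      view.headMap(151L, true);                       // toKey outside the view
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
    // Hence the patch only narrows with headMap when end < a.lastKey().
  }
}
```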
 





[29/47] hadoop git commit: Revert "HADOOP-12957. Limit the number of outstanding async calls. Contributed by Xiaobing Zhou"

2016-06-07 Thread aengineer
Revert "HADOOP-12957. Limit the number of outstanding async calls.  Contributed 
by Xiaobing Zhou"

This reverts commit 1b9f18623ab55507bea94888317c7d63d0f4a6f2.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d36b221
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d36b221
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d36b221

Branch: refs/heads/HDFS-1312
Commit: 4d36b221a24e3b626bb91093b0bb0fd377061cae
Parents: f23d5df
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:18 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:18 2016 -0700

--
 .../hadoop/fs/CommonConfigurationKeys.java  |   3 -
 .../ipc/AsyncCallLimitExceededException.java|  36 ---
 .../main/java/org/apache/hadoop/ipc/Client.java |  66 +
 .../org/apache/hadoop/ipc/TestAsyncIPC.java | 199 ++--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  12 +-
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 238 ++-
 6 files changed, 109 insertions(+), 445 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d36b221/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 06614db..86e1b43 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -324,9 +324,6 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   public static final long HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT =
 4*60*60; // 4 hours
   
-  public static final String  IPC_CLIENT_ASYNC_CALLS_MAX_KEY =
-  "ipc.client.async.calls.max";
-  public static final int IPC_CLIENT_ASYNC_CALLS_MAX_DEFAULT = 100;
   public static final String  IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY = 
"ipc.client.fallback-to-simple-auth-allowed";
   public static final boolean 
IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT = false;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d36b221/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
deleted file mode 100644
index db97b6c..000
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ipc;
-
-import java.io.IOException;
-
-/**
- * Signals that an AsyncCallLimitExceededException has occurred. This class is
- * used to make application code using async RPC aware that limit of max async
- * calls is reached, application code need to retrieve results from response of
- * established async calls to avoid buffer overflow in order for follow-on 
async
- * calls going correctly.
- */
-public class AsyncCallLimitExceededException extends IOException {
-  private static final long serialVersionUID = 1L;
-
-  public AsyncCallLimitExceededException(String message) {
-super(message);
-  }
-}
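
The javadoc above prescribes the caller-side recovery: when the cap is reached, drain results of already-issued calls, then continue issuing. A minimal sketch of that pattern against a stand-in client; LimitExceededException, asyncCall() and the cap of 3 are hypothetical placeholders, not the real IPC API:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;

public class AsyncLimitSketch {
  static final int MAX_ASYNC_CALLS = 3;               // stands in for ipc.client.async.calls.max
  static final List<Future<String>> outstanding = new ArrayList<>();

  static class LimitExceededException extends Exception {}

  // Stand-in for an async RPC that enforces the outstanding-call cap.
  static Future<String> asyncCall(int i) throws LimitExceededException {
    if (outstanding.size() >= MAX_ASYNC_CALLS) {
      throw new LimitExceededException();
    }
    Future<String> f = CompletableFuture.completedFuture("result-" + i);
    outstanding.add(f);
    return f;
  }

  public static void main(String[] args) throws Exception {
    for (int i = 0; i < 10; i++) {
      while (true) {
        try {
          asyncCall(i);
          break;
        } catch (LimitExceededException e) {
          // Drain finished calls to make room, as the javadoc above prescribes.
          for (Future<String> f : outstanding) {
            System.out.println(f.get());
          }
          outstanding.clear();
        }
      }
    }
    for (Future<String> f : outstanding) {
      System.out.println(f.get());
    }
  }
}
```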

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d36b221/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 

[02/47] hadoop git commit: YARN-5088. Improve "yarn log" command-line to read the last K bytes for the log files. Contributed by Xuan Gong

2016-06-07 Thread aengineer
YARN-5088. Improve "yarn log" command-line to read the last K bytes for the log 
files. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bc05e40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bc05e40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bc05e40

Branch: refs/heads/HDFS-1312
Commit: 0bc05e40fa7e183efe8463ada459c621da3ce3bf
Parents: 35356de
Author: Xuan 
Authored: Wed Jun 1 13:44:21 2016 -0700
Committer: Xuan 
Committed: Wed Jun 1 13:44:21 2016 -0700

--
 .../apache/hadoop/yarn/client/cli/LogsCLI.java  | 17 +++-
 .../hadoop/yarn/client/cli/TestLogsCLI.java | 70 ++-
 .../logaggregation/AggregatedLogFormat.java | 89 ++--
 .../logaggregation/ContainerLogsRequest.java| 13 ++-
 .../yarn/logaggregation/LogCLIHelpers.java  | 25 +++---
 .../webapp/AHSWebServices.java  | 50 ---
 .../webapp/TestAHSWebServices.java  | 66 +++
 .../nodemanager/webapp/NMWebServices.java   | 49 +--
 .../nodemanager/webapp/TestNMWebServices.java   | 48 ++-
 9 files changed, 384 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bc05e40/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index 2127006..bbe636f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -88,6 +88,7 @@ public class LogsCLI extends Configured implements Tool {
   private static final String SHOW_META_INFO = "show_meta_info";
   private static final String LIST_NODES_OPTION = "list_nodes";
   private static final String OUT_OPTION = "out";
+  private static final String SIZE_OPTION = "size";
   public static final String HELP_CMD = "help";
 
   @Override
@@ -113,6 +114,7 @@ public class LogsCLI extends Configured implements Tool {
 String[] logFiles = null;
 List<String> amContainersList = new ArrayList<String>();
 String localDir = null;
+long bytes = Long.MAX_VALUE;
 try {
   CommandLine commandLine = parser.parse(opts, args, true);
   appIdStr = commandLine.getOptionValue(APPLICATION_ID_OPTION);
@@ -134,6 +136,9 @@ public class LogsCLI extends Configured implements Tool {
   if (commandLine.hasOption(CONTAINER_LOG_FILES)) {
 logFiles = commandLine.getOptionValues(CONTAINER_LOG_FILES);
   }
+  if (commandLine.hasOption(SIZE_OPTION)) {
+bytes = Long.parseLong(commandLine.getOptionValue(SIZE_OPTION));
+  }
 } catch (ParseException e) {
   System.err.println("options parsing failed: " + e.getMessage());
   printHelpMessage(printOpts);
@@ -195,7 +200,7 @@ public class LogsCLI extends Configured implements Tool {
 
 ContainerLogsRequest request = new ContainerLogsRequest(appId,
 isApplicationFinished(appState), appOwner, nodeAddress, null,
-containerIdStr, localDir, logs);
+containerIdStr, localDir, logs, bytes);
 
 if (showMetaInfo) {
   return showMetaInfo(request, logCliHelper);
@@ -402,6 +407,7 @@ public class LogsCLI extends Configured implements Tool {
   ClientResponse response =
   webResource.path("ws").path("v1").path("node")
 .path("containerlogs").path(containerIdStr).path(logFile)
+.queryParam("size", Long.toString(request.getBytes()))
 .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
   out.println(response.getEntity(String.class));
   out.println("End of LogType:" + logFile);
@@ -442,7 +448,9 @@ public class LogsCLI extends Configured implements Tool {
 newOptions);
   }
 
-  private ContainerReport getContainerReport(String containerIdStr)
+  @Private
+  @VisibleForTesting
+  public ContainerReport getContainerReport(String containerIdStr)
   throws YarnException, IOException {
 YarnClient yarnClient = createYarnClient();
 try {
@@ -636,12 +644,16 @@ public class LogsCLI extends Configured implements Tool {
 opts.addOption(OUT_OPTION, true, "Local directory for storing individual "
 + "container logs. The container logs will be stored based on the "
 + "node the container ran on.");
+opts.addOption(SIZE_OPTION, true, "Prints the 
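
To make the new option's "last K bytes" semantics concrete, here is a minimal sketch of tailing a file by byte count; it illustrates the behavior only and is not the CLI's actual implementation:

```java
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;

public class TailBytesDemo {
  /** Read at most the last n bytes of a file, mirroring the -size semantics above. */
  static String tail(String path, long n) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(path, "r")) {
      long start = Math.max(0, raf.length() - n);
      raf.seek(start);
      byte[] buf = new byte[(int) (raf.length() - start)];
      raf.readFully(buf);
      return new String(buf, StandardCharsets.UTF_8);
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(tail(args[0], 4096));  // e.g. last 4 KB of a container log
  }
}
```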

[41/47] hadoop git commit: HDFS-10458. getFileEncryptionInfo should return quickly for non-encrypted cluster.

2016-06-07 Thread aengineer
HDFS-10458. getFileEncryptionInfo should return quickly for non-encrypted 
cluster.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6de9213d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6de9213d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6de9213d

Branch: refs/heads/HDFS-1312
Commit: 6de9213df111a9a4ed875db995d67af72d08a798
Parents: a3f78d8
Author: Zhe Zhang 
Authored: Mon Jun 6 15:52:39 2016 -0700
Committer: Zhe Zhang 
Committed: Mon Jun 6 15:52:39 2016 -0700

--
 .../server/namenode/EncryptionZoneManager.java  | 35 +---
 .../server/namenode/FSDirEncryptionZoneOp.java  |  2 +-
 2 files changed, 31 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6de9213d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 8454c04..41dbb59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -95,7 +95,7 @@ public class EncryptionZoneManager {
 }
   }
 
-  private final TreeMap<Long, EncryptionZoneInt> encryptionZones;
+  private TreeMap<Long, EncryptionZoneInt> encryptionZones = null;
   private final FSDirectory dir;
   private final int maxListEncryptionZonesResponses;
 
@@ -106,7 +106,6 @@ public class EncryptionZoneManager {
*/
   public EncryptionZoneManager(FSDirectory dir, Configuration conf) {
 this.dir = dir;
-encryptionZones = new TreeMap<Long, EncryptionZoneInt>();
 maxListEncryptionZonesResponses = conf.getInt(
 DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
 DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT
@@ -143,6 +142,9 @@ public class EncryptionZoneManager {
   CipherSuite suite, CryptoProtocolVersion version, String keyName) {
 final EncryptionZoneInt ez = new EncryptionZoneInt(
 inodeId, suite, version, keyName);
+if (encryptionZones == null) {
+  encryptionZones = new TreeMap<>();
+}
 encryptionZones.put(inodeId, ez);
   }
 
@@ -153,7 +155,9 @@ public class EncryptionZoneManager {
*/
   void removeEncryptionZone(Long inodeId) {
 assert dir.hasWriteLock();
-encryptionZones.remove(inodeId);
+if (hasCreatedEncryptionZone()) {
+  encryptionZones.remove(inodeId);
+}
   }
 
   /**
@@ -201,6 +205,9 @@ public class EncryptionZoneManager {
   private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) {
 assert dir.hasReadLock();
 Preconditions.checkNotNull(iip);
+if (!hasCreatedEncryptionZone()) {
+  return null;
+}
 List<INode> inodes = iip.getReadOnlyINodes();
 for (int i = inodes.size() - 1; i >= 0; i--) {
   final INode inode = inodes.get(i);
@@ -313,7 +320,8 @@ public class EncryptionZoneManager {
   throw new IOException("Attempt to create an encryption zone for a 
file.");
 }
 
-if (encryptionZones.get(srcINode.getId()) != null) {
+if (hasCreatedEncryptionZone() && encryptionZones.
+get(srcINode.getId()) != null) {
   throw new IOException("Directory " + src + " is already an encryption " +
   "zone.");
 }
@@ -340,6 +348,9 @@ public class EncryptionZoneManager {
   BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
   throws IOException {
 assert dir.hasReadLock();
+if (!hasCreatedEncryptionZone()) {
+  return new BatchedListEntries<EncryptionZone>(Lists.<EncryptionZone>newArrayList(), false);
+}
 NavigableMap<Long, EncryptionZoneInt> tailMap = encryptionZones.tailMap(prevId, false);
 final int numResponses = Math.min(maxListEncryptionZonesResponses,
@@ -379,7 +390,18 @@ public class EncryptionZoneManager {
* @return number of encryption zones.
*/
   public int getNumEncryptionZones() {
-return encryptionZones.size();
+return hasCreatedEncryptionZone() ?
+encryptionZones.size() : 0;
+  }
+
+  /**
+   * @return Whether there has been any attempt to create an encryption zone in
+   * the cluster at all. If not, it is safe to quickly return null when
+   * checking the encryption information of any file or directory in the
+   * cluster.
+   */
+  public boolean hasCreatedEncryptionZone() {
+return encryptionZones != null;
   }
 
   /**
@@ -387,6 +409,9 @@ public class 
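
The essence of the change is lazy initialization: encryptionZones stays null until the first zone is created, so every lookup on a never-encrypted cluster can return immediately. A simplified, self-contained sketch, with synchronized standing in for the FSDirectory read/write locks and illustrative names throughout:

```java
import java.util.TreeMap;

public class LazyZones {
  private TreeMap<Long, String> zones = null;   // null until the first zone is created

  synchronized void addZone(long inodeId, String keyName) {
    if (zones == null) {
      zones = new TreeMap<>();                  // pay the cost only when encryption is used
    }
    zones.put(inodeId, keyName);
  }

  synchronized String zoneFor(long inodeId) {
    // The common, non-encrypted case short-circuits without touching the map.
    return zones == null ? null : zones.get(inodeId);
  }

  synchronized int size() {
    return zones == null ? 0 : zones.size();
  }
}
```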

[10/47] hadoop git commit: HDFS-10341. Add a metric to expose the timeout number of pending replication blocks. (Contributed by Akira Ajisaka)

2016-06-07 Thread aengineer
HDFS-10341. Add a metric to expose the timeout number of pending replication 
blocks. (Contributed by Akira Ajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97e24494
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97e24494
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97e24494

Branch: refs/heads/HDFS-1312
Commit: 97e244947719d483c3f80521a00fec8e13dcb637
Parents: 1df6f57
Author: Arpit Agarwal 
Authored: Thu Jun 2 13:14:45 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 2 13:14:45 2016 -0700

--
 .../hadoop-common/src/site/markdown/Metrics.md  |  1 +
 .../server/blockmanagement/BlockManager.java|  4 
 .../PendingReconstructionBlocks.java| 16 +++-
 .../hdfs/server/namenode/FSNamesystem.java  |  5 +
 .../TestPendingReconstruction.java  | 20 ++--
 5 files changed, 39 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97e24494/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 699316f..e4e2443 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -218,6 +218,7 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `TotalSyncCount` | Total number of sync operations performed by edit log |
 | `TotalSyncTimes` | Total number of milliseconds spent by various edit logs 
in sync operation|
 | `NameDirSize` | NameNode name directories size in bytes |
+| `NumTimedOutPendingReconstructions` | The number of timed out 
reconstructions. Not the number of unique blocks that timed out. |
 
 JournalNode
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97e24494/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ed57a86..1a76e09 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -198,6 +198,10 @@ public class BlockManager implements BlockStatsMXBean {
   public int getPendingDataNodeMessageCount() {
 return pendingDNMessages.count();
   }
+  /** Used by metrics. */
+  public long getNumTimedOutPendingReconstructions() {
+return pendingReconstruction.getNumTimedOuts();
+  }
 
   /**replicationRecheckInterval is how often namenode checks for new 
replication work*/
   private final long replicationRecheckInterval;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97e24494/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
index 528199c..956e94f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
@@ -50,6 +50,7 @@ class PendingReconstructionBlocks {
   private final ArrayList<BlockInfo> timedOutItems;
   Daemon timerThread = null;
   private volatile boolean fsRunning = true;
+  private long timedOutCount = 0L;
 
   //
   // It might take anywhere between 5 to 10 minutes before
@@ -125,6 +126,7 @@ class PendingReconstructionBlocks {
 synchronized (pendingReconstructions) {
   pendingReconstructions.clear();
   timedOutItems.clear();
+  timedOutCount = 0L;
 }
   }
 
@@ -149,6 +151,16 @@ class PendingReconstructionBlocks {
   }
 
   /**
+   * Used for metrics.
+   * @return The number of timeouts
+   */
+  long getNumTimedOuts() {
+synchronized (timedOutItems) {
+  return timedOutCount + timedOutItems.size();
+}
+  }
+
+  /**
* Returns a list of blocks that have timed 
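
The metric composes "already drained" with "still pending": getNumTimedOuts() returns the running count plus the current list size, and clear() resets both. A simplified model of that accounting, assuming the running count grows when the pending list is drained (the class and the String stand-in for blocks are illustrative, not the real BlockInfo plumbing):

```java
import java.util.ArrayList;
import java.util.List;

public class TimedOutCounter {
  private final List<String> timedOutItems = new ArrayList<>();
  private long timedOutCount = 0L;   // items already drained by the caller

  // Called when the timer thread detects an expired block.
  void onTimeout(String block) {
    synchronized (timedOutItems) {
      timedOutItems.add(block);
    }
  }

  // Drains pending items, remembering how many were handed out.
  List<String> getTimedOutBlocks() {
    synchronized (timedOutItems) {
      timedOutCount += timedOutItems.size();
      List<String> out = new ArrayList<>(timedOutItems);
      timedOutItems.clear();
      return out;
    }
  }

  // The metric: everything ever timed out = drained + still pending.
  long getNumTimedOuts() {
    synchronized (timedOutItems) {
      return timedOutCount + timedOutItems.size();
    }
  }
}
```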

[20/47] hadoop git commit: HDFS-9877. HDFS Namenode UI: Fix browsing directories that need to be encoded (Ravi Prakash via aw)

2016-06-07 Thread aengineer
HDFS-9877. HDFS Namenode UI: Fix browsing directories that need to be encoded 
(Ravi Prakash via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15f01843
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15f01843
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15f01843

Branch: refs/heads/HDFS-1312
Commit: 15f018434c5b715729488fd0b03a11f1bc943470
Parents: 713cb71
Author: Allen Wittenauer 
Authored: Fri Jun 3 17:06:29 2016 -0700
Committer: Allen Wittenauer 
Committed: Fri Jun 3 17:06:29 2016 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15f01843/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 102da9d..adb83a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -33,7 +33,7 @@
   $(window).bind('hashchange', function () {
 $('#alert-panel').hide();
 
-var dir = window.location.hash.slice(1);
+var dir = decodeURIComponent(window.location.hash.slice(1));
 if(dir == "") {
   dir = "/";
 }
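
The one-line fix decodes the URI-encoded hash fragment before treating it as an HDFS path. A rough Java analogue of the round-trip; URLEncoder/URLDecoder only approximate the browser's encodeURIComponent/decodeURIComponent (spaces become '+', for instance):

```java
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public class HashPathDemo {
  public static void main(String[] args) throws Exception {
    String dir = "/user/dir with spaces";  // a directory name that needs encoding
    String hash = URLEncoder.encode(dir, StandardCharsets.UTF_8.name());
    System.out.println(hash);              // %2Fuser%2Fdir+with+spaces - unusable as a path
    // Decoding first, as the fix does, recovers the original directory name.
    System.out.println(URLDecoder.decode(hash, StandardCharsets.UTF_8.name()));
  }
}
```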





[45/47] hadoop git commit: HDFS-10485. Fix findbugs warning in FSEditLog.java. (aajisaka)

2016-06-07 Thread aengineer
HDFS-10485. Fix findbugs warning in FSEditLog.java. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6205303
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6205303
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6205303

Branch: refs/heads/HDFS-1312
Commit: e620530301fd3e62537d4b7bc3d8ed296bda1ffc
Parents: bddea5f
Author: Akira Ajisaka 
Authored: Tue Jun 7 17:52:03 2016 +0900
Committer: Akira Ajisaka 
Committed: Tue Jun 7 17:52:55 2016 +0900

--
 .../apache/hadoop/hdfs/server/namenode/FSEditLog.java| 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6205303/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 809d9e6..57229da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -27,6 +27,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -174,7 +175,7 @@ public class FSEditLog implements LogsPurgeable {
   
   // these are statistics counters.
   private long numTransactions;// number of transactions
-  private long numTransactionsBatchedInSync;
+  private final AtomicLong numTransactionsBatchedInSync = new AtomicLong();
   private long totalTimeTransactions;  // total time for all transactions
   private NameNodeMetrics metrics;
 
@@ -672,7 +673,7 @@ public class FSEditLog implements LogsPurgeable {
   if (metrics != null) { // Metrics non-null only when used inside name 
node
 metrics.addSync(elapsed);
 metrics.incrTransactionsBatchedInSync(editsBatchedInSync);
-numTransactionsBatchedInSync += editsBatchedInSync;
+numTransactionsBatchedInSync.addAndGet(editsBatchedInSync);
   }
   
 } finally {
@@ -712,7 +713,7 @@ public class FSEditLog implements LogsPurgeable {
 buf.append(" Total time for transactions(ms): ");
 buf.append(totalTimeTransactions);
 buf.append(" Number of transactions batched in Syncs: ");
-buf.append(numTransactionsBatchedInSync);
+buf.append(numTransactionsBatchedInSync.get());
 buf.append(" Number of syncs: ");
 buf.append(editLogStream.getNumSync());
 buf.append(" SyncTimes(ms): ");
@@ -1281,7 +1282,9 @@ public class FSEditLog implements LogsPurgeable {
 "Cannot start log segment at txid %s when next expected " +
 "txid is %s", segmentTxId, txid + 1);
 
-numTransactions = totalTimeTransactions = numTransactionsBatchedInSync = 0;
+numTransactions = 0;
+totalTimeTransactions = 0;
+numTransactionsBatchedInSync.set(0L);
 
 // TODO no need to link this back to storage anymore!
 // See HDFS-2174.
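
The findbugs warning is about unsynchronized updates to a shared counter: += on a plain long is a read-modify-write, so concurrent log syncs can lose increments. A minimal demonstration of the race and the AtomicLong remedy (the demo class is ours):

```java
import java.util.concurrent.atomic.AtomicLong;

public class CounterRaceDemo {
  static long plain = 0;                        // unsynchronized, findbugs-flagged pattern
  static final AtomicLong atomic = new AtomicLong();

  public static void main(String[] args) throws InterruptedException {
    Runnable task = () -> {
      for (int i = 0; i < 1_000_000; i++) {
        plain += 1;                             // lost updates possible
        atomic.addAndGet(1);                    // always exact
      }
    };
    Thread t1 = new Thread(task), t2 = new Thread(task);
    t1.start(); t2.start(); t1.join(); t2.join();
    System.out.println("plain  = " + plain);           // often < 2,000,000
    System.out.println("atomic = " + atomic.get());    // exactly 2,000,000
  }
}
```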





[30/47] hadoop git commit: Revert "HDFS-10224. Implement asynchronous rename for DistributedFileSystem. Contributed by Xiaobing Zhou"

2016-06-07 Thread aengineer
Revert "HDFS-10224. Implement asynchronous rename for DistributedFileSystem.  
Contributed by Xiaobing Zhou"

This reverts commit fc94810d3f537e51e826fc21ade7867892b9d8dc.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/106234d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/106234d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/106234d8

Branch: refs/heads/HDFS-1312
Commit: 106234d873c60fa52cd0d812fb1cdc0c6b998a6d
Parents: 4d36b22
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:55 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:55 2016 -0700

--
 .../java/org/apache/hadoop/fs/FileSystem.java   |   1 +
 .../main/java/org/apache/hadoop/ipc/Client.java |  11 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  34 +--
 .../org/apache/hadoop/ipc/TestAsyncIPC.java |   2 +-
 .../hadoop/hdfs/AsyncDistributedFileSystem.java | 110 
 .../hadoop/hdfs/DistributedFileSystem.java  |  22 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  45 +---
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 258 ---
 8 files changed, 20 insertions(+), 463 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/106234d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 9e13a7a..0ecd8b7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1252,6 +1252,7 @@ public abstract class FileSystem extends Configured 
implements Closeable {
   /**
* Renames Path src to Path dst
* 
+   * Fails if src is a file and dst is a directory.
* Fails if src is a directory and dst is a file.
* Fails if the parent of dst does not exist or is a file.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/106234d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index d59aeb89..f206861 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -119,8 +119,7 @@ public class Client implements AutoCloseable {
 
   private static final ThreadLocal<Integer> callId = new ThreadLocal<Integer>();
   private static final ThreadLocal<Integer> retryCount = new ThreadLocal<Integer>();
-  private static final ThreadLocal<Future<?>>
-      RETURN_RPC_RESPONSE = new ThreadLocal<>();
+  private static final ThreadLocal<Future<?>> returnValue = new ThreadLocal<>();
   private static final ThreadLocal<Boolean> asynchronousMode =
       new ThreadLocal<Boolean>() {
         @Override
@@ -131,8 +130,8 @@ public class Client implements AutoCloseable {
 
   @SuppressWarnings("unchecked")
   @Unstable
-  public static <T> Future<T> getReturnRpcResponse() {
-    return (Future<T>) RETURN_RPC_RESPONSE.get();
+  public static <T> Future<T> getReturnValue() {
+    return (Future<T>) returnValue.get();
   }
 
   /** Set call id and retry count for the next call. */
@@ -1397,7 +1396,7 @@ public class Client implements AutoCloseable {
 }
   };
 
-  RETURN_RPC_RESPONSE.set(returnFuture);
+  returnValue.set(returnFuture);
   return null;
 } else {
   return getRpcResponse(call, connection);
@@ -1411,7 +1410,7 @@ public class Client implements AutoCloseable {
*  synchronous mode.
*/
   @Unstable
-  public static boolean isAsynchronousMode() {
+  static boolean isAsynchronousMode() {
 return asynchronousMode.get();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/106234d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 8fcdb78..071e2e8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -26,9 +26,7 @@ import 
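
The mechanic being reverted (and later restored) is a per-thread mode switch: one ThreadLocal selects sync vs. async, and a second hands the Future back to the caller. A self-contained sketch of that shape; the names and the fake call are ours, not the real Client internals:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;

public class AsyncModeSketch {
  // Per-thread flag mirroring Client's asynchronousMode, defaulting to synchronous.
  private static final ThreadLocal<Boolean> ASYNC =
      ThreadLocal.withInitial(() -> Boolean.FALSE);
  // Per-thread slot mirroring the returnValue ThreadLocal in the diff above.
  private static final ThreadLocal<Future<String>> RESULT = new ThreadLocal<>();

  static String call(String request) {
    if (ASYNC.get()) {
      RESULT.set(CompletableFuture.completedFuture("reply:" + request));
      return null;                       // async path: caller fetches the Future
    }
    return "reply:" + request;           // sync path: answer returned inline
  }

  public static void main(String[] args) throws Exception {
    ASYNC.set(true);
    call("ping");
    System.out.println(RESULT.get().get());   // reply:ping
  }
}
```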

[35/47] hadoop git commit: Revert "Revert "HDFS-10390. Implement asynchronous setAcl/getAclStatus for DistributedFileSystem. Contributed by Xiaobing Zhou""

2016-06-07 Thread aengineer
Revert "Revert "HDFS-10390. Implement asynchronous setAcl/getAclStatus for 
DistributedFileSystem.  Contributed by Xiaobing Zhou""

This reverts commit b82c74b9102ba95eae776501ed4484be9edd8c96.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3d81f38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3d81f38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3d81f38

Branch: refs/heads/HDFS-1312
Commit: b3d81f38da5d3d913e7b7ed498198c899c1e68b7
Parents: 574dcd3
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:30 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:30 2016 +0800

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  59 
 .../hadoop/hdfs/DistributedFileSystem.java  |   3 +
 .../ClientNamenodeProtocolTranslatorPB.java |  30 +-
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 310 +++
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  |  15 +-
 .../hdfs/server/namenode/FSAclBaseTest.java |  12 +-
 6 files changed, 411 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3d81f38/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
index 6bfd71d..29bac2a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
@@ -19,12 +19,16 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.concurrent.Future;
 
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.util.concurrent.AsyncGetFuture;
 import org.apache.hadoop.ipc.Client;
@@ -83,6 +87,7 @@ public class AsyncDistributedFileSystem {
   public Future<Void> rename(Path src, Path dst,
   final Options.Rename... options) throws IOException {
 dfs.getFsStatistics().incrementWriteOps(1);
+dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.RENAME);
 
 final Path absSrc = dfs.fixRelativePart(src);
 final Path absDst = dfs.fixRelativePart(dst);
@@ -111,6 +116,7 @@ public class AsyncDistributedFileSystem {
   public Future<Void> setPermission(Path p, final FsPermission permission)
   throws IOException {
 dfs.getFsStatistics().incrementWriteOps(1);
+dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_PERMISSION);
 final Path absPath = dfs.fixRelativePart(p);
 final boolean isAsync = Client.isAsynchronousMode();
 Client.setAsynchronousMode(true);
@@ -142,6 +148,7 @@ public class AsyncDistributedFileSystem {
 }
 
 dfs.getFsStatistics().incrementWriteOps(1);
+dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_OWNER);
 final Path absPath = dfs.fixRelativePart(p);
 final boolean isAsync = Client.isAsynchronousMode();
 Client.setAsynchronousMode(true);
@@ -152,4 +159,56 @@ public class AsyncDistributedFileSystem {
   Client.setAsynchronousMode(isAsync);
 }
   }
+
+  /**
+   * Fully replaces ACL of files and directories, discarding all existing
+   * entries.
+   *
+   * @param p
+   *  Path to modify
+   * @param aclSpec
+   *  List describing modifications, must include entries for
+   *  user, group, and others for compatibility with permission bits.
+   * @throws IOException
+   *   if an ACL could not be modified
+   * @return an instance of Future, #get of which is invoked to wait for
+   * asynchronous call being finished.
+   */
+  public Future<Void> setAcl(Path p, final List<AclEntry> aclSpec)
+  throws IOException {
+dfs.getFsStatistics().incrementWriteOps(1);
+dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_ACL);
+final Path absPath = dfs.fixRelativePart(p);
+final boolean isAsync = Client.isAsynchronousMode();
+Client.setAsynchronousMode(true);
+try {
+  dfs.getClient().setAcl(dfs.getPathName(absPath), aclSpec);
+  return getReturnValue();
+} 

[37/47] hadoop git commit: Revert "Revert "HDFS-10430. Reuse FileSystem#access in TestAsyncDFS. Contributed by Xiaobing Zhou.""

2016-06-07 Thread aengineer
Revert "Revert "HDFS-10430. Reuse FileSystem#access in TestAsyncDFS. 
Contributed by Xiaobing Zhou.""

This reverts commit 8cf47d8589badfc07ef4bca3328a420c7c68abbd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e7b1ae0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e7b1ae0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e7b1ae0

Branch: refs/heads/HDFS-1312
Commit: 7e7b1ae03759da0becfef677e1d5f7a2ed9041c3
Parents: db41e6d
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:38 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:38 2016 +0800

--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 36 +---
 1 file changed, 1 insertion(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e7b1ae0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
index ddcf492..c7615a9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
@@ -34,7 +34,6 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -46,19 +45,16 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.TestDFSPermission.PermissionGenerator;
 import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
 import org.apache.hadoop.ipc.AsyncCallLimitExceededException;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -445,7 +441,7 @@ public class TestAsyncDFS {
 for (int i = 0; i < NUM_TESTS; i++) {
   assertTrue(fs.exists(dsts[i]));
   FsPermission fsPerm = new FsPermission(permissions[i]);
-  checkAccessPermissions(fs.getFileStatus(dsts[i]), 
fsPerm.getUserAction());
+  fs.access(dsts[i], fsPerm.getUserAction());
 }
 
 // test setOwner
@@ -474,34 +470,4 @@ public class TestAsyncDFS {
   assertTrue("group2".equals(fs.getFileStatus(dsts[i]).getGroup()));
 }
   }
-
-  static void checkAccessPermissions(FileStatus stat, FsAction mode)
-  throws IOException {
-checkAccessPermissions(UserGroupInformation.getCurrentUser(), stat, mode);
-  }
-
-  static void checkAccessPermissions(final UserGroupInformation ugi,
-  FileStatus stat, FsAction mode) throws IOException {
-FsPermission perm = stat.getPermission();
-String user = ugi.getShortUserName();
-List<String> groups = Arrays.asList(ugi.getGroupNames());
-
-if (user.equals(stat.getOwner())) {
-  if (perm.getUserAction().implies(mode)) {
-return;
-  }
-} else if (groups.contains(stat.getGroup())) {
-  if (perm.getGroupAction().implies(mode)) {
-return;
-  }
-} else {
-  if (perm.getOtherAction().implies(mode)) {
-return;
-  }
-}
-throw new AccessControlException(String.format(
-"Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat
-.getPath(), stat.getOwner(), stat.getGroup(),
-stat.isDirectory() ? "d" : "-", perm));
-  }
 }





[28/47] hadoop git commit: Revert "HDFS-10346. Implement asynchronous setPermission/setOwner for DistributedFileSystem. Contributed by Xiaobing Zhou"

2016-06-07 Thread aengineer
Revert "HDFS-10346. Implement asynchronous setPermission/setOwner for 
DistributedFileSystem.  Contributed by  Xiaobing Zhou"

This reverts commit 7251bb922b20dae49c8c6854864095fb16d8cbd5.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f23d5dfc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f23d5dfc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f23d5dfc

Branch: refs/heads/HDFS-1312
Commit: f23d5dfc60a017187ae57f3667ac0e688877c2dd
Parents: e4450d4
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:17 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:17 2016 -0700

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  59 
 .../ClientNamenodeProtocolTranslatorPB.java |  39 +--
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 267 ++-
 .../apache/hadoop/hdfs/TestDFSPermission.java   |  29 +-
 4 files changed, 43 insertions(+), 351 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f23d5dfc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
index 4fe0861..356ae3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
@@ -27,7 +27,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.ipc.Client;
 
@@ -38,9 +37,6 @@ import com.google.common.util.concurrent.AbstractFuture;
  * This instance of this class is the way end-user code interacts
  * with a Hadoop DistributedFileSystem in an asynchronous manner.
  *
- * This class is unstable, so no guarantee is provided as to reliability,
- * stability or compatibility across any level of release granularity.
- *
  */
 @Unstable
 public class AsyncDistributedFileSystem {
@@ -115,59 +111,4 @@ public class AsyncDistributedFileSystem {
   Client.setAsynchronousMode(isAsync);
 }
   }
-
-  /**
-   * Set permission of a path.
-   *
-   * @param p
-   *  the path the permission is set to
-   * @param permission
-   *  the permission that is set to a path.
-   * @return an instance of Future, #get of which is invoked to wait for
-   * asynchronous call being finished.
-   */
-  public Future<Void> setPermission(Path p, final FsPermission permission)
-  throws IOException {
-dfs.getFsStatistics().incrementWriteOps(1);
-final Path absPath = dfs.fixRelativePart(p);
-final boolean isAsync = Client.isAsynchronousMode();
-Client.setAsynchronousMode(true);
-try {
-  dfs.getClient().setPermission(dfs.getPathName(absPath), permission);
-  return getReturnValue();
-} finally {
-  Client.setAsynchronousMode(isAsync);
-}
-  }
-
-  /**
-   * Set owner of a path (i.e. a file or a directory). The parameters username
-   * and groupname cannot both be null.
-   *
-   * @param p
-   *  The path
-   * @param username
-   *  If it is null, the original username remains unchanged.
-   * @param groupname
-   *  If it is null, the original groupname remains unchanged.
-   * @return an instance of Future, #get of which is invoked to wait for
-   * asynchronous call being finished.
-   */
-  public Future<Void> setOwner(Path p, String username, String groupname)
-  throws IOException {
-if (username == null && groupname == null) {
-  throw new IOException("username == null && groupname == null");
-}
-
-dfs.getFsStatistics().incrementWriteOps(1);
-final Path absPath = dfs.fixRelativePart(p);
-final boolean isAsync = Client.isAsynchronousMode();
-Client.setAsynchronousMode(true);
-try {
-  dfs.getClient().setOwner(dfs.getPathName(absPath), username, groupname);
-  return getReturnValue();
-} finally {
-  Client.setAsynchronousMode(isAsync);
-}
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f23d5dfc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

[11/47] hadoop git commit: HADOOP-13171. Add StorageStatistics to S3A; instrument some more operations. Contributed by Steve Loughran.

2016-06-07 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58a59f7/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
new file mode 100644
index 000..d29cb2f
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+/**
+ * Statistic which are collected in S3A.
+ * These statistics are available at a low level in {@link 
S3AStorageStatistics}
+ * and as metrics in {@link S3AInstrumentation}
+ */
+public enum Statistic {
+
+  DIRECTORIES_CREATED("directories_created",
+  "Total number of directories created through the object store."),
+  DIRECTORIES_DELETED("directories_deleted",
+  "Total number of directories deleted through the object store."),
+  FILES_COPIED("files_copied",
+  "Total number of files copied within the object store."),
+  FILES_COPIED_BYTES("files_copied_bytes",
+  "Total number of bytes copied within the object store."),
+  FILES_CREATED("files_created",
+  "Total number of files created through the object store."),
+  FILES_DELETED("files_deleted",
+  "Total number of files deleted from the object store."),
+  IGNORED_ERRORS("ignored_errors", "Errors caught and ignored"),
+  INVOCATION_COPY_FROM_LOCAL_FILE("invocations_copyfromlocalfile",
+  "Calls of copyFromLocalFile()"),
+  INVOCATION_EXISTS("invocations_exists",
+  "Calls of exists()"),
+  INVOCATION_GET_FILE_STATUS("invocations_getfilestatus",
+  "Calls of getFileStatus()"),
+  INVOCATION_GLOB_STATUS("invocations_globstatus",
+  "Calls of globStatus()"),
+  INVOCATION_IS_DIRECTORY("invocations_is_directory",
+  "Calls of isDirectory()"),
+  INVOCATION_IS_FILE("invocations_is_file",
+  "Calls of isFile()"),
+  INVOCATION_LIST_FILES("invocations_listfiles",
+  "Calls of listFiles()"),
+  INVOCATION_LIST_LOCATED_STATUS("invocations_listlocatedstatus",
+  "Calls of listLocatedStatus()"),
+  INVOCATION_LIST_STATUS("invocations_liststatus",
+  "Calls of listStatus()"),
+  INVOCATION_MKDIRS("invocations_mdkirs",
+  "Calls of mkdirs()"),
+  INVOCATION_RENAME("invocations_rename",
+  "Calls of rename()"),
+  OBJECT_COPY_REQUESTS("object_copy_requests", "Object copy requests"),
+  OBJECT_DELETE_REQUESTS("object_delete_requests", "Object delete requests"),
+  OBJECT_LIST_REQUESTS("object_list_requests",
+  "Number of object listings made"),
+  OBJECT_METADATA_REQUESTS("object_metadata_requests",
+  "Number of requests for object metadata"),
+  OBJECT_MULTIPART_UPLOAD_ABORTED("object_multipart_aborted",
+  "Object multipart upload aborted"),
+  OBJECT_PUT_REQUESTS("object_put_requests",
+  "Object put/multipart upload count"),
+  OBJECT_PUT_BYTES("object_put_bytes", "number of bytes uploaded"),
+  STREAM_ABORTED("streamAborted",
+  "Count of times the TCP stream was aborted"),
+  STREAM_BACKWARD_SEEK_OPERATIONS("streamBackwardSeekOperations",
+  "Number of executed seek operations which went backwards in a stream"),
+  STREAM_CLOSED("streamClosed", "Count of times the TCP stream was closed"),
+  STREAM_CLOSE_OPERATIONS("streamCloseOperations",
+  "Total count of times an attempt to close a data stream was made"),
+  STREAM_FORWARD_SEEK_OPERATIONS("streamForwardSeekOperations",
+  "Number of executed seek operations which went forward in a stream"),
+  STREAM_OPENED("streamOpened",
+  "Total count of times an input stream to object store was opened"),
+  STREAM_READ_EXCEPTIONS("streamReadExceptions",
+  "Number of exceptions raised during input stream reads"),
+  STREAM_READ_FULLY_OPERATIONS("streamReadFullyOperations",
+  "count of readFully() operations in streams"),
+  STREAM_READ_OPERATIONS("streamReadOperations",
+  "Count of read() operations in streams"),
+  STREAM_READ_OPERATIONS_INCOMPLETE("streamReadOperationsIncomplete",
+  "Count of incomplete 

[32/47] hadoop git commit: Revert "Revert "HADOOP-12957. Limit the number of outstanding async calls. Contributed by Xiaobing Zhou""

2016-06-07 Thread aengineer
Revert "Revert "HADOOP-12957. Limit the number of outstanding async calls.  
Contributed by Xiaobing Zhou""

This reverts commit 4d36b221a24e3b626bb91093b0bb0fd377061cae.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa20fa15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa20fa15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa20fa15

Branch: refs/heads/HDFS-1312
Commit: aa20fa150d522b9fe469dd99a8e24d7e27d888ea
Parents: eded3d1
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:28:47 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:28:47 2016 +0800

--
 .../hadoop/fs/CommonConfigurationKeys.java  |   3 +
 .../ipc/AsyncCallLimitExceededException.java|  36 +++
 .../main/java/org/apache/hadoop/ipc/Client.java |  66 -
 .../org/apache/hadoop/ipc/TestAsyncIPC.java | 199 ++--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  12 +-
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 238 +--
 6 files changed, 445 insertions(+), 109 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa20fa15/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 86e1b43..06614db 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -324,6 +324,9 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final long HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT =
 4*60*60; // 4 hours
   
+  public static final String  IPC_CLIENT_ASYNC_CALLS_MAX_KEY =
+  "ipc.client.async.calls.max";
+  public static final int IPC_CLIENT_ASYNC_CALLS_MAX_DEFAULT = 100;
   public static final String  IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY = 
"ipc.client.fallback-to-simple-auth-allowed";
   public static final boolean 
IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT = false;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa20fa15/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
new file mode 100644
index 000..db97b6c
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import java.io.IOException;
+
+/**
+ * Signals that the limit on outstanding asynchronous calls has been reached.
+ * It makes application code using async RPC aware that the maximum number of
+ * async calls is in flight; the application must retrieve the results of
+ * already-established async calls before issuing follow-on calls, so that the
+ * response buffer does not overflow.
+ */
+public class AsyncCallLimitExceededException extends IOException {
+  private static final long serialVersionUID = 1L;
+
+  public AsyncCallLimitExceededException(String message) {
+super(message);
+  }
+}
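
The discipline the javadoc asks for (retrieve results of established calls
before issuing more) can be sketched as follows. This assumes the
AsyncDistributedFileSystem#rename(Path, Path, Options.Rename...) call, which
this patch's diffstat touches; the driver class is hypothetical, and the limit
itself comes from the ipc.client.async.calls.max key (default 100) added above:

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.Future;

import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.AsyncDistributedFileSystem;
import org.apache.hadoop.ipc.AsyncCallLimitExceededException;

public class AsyncRenameDriver {
  static void renameAll(AsyncDistributedFileSystem adfs,
      Path[] srcs, Path[] dsts) throws Exception {
    Queue<Future<Void>> pending = new ArrayDeque<Future<Void>>();
    for (int i = 0; i < srcs.length; i++) {
      for (;;) {
        try {
          pending.add(adfs.rename(srcs[i], dsts[i], Options.Rename.NONE));
          break;  // call accepted; move on to the next rename
        } catch (AsyncCallLimitExceededException e) {
          // Limit hit: retrieve one outstanding result, then retry.
          pending.remove().get();
        }
      }
    }
    for (Future<Void> f : pending) {  // drain whatever is left
      f.get();
    }
  }
}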

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa20fa15/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 

[14/47] hadoop git commit: YARN-5190. Registering/unregistering container metrics in ContainerMonitorImpl and ContainerImpl causing uncaught exception in ContainerMonitorImpl. Contributed by Junping D

2016-06-07 Thread aengineer
YARN-5190. Registering/unregistering container metrics in ContainerMonitorImpl 
and ContainerImpl causing uncaught exception in ContainerMonitorImpl. 
Contributed by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99cc439e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99cc439e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99cc439e

Branch: refs/heads/HDFS-1312
Commit: 99cc439e29794f8e61bebe03b2a7ca4b6743ec92
Parents: 097baaa
Author: Jian He 
Authored: Fri Jun 3 11:10:42 2016 -0700
Committer: Jian He 
Committed: Fri Jun 3 11:10:42 2016 -0700

--
 .../hadoop/metrics2/impl/MetricsSystemImpl.java   |  1 +
 .../hadoop/metrics2/lib/DefaultMetricsSystem.java |  9 +
 .../monitor/ContainerMetrics.java | 18 +-
 .../monitor/ContainersMonitorImpl.java| 16 
 .../monitor/TestContainerMetrics.java |  4 +++-
 5 files changed, 38 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99cc439e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
index ef7306b..6986edb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
@@ -255,6 +255,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
 if (namedCallbacks.containsKey(name)) {
   namedCallbacks.remove(name);
 }
+DefaultMetricsSystem.removeSourceName(name);
   }
 
   synchronized

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99cc439e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java
index c761b58..935f47f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java
@@ -116,6 +116,11 @@ public enum DefaultMetricsSystem {
   }
 
   @InterfaceAudience.Private
+  public static void removeSourceName(String name) {
+INSTANCE.removeSource(name);
+  }
+
+  @InterfaceAudience.Private
   public static String sourceName(String name, boolean dupOK) {
 return INSTANCE.newSourceName(name, dupOK);
   }
@@ -135,6 +140,10 @@ public enum DefaultMetricsSystem {
 mBeanNames.map.remove(name);
   }
 
+  synchronized void removeSource(String name) {
+sourceNames.map.remove(name);
+  }
+
   synchronized String newSourceName(String name, boolean dupOK) {
 if (sourceNames.map.containsKey(name)) {
   if (dupOK) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99cc439e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
index d59abda..31a9aa7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
@@ -198,6 +198,12 @@ public class ContainerMetrics implements MetricsSource {
 DefaultMetricsSystem.instance(), containerId, flushPeriodMs, delayMs);
   }
 
+  public synchronized static ContainerMetrics getContainerMetrics(
+  ContainerId containerId) {
+// could be null
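
The point of the removeSourceName() plumbing is that a per-container source
name can be registered again after unregistration. A minimal sketch, with
hypothetical source and record names rather than code from this commit:

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class SourceLifecycle {
  static class ProbeSource implements MetricsSource {
    @Override
    public void getMetrics(MetricsCollector collector, boolean all) {
      collector.addRecord("probe");  // emit a single empty record
    }
  }

  public static void main(String[] args) {
    MetricsSystem ms = DefaultMetricsSystem.initialize("test");
    ms.register("ContainerResource_c1", "container metrics", new ProbeSource());
    // unregisterSource() now also calls removeSourceName(), so...
    ms.unregisterSource("ContainerResource_c1");
    // ...re-registering the same name no longer trips the duplicate check.
    ms.register("ContainerResource_c1", "container metrics", new ProbeSource());
  }
}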

[42/47] hadoop git commit: YARN-5185. StageAllocaterGreedyRLE: Fix NPE in corner case. (Carlo Curino via asuresh)

2016-06-07 Thread aengineer
YARN-5185. StageAllocaterGreedyRLE: Fix NPE in corner case. (Carlo Curino via 
asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a9b7372
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a9b7372
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a9b7372

Branch: refs/heads/HDFS-1312
Commit: 7a9b7372a1a917c7b5e1beca7e13c0419e3dbfef
Parents: 6de9213
Author: Arun Suresh 
Authored: Mon Jun 6 21:06:52 2016 -0700
Committer: Arun Suresh 
Committed: Mon Jun 6 21:06:52 2016 -0700

--
 .../planning/StageAllocatorGreedyRLE.java   | 16 
 1 file changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a9b7372/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
index c5a3192..5e748fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
@@ -168,12 +168,20 @@ public class StageAllocatorGreedyRLE implements StageAllocator {
   if (allocateLeft) {
 // set earliest start to the min of the constraining "range" or the
 // end of this allocation
-stageEarliestStart =
-Math.min(partialMap.higherKey(minPoint), stageEarliestStart + dur);
+if (partialMap.higherKey(minPoint) == null) {
+  stageEarliestStart = stageEarliestStart + dur;
+} else {
+  stageEarliestStart =
+  Math.min(partialMap.higherKey(minPoint), stageEarliestStart + dur);
+}
   } else {
 // same as above moving right-to-left
-stageDeadline =
-Math.max(partialMap.higherKey(minPoint), stageDeadline - dur);
+if (partialMap.higherKey(minPoint) == null) {
+  stageDeadline = stageDeadline - dur;
+} else {
+  stageDeadline =
+  Math.max(partialMap.higherKey(minPoint), stageDeadline - dur);
+}
   }
 }
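
The corner case this guards against reproduces standalone:
NavigableMap.higherKey() returns null when no strictly greater key exists, and
auto-unboxing that null inside Math.min()/Math.max() throws the NPE in the
title. A minimal sketch with illustrative values:

import java.util.NavigableMap;
import java.util.TreeMap;

public class HigherKeyNpe {
  public static void main(String[] args) {
    NavigableMap<Long, Integer> partialMap = new TreeMap<Long, Integer>();
    partialMap.put(10L, 1);
    long minPoint = 10L;  // the highest key in the map
    Long higher = partialMap.higherKey(minPoint);
    System.out.println(higher);  // null: the old Math.min() call would NPE
    long stageEarliestStart = 5L;
    long dur = 3L;
    // The patched logic falls back to stageEarliestStart + dur on null:
    long next = (higher == null)
        ? stageEarliestStart + dur
        : Math.min(higher, stageEarliestStart + dur);
    System.out.println(next);  // 8
  }
}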
 





[46/47] hadoop git commit: HADOOP-10048. LocalDirAllocator should avoid holding locks while accessing the filesystem. Contributed by Jason Lowe.

2016-06-07 Thread aengineer
HADOOP-10048. LocalDirAllocator should avoid holding locks while accessing the 
filesystem. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c14c1b29
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c14c1b29
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c14c1b29

Branch: refs/heads/HDFS-1312
Commit: c14c1b298e29e799f7c8f15ff24d7eba6e0cd39b
Parents: e620530
Author: Junping Du 
Authored: Tue Jun 7 09:18:58 2016 -0700
Committer: Junping Du 
Committed: Tue Jun 7 09:18:58 2016 -0700

--
 .../org/apache/hadoop/fs/LocalDirAllocator.java | 153 ---
 1 file changed, 94 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c14c1b29/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
index 70cf87d..b14e1f0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -20,9 +20,10 @@ package org.apache.hadoop.fs;
 
 import java.io.*;
 import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.commons.logging.*;
-
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -247,74 +248,101 @@ public class LocalDirAllocator {
 private final Log LOG =
   LogFactory.getLog(AllocatorPerContext.class);
 
-private int dirNumLastAccessed;
 private Random dirIndexRandomizer = new Random();
-private FileSystem localFS;
-private DF[] dirDF = new DF[0];
 private String contextCfgItemName;
-private String[] localDirs = new String[0];
-private String savedLocalDirs = "";
+
+// NOTE: the context must be accessed via a local reference as it
+//   may be updated at any time to reference a different context
+private AtomicReference<Context> currentContext;
+
+private static class Context {
+  private AtomicInteger dirNumLastAccessed = new AtomicInteger(0);
+  private FileSystem localFS;
+  private DF[] dirDF;
+  private Path[] localDirs;
+  private String savedLocalDirs;
+
+  public int getAndIncrDirNumLastAccessed() {
+return getAndIncrDirNumLastAccessed(1);
+  }
+
+  public int getAndIncrDirNumLastAccessed(int delta) {
+if (localDirs.length < 2 || delta == 0) {
+  return dirNumLastAccessed.get();
+}
+int oldval, newval;
+do {
+  oldval = dirNumLastAccessed.get();
+  newval = (oldval + delta) % localDirs.length;
+} while (!dirNumLastAccessed.compareAndSet(oldval, newval));
+return oldval;
+  }
+}
 
 public AllocatorPerContext(String contextCfgItemName) {
   this.contextCfgItemName = contextCfgItemName;
+  this.currentContext = new AtomicReference<Context>(new Context());
 }
 
 /** This method gets called every time before any read/write to make sure
  * that any change to localDirs is reflected immediately.
  */
-private synchronized void confChanged(Configuration conf) 
+private Context confChanged(Configuration conf)
 throws IOException {
+  Context ctx = currentContext.get();
   String newLocalDirs = conf.get(contextCfgItemName);
   if (null == newLocalDirs) {
 throw new IOException(contextCfgItemName + " not configured");
   }
-  if (!newLocalDirs.equals(savedLocalDirs)) {
-localDirs = StringUtils.getTrimmedStrings(newLocalDirs);
-localFS = FileSystem.getLocal(conf);
-int numDirs = localDirs.length;
-ArrayList<String> dirs = new ArrayList<String>(numDirs);
+  if (!newLocalDirs.equals(ctx.savedLocalDirs)) {
+ctx = new Context();
+String[] dirStrings = StringUtils.getTrimmedStrings(newLocalDirs);
+ctx.localFS = FileSystem.getLocal(conf);
+int numDirs = dirStrings.length;
+ArrayList<Path> dirs = new ArrayList<Path>(numDirs);
 ArrayList<DF> dfList = new ArrayList<DF>(numDirs);
 for (int i = 0; i < numDirs; i++) {
   try {
 // filter problematic directories
-Path tmpDir = new Path(localDirs[i]);
-if(localFS.mkdirs(tmpDir)|| localFS.exists(tmpDir)) {
+Path tmpDir = new Path(dirStrings[i]);
+if(ctx.localFS.mkdirs(tmpDir)|| ctx.localFS.exists(tmpDir)) 
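
The core of the change is the lock-free round-robin index in the new Context:
a compare-and-set loop advances the counter modulo the number of directories,
so no filesystem access happens while holding a lock. A standalone sketch of
that pattern, with hypothetical names:

import java.util.concurrent.atomic.AtomicInteger;

public class RoundRobinIndex {
  private final AtomicInteger last = new AtomicInteger(0);
  private final int size;

  RoundRobinIndex(int size) {
    this.size = size;
  }

  /** Returns the current index and advances it by delta, lock-free. */
  int getAndAdvance(int delta) {
    if (size < 2 || delta == 0) {
      return last.get();  // nothing to rotate over
    }
    int oldval, newval;
    do {
      oldval = last.get();
      newval = (oldval + delta) % size;
    } while (!last.compareAndSet(oldval, newval));
    return oldval;
  }

  public static void main(String[] args) {
    RoundRobinIndex idx = new RoundRobinIndex(3);
    for (int i = 0; i < 5; i++) {
      System.out.print(idx.getAndAdvance(1) + " ");  // 0 1 2 0 1
    }
  }
}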

[18/47] hadoop git commit: HADOOP-13105. Support timeouts in LDAP queries in LdapGroupsMapping. Contributed by Mingliang Liu.

2016-06-07 Thread aengineer
HADOOP-13105. Support timeouts in LDAP queries in LdapGroupsMapping. 
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d82bc850
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d82bc850
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d82bc850

Branch: refs/heads/HDFS-1312
Commit: d82bc8501869be78780fc09752dbf7af918c14af
Parents: 78b3a03
Author: Chris Nauroth 
Authored: Fri Jun 3 16:38:30 2016 -0700
Committer: Chris Nauroth 
Committed: Fri Jun 3 16:38:30 2016 -0700

--
 .../hadoop/security/LdapGroupsMapping.java  |  12 ++
 .../src/main/resources/core-default.xml |  24 
 .../hadoop/security/TestLdapGroupsMapping.java  | 140 +++
 3 files changed, 176 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d82bc850/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index 498b92e..da87369 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -179,6 +179,13 @@ public class LdapGroupsMapping
 LDAP_CONFIG_PREFIX + ".directory.search.timeout";
  public static final int DIRECTORY_SEARCH_TIMEOUT_DEFAULT = 10000; // 10s
 
+  public static final String CONNECTION_TIMEOUT =
+  LDAP_CONFIG_PREFIX + ".connection.timeout.ms";
+  public static final int CONNECTION_TIMEOUT_DEFAULT = 60 * 1000; // 60 seconds
+  public static final String READ_TIMEOUT =
+  LDAP_CONFIG_PREFIX + ".read.timeout.ms";
+  public static final int READ_TIMEOUT_DEFAULT = 60 * 1000; // 60 seconds
+
   private static final Log LOG = LogFactory.getLog(LdapGroupsMapping.class);
 
   private static final SearchControls SEARCH_CONTROLS = new SearchControls();
@@ -432,6 +439,11 @@ public class LdapGroupsMapping
   env.put(Context.SECURITY_PRINCIPAL, bindUser);
   env.put(Context.SECURITY_CREDENTIALS, bindPassword);
 
+  env.put("com.sun.jndi.ldap.connect.timeout", conf.get(CONNECTION_TIMEOUT,
+  String.valueOf(CONNECTION_TIMEOUT_DEFAULT)));
+  env.put("com.sun.jndi.ldap.read.timeout", conf.get(READ_TIMEOUT,
+  String.valueOf(READ_TIMEOUT_DEFAULT)));
+
   ctx = new InitialDirContext(env);
 }
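
A hedged sketch of setting the new keys from client code; the constant names
come from the diff above, the values and wrapper class are illustrative, and
it assumes LdapGroupsMapping applies them the next time it builds a directory
context:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.LdapGroupsMapping;

public class LdapTimeoutSetup {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Fail connect attempts after 10s rather than the 60s default.
    conf.setInt(LdapGroupsMapping.CONNECTION_TIMEOUT, 10 * 1000);
    // Fail reads after 30s rather than the 60s default.
    conf.setInt(LdapGroupsMapping.READ_TIMEOUT, 30 * 1000);
    LdapGroupsMapping mapping = new LdapGroupsMapping();
    mapping.setConf(conf);  // the timeouts apply when a context is next built
  }
}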
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d82bc850/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index b3f8cd5..a65246b 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -166,6 +166,30 @@
 </property>
 
 <property>
+  <name>hadoop.security.group.mapping.ldap.connection.timeout.ms</name>
+  <value>60000</value>
+  <description>
+    This property is the connection timeout (in milliseconds) for LDAP
+    operations. If the LDAP provider doesn't establish a connection within the
+    specified period, it will abort the connect attempt. A non-positive value
+    means no LDAP connection timeout is specified, in which case it waits for
+    the connection to establish until the underlying network times out.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.read.timeout.ms</name>
+  <value>60000</value>
+  <description>
+    This property is the read timeout (in milliseconds) for LDAP
+    operations. If the LDAP provider doesn't get an LDAP response within the
+    specified period, it will abort the read attempt. A non-positive value
+    means no read timeout is specified, in which case it waits for the
+    response indefinitely.
+  </description>
+</property>
+
+<property>
   <name>hadoop.security.group.mapping.ldap.url</name>
   <value></value>
   <description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d82bc850/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
index 9319016..9f9f994 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
+++ 

[44/47] hadoop git commit: YARN-5118. Tests fails with localizer port bind exception. Contributed by Brahma Reddy Battula.

2016-06-07 Thread aengineer
YARN-5118. Tests fails with localizer port bind exception. Contributed by 
Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bddea5fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bddea5fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bddea5fe

Branch: refs/heads/HDFS-1312
Commit: bddea5fe5fe72eee8e2ecfcec616bd8ceb4d72e7
Parents: 3a154f7
Author: Rohith Sharma K S 
Authored: Tue Jun 7 11:20:15 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Jun 7 11:20:15 2016 +0530

--
 .../apache/hadoop/yarn/server/nodemanager/TestEventFlow.java  | 3 +++
 .../server/nodemanager/TestNodeStatusUpdaterForLabels.java| 7 +++
 .../containermanager/BaseContainerManagerTest.java| 3 +++
 3 files changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddea5fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
index f126080..a9ff83c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
@@ -25,6 +25,7 @@ import java.util.List;
 
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
@@ -91,6 +92,8 @@ public class TestEventFlow {
 conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
 conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, 
 remoteLogDir.getAbsolutePath());
+conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:"
++ ServerSocketUtil.getPort(8040, 10));
 
 ContainerExecutor exec = new DefaultContainerExecutor();
 exec.setConf(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddea5fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
index 563104e..257e18c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
@@ -28,6 +28,7 @@ import java.nio.ByteBuffer;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.service.ServiceOperations;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -233,6 +234,9 @@ public class TestNodeStatusUpdaterForLabels extends NodeLabelTestBase {
 
 YarnConfiguration conf = createNMConfigForDistributeNodeLabels();
 conf.setLong(YarnConfiguration.NM_NODE_LABELS_RESYNC_INTERVAL, 2000);
+conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:"
++ ServerSocketUtil.getPort(8040, 10));
+
 nm.init(conf);
 resourceTracker.resetNMHeartbeatReceiveFlag();
 nm.start();
@@ -329,6 +333,9 @@ public class TestNodeStatusUpdaterForLabels extends NodeLabelTestBase {
 };
 dummyLabelsProviderRef.setNodeLabels(toNodeLabelSet("P"));
 YarnConfiguration conf = createNMConfigForDistributeNodeLabels();
+conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:"
++ 
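
Each test applies the same pattern: probe for a bindable localizer port
instead of assuming the default 8040 is free. A minimal standalone sketch,
with a hypothetical wrapper class:

import java.io.IOException;

import org.apache.hadoop.net.ServerSocketUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class FreeLocalizerPort {
  public static void main(String[] args) throws IOException {
    YarnConfiguration conf = new YarnConfiguration();
    // Try the default localizer port first, then up to 10 alternatives.
    int port = ServerSocketUtil.getPort(8040, 10);
    conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:" + port);
    System.out.println(conf.get(YarnConfiguration.NM_LOCALIZER_ADDRESS));
  }
}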

[39/47] hadoop git commit: MAPREDUCE-5044. Have AM trigger jstack on task attempts that timeout before killing them. (Eric Payne and Gera Shegalov via mingma)

2016-06-07 Thread aengineer
MAPREDUCE-5044. Have AM trigger jstack on task attempts that timeout before 
killing them. (Eric Payne and Gera Shegalov via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a1cedc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a1cedc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a1cedc0

Branch: refs/heads/HDFS-1312
Commit: 4a1cedc010d3fa1d8ef3f2773ca12acadfee5ba5
Parents: 35f255b
Author: Ming Ma 
Authored: Mon Jun 6 14:30:51 2016 -0700
Committer: Ming Ma 
Committed: Mon Jun 6 14:30:51 2016 -0700

--
 .../hadoop/mapred/LocalContainerLauncher.java   |  28 +
 .../v2/app/job/impl/TaskAttemptImpl.java|   5 +-
 .../v2/app/launcher/ContainerLauncherEvent.java |  21 +++-
 .../v2/app/launcher/ContainerLauncherImpl.java  |  19 ++-
 .../v2/app/launcher/TestContainerLauncher.java  |  10 +-
 .../app/launcher/TestContainerLauncherImpl.java |   8 ++
 .../hadoop/mapred/ResourceMgrDelegate.java  |   5 +-
 .../hadoop/mapred/TestClientRedirect.java   |   2 +-
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  | 119 +++
 .../yarn/api/ApplicationClientProtocol.java |   2 +-
 .../yarn/api/ContainerManagementProtocol.java   |   5 +
 .../SignalContainerResponse.java|   2 +-
 .../main/proto/applicationclient_protocol.proto |   2 +-
 .../proto/containermanagement_protocol.proto|   1 +
 .../hadoop/yarn/client/api/YarnClient.java  |   2 +-
 .../yarn/client/api/impl/YarnClientImpl.java|   4 +-
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |   6 +-
 .../yarn/client/api/impl/TestYarnClient.java|   4 +-
 .../yarn/api/ContainerManagementProtocolPB.java |   7 ++
 .../ApplicationClientProtocolPBClientImpl.java  |   4 +-
 ...ContainerManagementProtocolPBClientImpl.java |  19 +++
 .../ApplicationClientProtocolPBServiceImpl.java |   5 +-
 ...ontainerManagementProtocolPBServiceImpl.java |  20 
 .../hadoop/yarn/TestContainerLaunchRPC.java |  10 ++
 .../yarn/TestContainerResourceIncreaseRPC.java  |   8 ++
 .../java/org/apache/hadoop/yarn/TestRPC.java|  10 ++
 .../containermanager/ContainerManagerImpl.java  |  38 --
 .../amrmproxy/MockResourceManagerFacade.java|   2 +-
 .../server/resourcemanager/ClientRMService.java |   2 +-
 .../yarn/server/resourcemanager/MockRM.java |   6 +-
 .../server/resourcemanager/NodeManager.java |   9 +-
 .../resourcemanager/TestAMAuthorization.java|   8 ++
 .../TestApplicationMasterLauncher.java  |   8 ++
 .../resourcemanager/TestSignalContainer.java|   2 +-
 34 files changed, 361 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a1cedc0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index da118c5..190d988 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -20,6 +20,10 @@ package org.apache.hadoop.mapred;
 
 import java.io.File;
 import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -255,6 +259,30 @@ public class LocalContainerLauncher extends AbstractService implements
 
 } else if (event.getType() == EventType.CONTAINER_REMOTE_CLEANUP) {
 
+  if (event.getDumpContainerThreads()) {
+try {
+  // Construct full thread dump header
+  System.out.println(new java.util.Date());
+  RuntimeMXBean rtBean = ManagementFactory.getRuntimeMXBean();
+  System.out.println("Full thread dump " + rtBean.getVmName()
+  + " (" + rtBean.getVmVersion()
+  + " " + rtBean.getSystemProperties().get("java.vm.info")
+  + "):\n");
+  // Dump threads' states and stacks
+  ThreadMXBean tmxBean = ManagementFactory.getThreadMXBean();
+  ThreadInfo[] tInfos = tmxBean.dumpAllThreads(
+  
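
The dump logic this hunk begins can be sketched standalone: a header from
RuntimeMXBean, then every thread via ThreadMXBean. The dumpAllThreads()
arguments below are an assumption about the elided call:

import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class ThreadDump {
  public static void main(String[] args) {
    System.out.println(new java.util.Date());
    RuntimeMXBean rtBean = ManagementFactory.getRuntimeMXBean();
    System.out.println("Full thread dump " + rtBean.getVmName()
        + " (" + rtBean.getVmVersion()
        + " " + rtBean.getSystemProperties().get("java.vm.info")
        + "):\n");
    ThreadMXBean tmxBean = ManagementFactory.getThreadMXBean();
    // Dump every thread, including monitor/synchronizer info if supported.
    ThreadInfo[] tInfos = tmxBean.dumpAllThreads(
        tmxBean.isObjectMonitorUsageSupported(),
        tmxBean.isSynchronizerUsageSupported());
    for (ThreadInfo ti : tInfos) {
      // ThreadInfo#toString() prints state plus the first few stack frames.
      System.out.print(ti.toString());
    }
  }
}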

[26/47] hadoop git commit: Revert "HDFS-10390. Implement asynchronous setAcl/getAclStatus for DistributedFileSystem. Contributed by Xiaobing Zhou"

2016-06-07 Thread aengineer
Revert "HDFS-10390. Implement asynchronous setAcl/getAclStatus for 
DistributedFileSystem.  Contributed by Xiaobing Zhou"

This reverts commit 02d4e478a398c24a5e5e8ea2b0822a5b9d4a97ae.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b82c74b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b82c74b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b82c74b9

Branch: refs/heads/HDFS-1312
Commit: b82c74b9102ba95eae776501ed4484be9edd8c96
Parents: 5ee5912
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:14 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:14 2016 -0700

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  59 
 .../hadoop/hdfs/DistributedFileSystem.java  |   3 -
 .../ClientNamenodeProtocolTranslatorPB.java |  30 +-
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 310 ---
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  |  15 +-
 .../hdfs/server/namenode/FSAclBaseTest.java |  12 +-
 6 files changed, 18 insertions(+), 411 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b82c74b9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
index 29bac2a..6bfd71d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
@@ -19,16 +19,12 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
-import java.util.List;
 import java.util.concurrent.Future;
 
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.util.concurrent.AsyncGetFuture;
 import org.apache.hadoop.ipc.Client;
@@ -87,7 +83,6 @@ public class AsyncDistributedFileSystem {
   public Future rename(Path src, Path dst,
   final Options.Rename... options) throws IOException {
 dfs.getFsStatistics().incrementWriteOps(1);
-dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.RENAME);
 
 final Path absSrc = dfs.fixRelativePart(src);
 final Path absDst = dfs.fixRelativePart(dst);
@@ -116,7 +111,6 @@ public class AsyncDistributedFileSystem {
   public Future setPermission(Path p, final FsPermission permission)
   throws IOException {
 dfs.getFsStatistics().incrementWriteOps(1);
-dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_PERMISSION);
 final Path absPath = dfs.fixRelativePart(p);
 final boolean isAsync = Client.isAsynchronousMode();
 Client.setAsynchronousMode(true);
@@ -148,7 +142,6 @@ public class AsyncDistributedFileSystem {
 }
 
 dfs.getFsStatistics().incrementWriteOps(1);
-dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_OWNER);
 final Path absPath = dfs.fixRelativePart(p);
 final boolean isAsync = Client.isAsynchronousMode();
 Client.setAsynchronousMode(true);
@@ -159,56 +152,4 @@ public class AsyncDistributedFileSystem {
   Client.setAsynchronousMode(isAsync);
 }
   }
-
-  /**
-   * Fully replaces ACL of files and directories, discarding all existing
-   * entries.
-   *
-   * @param p
-   *  Path to modify
-   * @param aclSpec
-   *  List describing modifications, must include entries for
-   *  user, group, and others for compatibility with permission bits.
-   * @throws IOException
-   *   if an ACL could not be modified
-   * @return an instance of Future, #get of which is invoked to wait for
-   * asynchronous call being finished.
-   */
-  public Future setAcl(Path p, final List aclSpec)
-  throws IOException {
-dfs.getFsStatistics().incrementWriteOps(1);
-dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_ACL);
-final Path absPath = dfs.fixRelativePart(p);
-final boolean isAsync = Client.isAsynchronousMode();
-Client.setAsynchronousMode(true);
-try {
-  dfs.getClient().setAcl(dfs.getPathName(absPath), aclSpec);
-  return getReturnValue();
-} finally {
-  
