[47/50] [abbrv] hadoop git commit: HADOOP-14440. Add metrics for connections dropped. Contributed by Eric Badger.

2017-06-05 Thread xgong
HADOOP-14440. Add metrics for connections dropped. Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5ed9c78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5ed9c78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5ed9c78

Branch: refs/heads/YARN-5734
Commit: d5ed9c783dec78c8e6f79991664f94bc5534471b
Parents: 216a0a0
Author: Brahma Reddy Battula 
Authored: Tue Jun 6 00:21:03 2017 +0800
Committer: Xuan 
Committed: Mon Jun 5 13:29:56 2017 -0700

--
 .../main/java/org/apache/hadoop/ipc/Server.java | 20 +++-
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   |  4 
 .../hadoop-common/src/site/markdown/Metrics.md  |  1 +
 .../java/org/apache/hadoop/ipc/TestIPC.java |  8 +---
 4 files changed, 29 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5ed9c78/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 3ea5a24..f3b9a82 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -64,6 +64,7 @@ import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 
 import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
@@ -1220,6 +1221,7 @@ public abstract class Server {
   if (channel.isOpen()) {
 IOUtils.cleanup(null, channel);
   }
+  connectionManager.droppedConnections.getAndIncrement();
   continue;
 }
 key.attach(c);  // so closeCurrentConnection can get the object
@@ -3161,6 +3163,16 @@ public abstract class Server {
   }
 
   /**
+   * The number of RPC connections dropped due to
+   * too many connections.
+   * @return the number of dropped rpc connections
+   */
+  public long getNumDroppedConnections() {
+return connectionManager.getDroppedConnections();
+
+  }
+
+  /**
* The number of rpc calls in the queue.
* @return The number of rpc calls in the queue.
*/
@@ -3277,7 +3289,8 @@ public abstract class Server {
   }
   
   private class ConnectionManager {
-final private AtomicInteger count = new AtomicInteger();
+final private AtomicInteger count = new AtomicInteger();
+final private AtomicLong droppedConnections = new AtomicLong();
 final private Set connections;
 /* Map to maintain the statistics per User */
 final private Map userToConnectionsMap;
@@ -3364,6 +3377,11 @@ public abstract class Server {
   return userToConnectionsMap;
 }
 
+
+long getDroppedConnections() {
+  return droppedConnections.get();
+}
+
 int size() {
   return count.get();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5ed9c78/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
index e5dde10..8ce1379 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
@@ -121,6 +121,10 @@ public class RpcMetrics {
 return server.getCallQueueLen();
   }
 
+  @Metric("Number of dropped connections") public long numDroppedConnections() 
{
+return server.getNumDroppedConnections();
+  }
+
   // Public instrumentation methods that could be extracted to an
   // abstract class if we decide to do custom instrumentation classes a la
   // JobTrackerInstrumentation. The methods with //@Override comment are

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5ed9c78/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index a14c86d..4b89bc2 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ 

[06/50] [abbrv] hadoop git commit: YARN-6555. Store application flow context in NM state store for work-preserving restart. (Rohith Sharma K S via Haibo Chen)

2017-06-05 Thread xgong
YARN-6555. Store application flow context in NM state store for work-preserving 
restart. (Rohith Sharma K S via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65cbe43a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65cbe43a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65cbe43a

Branch: refs/heads/YARN-5734
Commit: 65cbe43a3628cdf70afd2c802c2b763cfcc668e2
Parents: 3efac7d
Author: Haibo Chen 
Authored: Thu May 25 21:15:27 2017 -0700
Committer: Xuan 
Committed: Mon Jun 5 13:29:21 2017 -0700

--
 .../containermanager/ContainerManagerImpl.java  | 71 +---
 .../application/ApplicationImpl.java| 27 ++--
 .../yarn_server_nodemanager_recovery.proto  |  7 ++
 .../TestContainerManagerRecovery.java   | 40 +--
 4 files changed, 111 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65cbe43a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index f65f1ac..50268b9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -85,6 +85,7 @@ import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationACLMapProto;
 import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
+import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.FlowContextProto;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.security.NMTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.ContainerType;
@@ -381,10 +382,20 @@ public class ContainerManagerImpl extends 
CompositeService implements
   new LogAggregationContextPBImpl(p.getLogAggregationContext());
 }
 
+FlowContext fc = null;
+if (p.getFlowContext() != null) {
+  FlowContextProto fcp = p.getFlowContext();
+  fc = new FlowContext(fcp.getFlowName(), fcp.getFlowVersion(),
+  fcp.getFlowRunId());
+  if (LOG.isDebugEnabled()) {
+LOG.debug(
+"Recovering Flow context: " + fc + " for an application " + appId);
+  }
+}
+
 LOG.info("Recovering application " + appId);
-//TODO: Recover flow and flow run ID
-ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), appId,
-creds, context, p.getAppLogAggregationInitedTime());
+ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), fc,
+appId, creds, context, p.getAppLogAggregationInitedTime());
 context.getApplications().put(appId, app);
 app.handle(new ApplicationInitEvent(appId, acls, logAggregationContext));
   }
@@ -936,7 +947,7 @@ public class ContainerManagerImpl extends CompositeService 
implements
   private ContainerManagerApplicationProto buildAppProto(ApplicationId appId,
   String user, Credentials credentials,
   Map appAcls,
-  LogAggregationContext logAggregationContext) {
+  LogAggregationContext logAggregationContext, FlowContext flowContext) {
 
 ContainerManagerApplicationProto.Builder builder =
 ContainerManagerApplicationProto.newBuilder();
@@ -971,6 +982,16 @@ public class ContainerManagerImpl extends CompositeService 
implements
   }
 }
 
+builder.clearFlowContext();
+if (flowContext != null && flowContext.getFlowName() != null
+&& flowContext.getFlowVersion() != null) {
+  FlowContextProto fcp =
+  FlowContextProto.newBuilder().setFlowName(flowContext.getFlowName())
+  .setFlowVersion(flowContext.getFlowVersion())
+  .setFlowRunId(flowContext.getFlowRunId()).build();
+  builder.setFlowContext(fcp);
+}
+
 return builder.build();
   }
 
@@ -1016,25 +1037,29 @@ public class ContainerManagerImpl extends 
CompositeService 

[29/50] [abbrv] hadoop git commit: YARN-6246. Identifying starved apps does not need the scheduler writelock (Contributed by Karthik Kambatla via Daniel Templeton)

2017-06-05 Thread xgong
YARN-6246. Identifying starved apps does not need the scheduler writelock
(Contributed by Karthik Kambatla via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e648f18a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e648f18a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e648f18a

Branch: refs/heads/YARN-5734
Commit: e648f18a082aaec9c2a132bca0ea6a5741f01c5b
Parents: 9560fd5
Author: Daniel Templeton 
Authored: Wed May 31 15:48:04 2017 -0700
Committer: Xuan 
Committed: Mon Jun 5 13:29:41 2017 -0700

--
 .../scheduler/fair/FSLeafQueue.java |  9 +++
 .../scheduler/fair/FSParentQueue.java   |  4 +--
 .../resourcemanager/scheduler/fair/FSQueue.java | 19 +-
 .../scheduler/fair/FairScheduler.java   | 27 ++--
 4 files changed, 38 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e648f18a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 10f1e28..1de0e30 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -198,13 +198,10 @@ public class FSLeafQueue extends FSQueue {
   }
 
   @Override
-  public void updateInternal(boolean checkStarvation) {
+  void updateInternal() {
 readLock.lock();
 try {
   policy.computeShares(runnableApps, getFairShare());
-  if (checkStarvation) {
-updateStarvedApps();
-  }
 } finally {
   readLock.unlock();
 }
@@ -283,8 +280,10 @@ public class FSLeafQueue extends FSQueue {
* If this queue is starving due to fairshare, there must be at least
* one application that is starved. And, even if the queue is not
* starved due to fairshare, there might still be starved applications.
+   *
+   * Caller does not need read/write lock on the leaf queue.
*/
-  private void updateStarvedApps() {
+  void updateStarvedApps() {
 // Fetch apps with pending demand
 TreeSet appsWithDemand = fetchAppsWithDemand(false);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e648f18a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index b062c58..5b4e4dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -79,13 +79,13 @@ public class FSParentQueue extends FSQueue {
   }
 
   @Override
-  public void updateInternal(boolean checkStarvation) {
+  void updateInternal() {
 readLock.lock();
 try {
   policy.computeShares(childQueues, getFairShare());
   for (FSQueue childQueue : childQueues) {
 childQueue.getMetrics().setFairShare(childQueue.getFairShare());
-childQueue.updateInternal(checkStarvation);
+childQueue.updateInternal();
   }
 } finally {
   readLock.unlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e648f18a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java

hadoop git commit: YARN-6189: Improve application status log message when RM restarted when app is in NEW state. Contributed by Junping Du

2017-02-28 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 df35ba81f -> 95bd3c3d5


YARN-6189: Improve application status log message when RM restarted when
app is in NEW state. Contributed by Junping Du

(cherry picked from commit e0bb867c3fa638c9f689ee0b044b400481cf02b5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95bd3c3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95bd3c3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95bd3c3d

Branch: refs/heads/branch-2
Commit: 95bd3c3d55625c732299dbf35d32f7690caee829
Parents: df35ba8
Author: Xuan 
Authored: Tue Feb 28 11:04:56 2017 -0800
Committer: Xuan 
Committed: Tue Feb 28 11:06:07 2017 -0800

--
 .../yarn/server/resourcemanager/ClientRMService.java | 15 ++-
 .../server/resourcemanager/TestClientRMService.java  |  3 ++-
 2 files changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bd3c3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 10745a9..e468813 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -357,7 +357,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '"
-  + applicationId + "' doesn't exist in RM.");
+  + applicationId + "' doesn't exist in RM. Please check "
+  + "that the job submission was successful.");
 }
 
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
@@ -391,7 +392,8 @@ public class ClientRMService extends AbstractService 
implements
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '"
   + request.getApplicationAttemptId().getApplicationId()
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job "
+  + "submission was successful.");
 }
 
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
@@ -430,7 +432,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '" + appId
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job submission "
+  + "was successful.");
 }
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
 ApplicationAccessType.VIEW_APP, application);
@@ -478,7 +481,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '" + appId
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job submission "
+  + "was successful.");
 }
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
 ApplicationAccessType.VIEW_APP, application);
@@ -528,7 +532,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '" + appId
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job submission "
+  + "was successful.");
 }
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
 ApplicationAccessType.VIEW_APP, application);


hadoop git commit: YARN-6189: Improve application status log message when RM restarted when app is in NEW state. Contributed by Junping Du

2017-02-28 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk eac6b4c35 -> e0bb867c3


YARN-6189: Improve application status log message when RM restarted when
app is in NEW state. Contributed by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0bb867c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0bb867c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0bb867c

Branch: refs/heads/trunk
Commit: e0bb867c3fa638c9f689ee0b044b400481cf02b5
Parents: eac6b4c
Author: Xuan 
Authored: Tue Feb 28 11:04:56 2017 -0800
Committer: Xuan 
Committed: Tue Feb 28 11:04:56 2017 -0800

--
 .../yarn/server/resourcemanager/ClientRMService.java | 15 ++-
 .../server/resourcemanager/TestClientRMService.java  |  3 ++-
 2 files changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0bb867c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 48bccfb..929a9e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -359,7 +359,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '"
-  + applicationId + "' doesn't exist in RM.");
+  + applicationId + "' doesn't exist in RM. Please check "
+  + "that the job submission was successful.");
 }
 
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
@@ -393,7 +394,8 @@ public class ClientRMService extends AbstractService 
implements
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '"
   + request.getApplicationAttemptId().getApplicationId()
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job "
+  + "submission was successful.");
 }
 
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
@@ -432,7 +434,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '" + appId
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job submission "
+  + "was successful.");
 }
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
 ApplicationAccessType.VIEW_APP, application);
@@ -480,7 +483,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '" + appId
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job submission "
+  + "was successful.");
 }
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
 ApplicationAccessType.VIEW_APP, application);
@@ -530,7 +534,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '" + appId
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job submission "
+  + "was successful.");
 }
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
 ApplicationAccessType.VIEW_APP, application);


hadoop git commit: YARN-5946: Create YarnConfigurationStore interface and InMemoryConfigurationStore class. Contributed by Jonathan Hung

2017-02-24 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 42a814a66 -> c12bed65a


YARN-5946: Create YarnConfigurationStore interface and
InMemoryConfigurationStore class. Contributed by Jonathan Hung


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c12bed65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c12bed65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c12bed65

Branch: refs/heads/YARN-5734
Commit: c12bed65af7e9cfcd55e2d553475fca488fe0614
Parents: 42a814a
Author: Xuan 
Authored: Fri Feb 24 15:58:12 2017 -0800
Committer: Xuan 
Committed: Fri Feb 24 15:58:12 2017 -0800

--
 .../conf/InMemoryConfigurationStore.java|  86 +++
 .../capacity/conf/YarnConfigurationStore.java   | 154 +++
 .../conf/TestYarnConfigurationStore.java|  70 +
 3 files changed, 310 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c12bed65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
new file mode 100644
index 000..a208fb9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A default implementation of {@link YarnConfigurationStore}. Doesn't offer
+ * persistent configuration storage, just stores the configuration in memory.
+ */
+public class InMemoryConfigurationStore implements YarnConfigurationStore {
+
+  private Configuration schedConf;
+  private LinkedList pendingMutations;
+  private long pendingId;
+
+  @Override
+  public void initialize(Configuration conf, Configuration schedConf) {
+this.schedConf = schedConf;
+this.pendingMutations = new LinkedList<>();
+this.pendingId = 0;
+  }
+
+  @Override
+  public synchronized long logMutation(LogMutation logMutation) {
+logMutation.setId(++pendingId);
+pendingMutations.add(logMutation);
+return pendingId;
+  }
+
+  @Override
+  public synchronized boolean confirmMutation(long id, boolean isValid) {
+LogMutation mutation = pendingMutations.poll();
+// If confirmMutation is called out of order, discard mutations until id
+// is reached.
+while (mutation != null) {
+  if (mutation.getId() == id) {
+if (isValid) {
+  Map mutations = mutation.getUpdates();
+  for (Map.Entry kv : mutations.entrySet()) {
+schedConf.set(kv.getKey(), kv.getValue());
+  }
+}
+return true;
+  }
+  mutation = pendingMutations.poll();
+}
+return false;
+  }
+
+  @Override
+  public synchronized Configuration retrieve() {
+return schedConf;
+  }
+
+  @Override
+  public synchronized List getPendingMutations() {
+return pendingMutations;
+  }
+
+  @Override
+  public List getConfirmedConfHistory(long fromId) {
+// Unimplemented.
+return null;
+  }
+}


[15/26] hadoop git commit: YARN-6222. TestFairScheduler.testReservationMetrics is flaky. (Yufei Gu via kasha)

2017-02-24 Thread xgong
YARN-6222. TestFairScheduler.testReservationMetrics is flaky. (Yufei Gu via 
kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/694e680d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/694e680d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/694e680d

Branch: refs/heads/YARN-5734
Commit: 694e680d20dc07f634b539537021b09d9316601c
Parents: 159d6c5
Author: Karthik Kambatla 
Authored: Thu Feb 23 15:21:52 2017 -0800
Committer: Karthik Kambatla 
Committed: Thu Feb 23 15:21:52 2017 -0800

--
 .../yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java   | 3 +--
 .../server/resourcemanager/scheduler/fair/TestFairScheduler.java  | 1 +
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/694e680d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 59bde5b..d0e0961 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -407,8 +407,7 @@ public class FSLeafQueue extends FSQueue {
 readLock.lock();
 try {
   for (FSAppAttempt app : runnableApps) {
-Resource pending = app.getAppAttemptResourceUsage().getPending();
-if (!Resources.isNone(pending) &&
+if (!Resources.isNone(app.getPendingDemand()) &&
 (assignment || app.shouldCheckForStarvation())) {
   pendingForResourceApps.add(app);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/694e680d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 62430bf..31dd7fe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -5079,6 +5079,7 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 scheduler.handle(updateEvent);
 
 createSchedulingRequestExistingApplication(1024, 1, 1, appAttemptId);
+scheduler.update();
 scheduler.handle(updateEvent);
 
 // no reservation yet


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[01/26] hadoop git commit: YARN-6184. Introduce loading icon in each page of new YARN UI. Contributed by Akhil PB. [Forced Update!]

2017-02-24 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 6e1a54403 -> 42a814a66 (forced update)


YARN-6184. Introduce loading icon in each page of new YARN UI. Contributed by 
Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1c9cafe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1c9cafe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1c9cafe

Branch: refs/heads/YARN-5734
Commit: f1c9cafefc1940211b9fa0b77d2997ddb589af4e
Parents: 003ae00
Author: Sunil G 
Authored: Wed Feb 22 11:54:32 2017 +0530
Committer: Sunil G 
Committed: Wed Feb 22 11:54:32 2017 +0530

--
 .../main/webapp/app/components/tree-selector.js |   8 +-
 .../webapp/app/controllers/yarn-queue-apps.js   |  46 --
 .../main/webapp/app/controllers/yarn-queue.js   |  17 +++-
 .../main/webapp/app/controllers/yarn-queues.js  |   1 +
 .../src/main/webapp/app/router.js   |   6 +-
 .../main/webapp/app/routes/cluster-overview.js  |   4 +-
 .../main/webapp/app/routes/yarn-app-attempt.js  |  12 +--
 .../src/main/webapp/app/routes/yarn-apps.js |   4 +-
 .../src/main/webapp/app/routes/yarn-node.js |   4 +-
 .../src/main/webapp/app/routes/yarn-nodes.js|   4 +-
 .../main/webapp/app/routes/yarn-queue-apps.js   |  42 --
 .../src/main/webapp/app/routes/yarn-queue.js|   3 +-
 .../main/webapp/app/routes/yarn-queue/apps.js   |  22 +
 .../main/webapp/app/routes/yarn-queue/info.js   |  22 +
 .../src/main/webapp/app/routes/yarn-queues.js   |  35 +++-
 .../src/main/webapp/app/styles/app.css  |   8 ++
 .../src/main/webapp/app/templates/loading.hbs   |  23 +
 .../webapp/app/templates/yarn-apps/loading.hbs  |  23 +
 .../webapp/app/templates/yarn-queue-apps.hbs|  64 --
 .../main/webapp/app/templates/yarn-queue.hbs|  69 ++-
 .../webapp/app/templates/yarn-queue/apps.hbs|  28 +++
 .../webapp/app/templates/yarn-queue/info.hbs|  84 +++
 .../main/webapp/app/templates/yarn-queues.hbs   |   8 +-
 .../webapp/public/assets/images/spinner.gif | Bin 0 -> 33076 bytes
 .../unit/controllers/yarn-queue-apps-test.js|  30 ---
 .../tests/unit/routes/yarn-queue-apps-test.js   |  29 ---
 .../tests/unit/routes/yarn-queue/apps-test.js   |  29 +++
 .../tests/unit/routes/yarn-queue/info-test.js   |  29 +++
 28 files changed, 347 insertions(+), 307 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1c9cafe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
index c9e735d..3d72b2f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
@@ -128,7 +128,7 @@ export default Ember.Component.extend({
   .attr("transform", function() { return "translate(" + source.y0 + "," + 
source.x0 + ")"; })
   .on("mouseover", function(d){
 if (d.queueData.get("name") !== this.get("selected")) {
-document.location.href = "#/yarn-queues/" + 
d.queueData.get("name");
+document.location.href = "#/yarn-queues/" + 
d.queueData.get("name") + "!";
 }
 
 Ember.run.later(this, function () {
@@ -143,7 +143,7 @@ export default Ember.Component.extend({
 
   }.bind(this))
 .on("click", function (d) {
-  document.location.href = "#/yarn-queue/" + d.queueData.get("name");
+  document.location.href = "#/yarn-queue/" + d.queueData.get("name") + 
"/info";
 });
 
 nodeEnter.append("circle")
@@ -190,7 +190,7 @@ export default Ember.Component.extend({
 
 nodeUpdate.select("circle")
   .attr("r", 30)
-  .attr("href", 
+  .attr("href",
 function(d) {
   return "#/yarn-queues/" + d.queueData.get("name");
 })
@@ -294,4 +294,4 @@ export default Ember.Component.extend({
   didInsertElement: function() {
this.reDraw();
   }
-});
\ No newline at end of file
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1c9cafe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-queue-apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-queue-apps.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-queue-apps.js
deleted file 

[17/26] hadoop git commit: HADOOP-14114 S3A can no longer handle unencoded + in URIs. Contributed by Sean Mackrory.

2017-02-24 Thread xgong
HADOOP-14114 S3A can no longer handle unencoded + in URIs. Contributed by Sean 
Mackrory.

(cherry picked from commit ff87ca84418a710c6dc884fe8c70947fcc6489d5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c22a916
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c22a916
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c22a916

Branch: refs/heads/YARN-5734
Commit: 9c22a91662af24569191ce45289ef8266e8755cc
Parents: 132f758
Author: Steve Loughran 
Authored: Fri Feb 24 10:41:36 2017 +
Committer: Steve Loughran 
Committed: Fri Feb 24 10:41:36 2017 +

--
 .../hadoop/fs/s3native/S3xLoginHelper.java  | 15 ++-
 .../hadoop/fs/s3native/TestS3xLoginHelper.java  | 28 
 2 files changed, 42 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c22a916/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
index 97ece37..862ce6b 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
@@ -48,6 +48,13 @@ public final class S3xLoginHelper {
   "The Filesystem URI contains login details."
   +" This is insecure and may be unsupported in future.";
 
+  public static final String PLUS_WARNING =
+  "Secret key contains a special character that should be URL encoded! " +
+  "Attempting to resolve...";
+
+  public static final String PLUS_UNENCODED = "+";
+  public static final String PLUS_ENCODED = "%2B";
+
   /**
* Build the filesystem URI. This can include stripping down of part
* of the URI.
@@ -112,7 +119,13 @@ public final class S3xLoginHelper {
   int loginSplit = login.indexOf(':');
   if (loginSplit > 0) {
 String user = login.substring(0, loginSplit);
-String password = URLDecoder.decode(login.substring(loginSplit + 1),
+String encodedPassword = login.substring(loginSplit + 1);
+if (encodedPassword.contains(PLUS_UNENCODED)) {
+  LOG.warn(PLUS_WARNING);
+  encodedPassword = encodedPassword.replaceAll("\\" + PLUS_UNENCODED,
+  PLUS_ENCODED);
+}
+String password = URLDecoder.decode(encodedPassword,
 "UTF-8");
 return new Login(user, password);
   } else if (loginSplit == 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c22a916/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3xLoginHelper.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3xLoginHelper.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3xLoginHelper.java
index bd2ac1e..3761cb7 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3xLoginHelper.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3xLoginHelper.java
@@ -32,9 +32,13 @@ public class TestS3xLoginHelper extends Assert {
   public static final String BUCKET = "s3a://bucket";
   private static final URI ENDPOINT = uri(BUCKET);
   public static final String S = "%2f";
+  public static final String P = "%2b";
+  public static final String P_RAW = "+";
   public static final String USER = "user";
   public static final String PASS = "pass";
   public static final String PASLASHSLASH = "pa" + S + S;
+  public static final String PAPLUS = "pa" + P;
+  public static final String PAPLUS_RAW = "pa" + P_RAW;
 
   public static final URI WITH_USER_AND_PASS = uri("s3a://user:pass@bucket");
   public static final Path PATH_WITH_LOGIN =
@@ -42,6 +46,10 @@ public class TestS3xLoginHelper extends Assert {
 
   public static final URI WITH_SLASH_IN_PASS = uri(
   "s3a://user:" + PASLASHSLASH + "@bucket");
+  public static final URI WITH_PLUS_IN_PASS = uri(
+  "s3a://user:" + PAPLUS + "@bucket");
+  public static final URI WITH_PLUS_RAW_IN_PASS = uri(
+  "s3a://user:" + PAPLUS_RAW + "@bucket");
   public static final URI USER_NO_PASS = uri("s3a://user@bucket");
   public static final URI WITH_USER_AND_COLON = uri("s3a://user:@bucket");
   public static final URI NO_USER = uri("s3a://:pass@bucket");
@@ -117,6 +125,16 @@ public class TestS3xLoginHelper extends Assert {
   }
 
   @Test
+  public void testLoginWithPlusInPass() throws Throwable {
+

[24/26] hadoop git commit: HADOOP-14116: FailoverOnNetworkExceptionRetry does not wait when failover on certain exception. Contributed by Jian He

2017-02-24 Thread xgong
HADOOP-14116:FailoverOnNetworkExceptionRetry does not wait when failover
on certain exception. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/289bc50e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/289bc50e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/289bc50e

Branch: refs/heads/YARN-5734
Commit: 289bc50e663b882956878eeaefe0eaa1ef4ed39e
Parents: 53d372a
Author: Xuan 
Authored: Fri Feb 24 11:42:23 2017 -0800
Committer: Xuan 
Committed: Fri Feb 24 11:42:23 2017 -0800

--
 .../src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/289bc50e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 0c523a5..d6f3e04 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -683,7 +683,8 @@ public class RetryPolicies {
   } else if (e instanceof SocketException
   || (e instanceof IOException && !(e instanceof RemoteException))) {
 if (isIdempotentOrAtMostOnce) {
-  return RetryAction.FAILOVER_AND_RETRY;
+  return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
+  getFailoverOrRetrySleepTime(retries));
 } else {
   return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
   "the invoked method is not idempotent, and unable to determine "


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[21/26] hadoop git commit: YARN-4779. Fix AM container allocation logic in SLS. Contributed by Wangda Tan.

2017-02-24 Thread xgong
YARN-4779. Fix AM container allocation logic in SLS. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b32ffa27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b32ffa27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b32ffa27

Branch: refs/heads/YARN-5734
Commit: b32ffa2753e83615b980721b6067fcc35ce54372
Parents: e8694de
Author: Sunil G 
Authored: Fri Feb 24 21:39:25 2017 +0530
Committer: Sunil G 
Committed: Fri Feb 24 21:39:25 2017 +0530

--
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |  20 +-
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  |  89 +---
 .../yarn/sls/appmaster/MRAMSimulator.java   | 218 ---
 .../sls/resourcemanager/MockAMLauncher.java | 115 ++
 .../sls/scheduler/SLSCapacityScheduler.java |  24 ++
 5 files changed, 305 insertions(+), 161 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b32ffa27/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 61738fb..61b7f36 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -32,6 +32,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 
 import com.fasterxml.jackson.core.JsonFactory;
 import com.fasterxml.jackson.databind.ObjectMapper;
@@ -55,12 +56,14 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.sls.appmaster.AMSimulator;
 import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
 import org.apache.hadoop.yarn.sls.nodemanager.NMSimulator;
+import org.apache.hadoop.yarn.sls.resourcemanager.MockAMLauncher;
 import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
 import org.apache.hadoop.yarn.sls.scheduler.ResourceSchedulerWrapper;
 import org.apache.hadoop.yarn.sls.scheduler.SLSCapacityScheduler;
@@ -119,10 +122,10 @@ public class SLSRunner {
 this.printSimulation = printsimulation;
 metricsOutputDir = outputDir;
 
-nmMap = new HashMap();
-queueAppNumMap = new HashMap();
-amMap = new HashMap();
-amClassMap = new HashMap();
+nmMap = new HashMap<>();
+queueAppNumMap = new HashMap<>();
+amMap = new ConcurrentHashMap<>();
+amClassMap = new HashMap<>();
 
 // runner configuration
 conf = new Configuration(false);
@@ -179,7 +182,14 @@ public class SLSRunner {
 }
 
 rmConf.set(SLSConfiguration.METRICS_OUTPUT_DIR, metricsOutputDir);
-rm = new ResourceManager();
+
+final SLSRunner se = this;
+rm = new ResourceManager() {
+  @Override
+  protected ApplicationMasterLauncher createAMLauncher() {
+return new MockAMLauncher(se, this.rmContext, amMap);
+  }
+};
 rm.init(rmConf);
 rm.start();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b32ffa27/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
index d61bf02..5b03d51 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
@@ -49,6 +49,7 @@ import 
org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
 import 

[13/26] hadoop git commit: HADOOP-14091. AbstractFileSystem implementation for 'wasbs' scheme. Contributed by Varada Hemeswari.

2017-02-24 Thread xgong
HADOOP-14091. AbstractFileSystem implementation for 'wasbs' scheme. Contributed 
by Varada Hemeswari.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82607fce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82607fce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82607fce

Branch: refs/heads/YARN-5734
Commit: 82607fce39151fc6ba5bced738088e2bc176dc77
Parents: a4d4a23
Author: Mingliang Liu 
Authored: Thu Feb 23 13:48:44 2017 -0800
Committer: Mingliang Liu 
Committed: Thu Feb 23 13:48:44 2017 -0800

--
 .../java/org/apache/hadoop/fs/azure/Wasbs.java  | 47 
 .../fs/azure/TestWasbUriAndConfiguration.java   | 57 
 2 files changed, 104 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82607fce/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/Wasbs.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/Wasbs.java 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/Wasbs.java
new file mode 100644
index 000..0b4a782
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/Wasbs.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DelegateToFileSystem;
+
+/**
+ * WASB implementation of AbstractFileSystem for wasbs scheme.
+ * This impl delegates to the old FileSystem
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class Wasbs extends DelegateToFileSystem {
+
+  Wasbs(final URI theUri, final Configuration conf) throws IOException,
+  URISyntaxException {
+super(theUri, new NativeAzureFileSystem(), conf, "wasbs", false);
+  }
+
+  @Override
+  public int getUriDefaultPort() {
+return -1;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82607fce/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
index 9d2770e..194a831 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
 import org.junit.After;
 import org.junit.Assert;
@@ -471,6 +472,62 @@ public class TestWasbUriAndConfiguration {
   assertTrue(afs instanceof Wasb);
   assertEquals(-1, afs.getUri().getPort());
 } finally {
+  testAccount.cleanup();
+  FileSystem.closeAll();
+}
+  }
+
+   /**
+   * Tests the cases when the scheme specified is 'wasbs'.
+   */
+  @Test
+  public void testAbstractFileSystemImplementationForWasbsScheme() throws 
Exception {
+try {
+  testAccount = AzureBlobStorageTestAccount.createMock();
+  Configuration conf = testAccount.getFileSystem().getConf();
+  String authority = testAccount.getFileSystem().getUri().getAuthority();
+  URI defaultUri = new URI("wasbs", authority, null, null, null);
+  conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+  

[23/26] hadoop git commit: YARN-6228: EntityGroupFSTimelineStore should allow configurable cache stores. Contributed by Li Lu

2017-02-24 Thread xgong
YARN-6228: EntityGroupFSTimelineStore should allow configurable cache
stores. Contributed by Li Lu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53d372a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53d372a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53d372a2

Branch: refs/heads/YARN-5734
Commit: 53d372a2550c970f3dd3c49738af3c1789ae589b
Parents: c1a52b0
Author: Xuan 
Authored: Fri Feb 24 10:58:48 2017 -0800
Committer: Xuan 
Committed: Fri Feb 24 10:59:35 2017 -0800

--
 .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 3 +++
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml  | 5 +
 .../apache/hadoop/yarn/server/timeline/EntityCacheItem.java | 9 +++--
 3 files changed, 15 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53d372a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 094a424..cdccec6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1890,6 +1890,9 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX =
   TIMELINE_SERVICE_PREFIX + "entity-group-fs-store.";
 
+  public static final String TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_CACHE_STORE 
=
+  TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX + "cache-store-class";
+
   public static final String TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR =
   TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX + "active-dir";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53d372a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 53beb5e..368946e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2096,6 +2096,11 @@
   
 
   
+  
+yarn.timeline-service.entity-group-fs-store.cache-store-class
+org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore
+Caching storage timeline server v1.5 is using. 
+  
 
   
 yarn.timeline-service.entity-group-fs-store.active-dir

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53d372a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
index 7ed7c4a..8df60ab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
@@ -17,8 +17,10 @@
 package org.apache.hadoop.yarn.server.timeline;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -95,8 +97,11 @@ public class EntityCacheItem {
   }
   if (!appLogs.getDetailLogs().isEmpty()) {
 if (store == null) {
-  store = new LevelDBCacheTimelineStore(groupId.toString(),
-  "LeveldbCache." + groupId);
+  store = 

[02/26] hadoop git commit: HADOOP-14099 Split S3 testing documentation out into its own file. Contributed by Steve Loughran.

2017-02-24 Thread xgong
HADOOP-14099 Split S3 testing documentation out into its own file. Contributed 
by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f4250fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f4250fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f4250fb

Branch: refs/heads/YARN-5734
Commit: 4f4250fbccec6fd67a8bb7930f7f9e778a2faa6f
Parents: f1c9caf
Author: Steve Loughran 
Authored: Wed Feb 22 11:43:48 2017 +
Committer: Steve Loughran 
Committed: Wed Feb 22 11:43:48 2017 +

--
 .../src/site/markdown/tools/hadoop-aws/index.md | 568 +
 .../site/markdown/tools/hadoop-aws/testing.md   | 814 +++
 2 files changed, 844 insertions(+), 538 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f4250fb/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 7815bcf..3e99656 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -37,7 +37,12 @@ data between hadoop and other applications via the S3 object 
store.
 replacement for `s3n:`, this filesystem binding supports larger files and 
promises
 higher performance.
 
-The specifics of using these filesystems are documented below.
+The specifics of using these filesystems are documented in this section.
+
+
+See also:
+* [Testing](testing.html)
+* [Troubleshooting S3a](troubleshooting_s3a.html)
 
 ### Warning #1: Object Stores are not filesystems
 
@@ -1656,30 +1661,30 @@ $ bin/hadoop fs -ls s3a://frankfurt/
 WARN s3a.S3AFileSystem: Client: Amazon S3 error 400: 400 Bad Request; Bad 
Request (retryable)
 
 com.amazonaws.services.s3.model.AmazonS3Exception: Bad Request (Service: 
Amazon S3; Status Code: 400; Error Code: 400 Bad Request; Request ID: 
923C5D9E75E44C06), S3 Extended Request ID: 
HDwje6k+ANEeDsM6aJ8+D5gUmNAMguOk2BvZ8PH3g9z0gpH+IuwT7N19oQOnIr5CIx7Vqb/uThE=
-   at 
com.amazonaws.http.AmazonHttpClient.handleErrorResponse(AmazonHttpClient.java:1182)
-   at 
com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:770)
-   at 
com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:489)
-   at 
com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:310)
-   at 
com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:3785)
-   at 
com.amazonaws.services.s3.AmazonS3Client.headBucket(AmazonS3Client.java:1107)
-   at 
com.amazonaws.services.s3.AmazonS3Client.doesBucketExist(AmazonS3Client.java:1070)
-   at 
org.apache.hadoop.fs.s3a.S3AFileSystem.verifyBucketExists(S3AFileSystem.java:307)
-   at 
org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:284)
-   at 
org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2793)
-   at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:101)
-   at 
org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2830)
-   at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2812)
-   at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:389)
-   at org.apache.hadoop.fs.Path.getFileSystem(Path.java:356)
-   at org.apache.hadoop.fs.shell.PathData.expandAsGlob(PathData.java:325)
-   at org.apache.hadoop.fs.shell.Command.expandArgument(Command.java:235)
-   at org.apache.hadoop.fs.shell.Command.expandArguments(Command.java:218)
-   at 
org.apache.hadoop.fs.shell.FsCommand.processRawArguments(FsCommand.java:103)
-   at org.apache.hadoop.fs.shell.Command.run(Command.java:165)
-   at org.apache.hadoop.fs.FsShell.run(FsShell.java:315)
-   at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:76)
-   at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:90)
-   at org.apache.hadoop.fs.FsShell.main(FsShell.java:373)
+at 
com.amazonaws.http.AmazonHttpClient.handleErrorResponse(AmazonHttpClient.java:1182)
+at 
com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:770)
+at 
com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:489)
+at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:310)
+at 
com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:3785)
+at 
com.amazonaws.services.s3.AmazonS3Client.headBucket(AmazonS3Client.java:1107)
+at 
com.amazonaws.services.s3.AmazonS3Client.doesBucketExist(AmazonS3Client.java:1070)
+at 

[08/26] hadoop git commit: YARN-6210. FairScheduler: Node reservations can interfere with preemption. (kasha)

2017-02-24 Thread xgong
YARN-6210. FairScheduler: Node reservations can interfere with preemption. 
(kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/718ad9f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/718ad9f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/718ad9f6

Branch: refs/heads/YARN-5734
Commit: 718ad9f6ee93d4145f2bb19b7582ce4e1174feaf
Parents: 732ee6f
Author: Karthik Kambatla 
Authored: Wed Feb 22 15:45:45 2017 -0800
Committer: Karthik Kambatla 
Committed: Wed Feb 22 15:46:07 2017 -0800

--
 .../resource/DefaultResourceCalculator.java |   3 +-
 .../resource/DominantResourceCalculator.java|  13 +-
 .../yarn/util/resource/ResourceCalculator.java  |  32 -
 .../scheduler/fair/FSAppAttempt.java|  61 ++---
 .../DominantResourceFairnessPolicy.java |   8 +-
 .../fair/policies/FairSharePolicy.java  |   3 +-
 .../scheduler/fair/TestFairScheduler.java   | 127 ---
 .../fair/TestFairSchedulerPreemption.java   |  44 +--
 8 files changed, 180 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/718ad9f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index 42c45ad..ef7229c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -30,7 +30,8 @@ public class DefaultResourceCalculator extends 
ResourceCalculator {
   LogFactory.getLog(DefaultResourceCalculator.class);
 
   @Override
-  public int compare(Resource unused, Resource lhs, Resource rhs) {
+  public int compare(Resource unused, Resource lhs, Resource rhs,
+  boolean singleType) {
 // Only consider memory
 return Long.compare(lhs.getMemorySize(), rhs.getMemorySize());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/718ad9f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 9f1c8d7..032aa02 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -51,17 +51,18 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   LogFactory.getLog(DominantResourceCalculator.class);
 
   @Override
-  public int compare(Resource clusterResource, Resource lhs, Resource rhs) {
+  public int compare(Resource clusterResource, Resource lhs, Resource rhs,
+  boolean singleType) {
 
 if (lhs.equals(rhs)) {
   return 0;
 }
 
 if (isInvalidDivisor(clusterResource)) {
-  if ((lhs.getMemorySize() < rhs.getMemorySize() && lhs.getVirtualCores() 
> rhs
-  .getVirtualCores())
-  || (lhs.getMemorySize() > rhs.getMemorySize() && 
lhs.getVirtualCores() < rhs
-  .getVirtualCores())) {
+  if ((lhs.getMemorySize() < rhs.getMemorySize() &&
+  lhs.getVirtualCores() > rhs.getVirtualCores()) ||
+  (lhs.getMemorySize() > rhs.getMemorySize() &&
+  lhs.getVirtualCores() < rhs.getVirtualCores())) {
 return 0;
   } else if (lhs.getMemorySize() > rhs.getMemorySize()
   || lhs.getVirtualCores() > rhs.getVirtualCores()) {
@@ -79,7 +80,7 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   return -1;
 } else if (l > r) {
   return 1;
-} else {
+} else if (!singleType) {
   l = getResourceAsValue(clusterResource, lhs, false);
   r = getResourceAsValue(clusterResource, rhs, false);
   if (l < r) {


[06/26] hadoop git commit: HDFS-11438. Fix typo in error message of StoragePolicyAdmin tool. Contributed by Alison Yu.

2017-02-24 Thread xgong
HDFS-11438. Fix typo in error message of StoragePolicyAdmin tool. Contributed 
by Alison Yu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d150f061
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d150f061
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d150f061

Branch: refs/heads/YARN-5734
Commit: d150f061f4ebde923fda28ea898a9606b8789758
Parents: 0013090
Author: Andrew Wang 
Authored: Wed Feb 22 15:16:09 2017 -0800
Committer: Andrew Wang 
Committed: Wed Feb 22 15:16:09 2017 -0800

--
 .../main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d150f061/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index 4e4f018..f0643b2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -259,7 +259,7 @@ public class StoragePolicyAdmin extends Configured 
implements Tool {
   final String path = StringUtils.popOptionWithArgument("-path", args);
   if (path == null) {
 System.err.println("Please specify the path from which "
-+ "the storage policy will be unsetd.\nUsage: " + getLongUsage());
++ "the storage policy will be unset.\nUsage: " + getLongUsage());
 return 1;
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[18/26] hadoop git commit: HADOOP-14113. Review ADL Docs. Contributed by Steve Loughran

2017-02-24 Thread xgong
HADOOP-14113. Review ADL Docs. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e60c6543
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e60c6543
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e60c6543

Branch: refs/heads/YARN-5734
Commit: e60c6543d57611039b0438d5dcb4cb19ee239bb6
Parents: 9c22a91
Author: Steve Loughran 
Authored: Fri Feb 24 13:24:59 2017 +
Committer: Steve Loughran 
Committed: Fri Feb 24 13:24:59 2017 +

--
 .../src/site/markdown/index.md  | 237 ++-
 1 file changed, 124 insertions(+), 113 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e60c6543/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
--
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index 6d9e173..9355241 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -20,20 +20,20 @@
 * [Usage](#Usage)
 * [Concepts](#Concepts)
 * [OAuth2 Support](#OAuth2_Support)
-* [Configuring Credentials & FileSystem](#Configuring_Credentials)
+* [Configuring Credentials and FileSystem](#Configuring_Credentials)
 * [Using Refresh Token](#Refresh_Token)
 * [Using Client Keys](#Client_Credential_Token)
 * [Protecting the Credentials with Credential 
Providers](#Credential_Provider)
 * [Enabling ADL Filesystem](#Enabling_ADL)
-* [Accessing adl URLs](#Accessing_adl_URLs)
+* [Accessing `adl` URLs](#Accessing_adl_URLs)
 * [User/Group Representation](#OIDtoUPNConfiguration)
-* [Testing the hadoop-azure Module](#Testing_the_hadoop-azure_Module)
+* [Testing the `hadoop-azure` Module](#Testing_the_hadoop-azure_Module)
 
 ## Introduction
 
-The hadoop-azure-datalake module provides support for integration with
-[Azure Data Lake Store]( 
https://azure.microsoft.com/en-in/documentation/services/data-lake-store/).
-The jar file is named azure-datalake-store.jar.
+The `hadoop-azure-datalake` module provides support for integration with the
+[Azure Data Lake 
Store](https://azure.microsoft.com/en-in/documentation/services/data-lake-store/).
+This support comes via the JAR file `azure-datalake-store.jar`.
 
 ## Features
 
@@ -43,13 +43,14 @@ The jar file is named azure-datalake-store.jar.
 * Can act as a source of data in a MapReduce job, or a sink.
 * Tested on both Linux and Windows.
 * Tested for scale.
-* API setOwner/setAcl/removeAclEntries/modifyAclEntries accepts UPN or OID
-  (Object ID) as user and group name.
+* API `setOwner()`, `setAcl`, `removeAclEntries()`, `modifyAclEntries()` 
accepts UPN or OID
+  (Object ID) as user and group names.
 
 ## Limitations
+
 Partial or no support for the following operations :
 
-* Operation on Symbolic Link
+* Operation on Symbolic Links
 * Proxy Users
 * File Truncate
 * File Checksum
@@ -58,55 +59,71 @@ Partial or no support for the following operations :
 * Extended Attributes(XAttrs) Operations
 * Snapshot Operations
 * Delegation Token Operations
-* User and group information returned as ListStatus and GetFileStatus is in 
form of GUID associated in Azure Active Directory.
+* User and group information returned as `listStatus()` and `getFileStatus()` 
is
+in the form of the GUID associated in Azure Active Directory.
 
 ## Usage
 
 ### Concepts
-Azure Data Lake Storage access path syntax is
+Azure Data Lake Storage access path syntax is:
+
+```
+adl://.azuredatalakestore.net/
+```
 
-adl://.azuredatalakestore.net/
+For details on using the store, see
+[**Get started with Azure Data Lake Store using the Azure 
Portal**](https://azure.microsoft.com/en-in/documentation/articles/data-lake-store-get-started-portal/)
 
-Get started with azure data lake account with 
[https://azure.microsoft.com/en-in/documentation/articles/data-lake-store-get-started-portal/](https://azure.microsoft.com/en-in/documentation/articles/data-lake-store-get-started-portal/)
+### OAuth2 Support
 
- OAuth2 Support
-Usage of Azure Data Lake Storage requires OAuth2 bearer token to be present as 
part of the HTTPS header as per OAuth2 specification. Valid OAuth2 bearer token 
should be obtained from Azure Active Directory for valid users who have  access 
to Azure Data Lake Storage Account.
+Usage of Azure Data Lake Storage requires an OAuth2 bearer token to be present 
as
+part of the HTTPS header as per the OAuth2 specification.
+A valid OAuth2 bearer token must be obtained from the Azure Active Directory 
service
+for those valid users who have access to Azure Data Lake 

[20/26] hadoop git commit: HADOOP-13817. Add a finite shell command timeout to ShellBasedUnixGroupsMapping. (harsh)

2017-02-24 Thread xgong
HADOOP-13817. Add a finite shell command timeout to 
ShellBasedUnixGroupsMapping. (harsh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8694deb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8694deb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8694deb

Branch: refs/heads/YARN-5734
Commit: e8694deb6ad180449f8ce6c1c8b4f84873c0587a
Parents: 50decd3
Author: Harsh J 
Authored: Mon Nov 14 15:59:58 2016 +0530
Committer: Harsh J 
Committed: Fri Feb 24 21:34:00 2017 +0530

--
 .../fs/CommonConfigurationKeysPublic.java   |  15 +++
 .../security/ShellBasedUnixGroupsMapping.java   | 114 +---
 .../main/java/org/apache/hadoop/util/Shell.java |  19 ++-
 .../src/main/resources/core-default.xml |  13 ++
 .../hadoop/security/TestGroupsCaching.java  |  19 +--
 .../TestShellBasedUnixGroupsMapping.java| 135 ++-
 6 files changed, 277 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8694deb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index f23dd51..e1feda1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -517,6 +517,21 @@ public class CommonConfigurationKeysPublic {
* 
* core-default.xml
*/
+  public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS =
+  "hadoop.security.groups.shell.command.timeout";
+  /**
+   * @see
+   * 
+   * core-default.xml
+   */
+  public static final long
+  HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT =
+  0L;
+  /**
+   * @see
+   * 
+   * core-default.xml
+   */
   public static final String  HADOOP_SECURITY_AUTHENTICATION =
 "hadoop.security.authentication";
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8694deb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
index 9b80be9..4146e7b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
@@ -18,17 +18,25 @@
 package org.apache.hadoop.security;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.StringTokenizer;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ExitCodeException;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A simple shell-based implementation of {@link GroupMappingServiceProvider} 
@@ -37,11 +45,28 @@ import org.apache.hadoop.util.Shell.ShellCommandExecutor;
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
-public class ShellBasedUnixGroupsMapping
+public class ShellBasedUnixGroupsMapping extends Configured
   implements GroupMappingServiceProvider {
-  
-  private static final Log LOG =
-LogFactory.getLog(ShellBasedUnixGroupsMapping.class);
+
+  @VisibleForTesting
+  protected static final Logger LOG =
+  LoggerFactory.getLogger(ShellBasedUnixGroupsMapping.class);
+
+  private long timeout = 0L;
+  private static final List EMPTY_GROUPS = new LinkedList<>();
+
+  @Override
+  public void setConf(Configuration conf) {
+super.setConf(conf);
+if 

[09/26] hadoop git commit: YARN-6194. Cluster capacity in SchedulingPolicy is updated only on allocation file reload. (Yufei Gu via kasha)

2017-02-24 Thread xgong
YARN-6194. Cluster capacity in SchedulingPolicy is updated only on allocation 
file reload. (Yufei Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b10e9622
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b10e9622
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b10e9622

Branch: refs/heads/YARN-5734
Commit: b10e962224a8ae1c6031a05322b0cc5e564bd078
Parents: 718ad9f
Author: Karthik Kambatla 
Authored: Wed Feb 22 15:58:49 2017 -0800
Committer: Karthik Kambatla 
Committed: Wed Feb 22 15:58:49 2017 -0800

--
 .../scheduler/fair/FSContext.java   | 21 
 .../resourcemanager/scheduler/fair/FSQueue.java |  2 +-
 .../scheduler/fair/FairScheduler.java   |  6 ++---
 .../scheduler/fair/SchedulingPolicy.java| 19 ++-
 .../DominantResourceFairnessPolicy.java | 16 +++--
 .../scheduler/fair/TestFairScheduler.java   |  8 +++
 .../TestDominantResourceFairnessPolicy.java | 25 +++-
 7 files changed, 74 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b10e9622/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java
index 56bc99c..a4aa8f4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import org.apache.hadoop.yarn.api.records.Resource;
+
 /**
  * Helper class that holds basic information to be passed around
  * FairScheduler classes. Think of this as a glorified map that holds key
@@ -27,28 +29,37 @@ public class FSContext {
   private boolean preemptionEnabled = false;
   private float preemptionUtilizationThreshold;
   private FSStarvedApps starvedApps;
+  private FairScheduler scheduler;
+
+  FSContext(FairScheduler scheduler) {
+this.scheduler = scheduler;
+  }
 
-  public boolean isPreemptionEnabled() {
+  boolean isPreemptionEnabled() {
 return preemptionEnabled;
   }
 
-  public void setPreemptionEnabled() {
+  void setPreemptionEnabled() {
 this.preemptionEnabled = true;
 if (starvedApps == null) {
   starvedApps = new FSStarvedApps();
 }
   }
 
-  public FSStarvedApps getStarvedApps() {
+  FSStarvedApps getStarvedApps() {
 return starvedApps;
   }
 
-  public float getPreemptionUtilizationThreshold() {
+  float getPreemptionUtilizationThreshold() {
 return preemptionUtilizationThreshold;
   }
 
-  public void setPreemptionUtilizationThreshold(
+  void setPreemptionUtilizationThreshold(
   float preemptionUtilizationThreshold) {
 this.preemptionUtilizationThreshold = preemptionUtilizationThreshold;
   }
+
+  public Resource getClusterResource() {
+return scheduler.getClusterResource();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b10e9622/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index 7e8b858..b5592c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -135,7 +135,7 @@ public abstract class FSQueue implements 

[04/26] hadoop git commit: YARN-6143. Fix incompatible issue caused by YARN-3583. (Sunil G via wangda)

2017-02-24 Thread xgong
YARN-6143. Fix incompatible issue caused by YARN-3583. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a6ca75f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a6ca75f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a6ca75f

Branch: refs/heads/YARN-5734
Commit: 1a6ca75f3872587fb34c995a4b372b8cd6366d7d
Parents: cfcd527
Author: Wangda Tan 
Authored: Wed Feb 22 11:17:09 2017 -0800
Committer: Wangda Tan 
Committed: Wed Feb 22 11:17:09 2017 -0800

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |  6 +--
 .../GetLabelsToNodesResponse.java   |  6 +--
 .../GetNodesToLabelsResponse.java   |  7 ++-
 .../AddToClusterNodeLabelsRequest.java  | 11 ++---
 ..._server_resourcemanager_service_protos.proto |  5 +-
 .../src/main/proto/yarn_protos.proto|  6 +--
 .../src/main/proto/yarn_service_protos.proto|  5 +-
 .../hadoop/yarn/client/api/YarnClient.java  |  6 +--
 .../yarn/client/api/impl/YarnClientImpl.java|  6 +--
 .../yarn/client/api/impl/TestYarnClient.java| 49 --
 .../pb/GetClusterNodeLabelsResponsePBImpl.java  |  4 ++
 .../impl/pb/GetLabelsToNodesResponsePBImpl.java | 20 +++-
 .../impl/pb/GetNodesToLabelsResponsePBImpl.java | 52 
 .../pb/AddToClusterNodeLabelsRequestPBImpl.java | 47 +-
 .../pb/ReplaceLabelsOnNodeRequestPBImpl.java| 18 +++
 .../server/resourcemanager/AdminService.java|  7 +--
 .../server/resourcemanager/ClientRMService.java |  6 +--
 .../resourcemanager/TestClientRMService.java| 42 +---
 18 files changed, 136 insertions(+), 167 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a6ca75f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index c302553..62aa497 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -476,19 +476,19 @@ public class ResourceMgrDelegate extends YarnClient {
 return client.listReservations(request);
   }
   @Override
-  public Map getNodeToLabels() throws YarnException,
+  public Map getNodeToLabels() throws YarnException,
   IOException {
 return client.getNodeToLabels();
   }
 
   @Override
-  public Map getLabelsToNodes() throws YarnException,
+  public Map getLabelsToNodes() throws YarnException,
   IOException {
 return client.getLabelsToNodes();
   }
 
   @Override
-  public Map getLabelsToNodes(Set labels)
+  public Map getLabelsToNodes(Set labels)
   throws YarnException, IOException {
 return client.getLabelsToNodes(labels);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a6ca75f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
index da2be28..ef0bf60 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.yarn.util.Records;
 
 public abstract class GetLabelsToNodesResponse {
   public static GetLabelsToNodesResponse newInstance(
-  Map map) {
+  Map map) {
GetLabelsToNodesResponse response =
 Records.newRecord(GetLabelsToNodesResponse.class);
 response.setLabelsToNodes(map);
@@ -38,9 +38,9 @@ public abstract class GetLabelsToNodesResponse {
 
   @Public
   @Evolving
-  public abstract void 

[16/26] hadoop git commit: HDFS-11426. Refactor EC CLI to be similar to storage policies CLI.

2017-02-24 Thread xgong
HDFS-11426. Refactor EC CLI to be similar to storage policies CLI.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/132f758e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/132f758e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/132f758e

Branch: refs/heads/YARN-5734
Commit: 132f758e3dbe3a3f11c0d9b2de8edbee594fb475
Parents: 694e680
Author: Andrew Wang 
Authored: Thu Feb 23 16:00:11 2017 -0800
Committer: Andrew Wang 
Committed: Thu Feb 23 16:00:11 2017 -0800

--
 .../org/apache/hadoop/cli/CLITestHelper.java|  15 +-
 .../hadoop-hdfs/src/main/bin/hdfs   |   2 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   | 320 +++
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |   2 +-
 .../hadoop/hdfs/tools/erasurecode/ECCli.java|  62 
 .../hdfs/tools/erasurecode/ECCommand.java   | 248 --
 .../src/site/markdown/HDFSErasureCoding.md  |  16 +-
 .../hadoop/cli/CLITestCmdErasureCoding.java |   4 +-
 .../cli/util/ErasureCodingCliCmdExecutor.java   |   6 +-
 .../test/resources/testErasureCodingConf.xml| 135 
 10 files changed, 405 insertions(+), 405 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/132f758e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java
index b08af16..89d4e30 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.cli;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.cli.util.*;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
 import org.apache.hadoop.conf.Configuration;
@@ -28,6 +26,9 @@ import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.xml.sax.Attributes;
 import org.xml.sax.SAXException;
 import org.xml.sax.helpers.DefaultHandler;
@@ -41,9 +42,9 @@ import java.util.ArrayList;
  * Tests for the Command Line Interface (CLI)
  */
 public class CLITestHelper {
-  private static final Log LOG =
-LogFactory.getLog(CLITestHelper.class.getName());
-  
+  private static final Logger LOG = LoggerFactory.getLogger(CLITestHelper
+  .class);
+
   // In this mode, it runs the command and compares the actual output
   // with the expected output  
   public static final String TESTMODE_TEST = "test"; // Run the tests
@@ -62,7 +63,6 @@ public class CLITestHelper {
   // Storage for tests read in from the config file
   protected ArrayList testsFromConfigFile = null;
   protected ArrayList testComparators = null;
-  protected String thisTestCaseName = null;
   protected ComparatorData comparatorData = null;
   protected Configuration conf = null;
   protected String clitestDataDir = null;
@@ -80,7 +80,8 @@ public class CLITestHelper {
 p.parse(testConfigFile, getConfigParser());
 success = true;
   } catch (Exception e) {
-LOG.info("File: " + testConfigFile + " not found");
+LOG.info("Exception while reading test config file {}:",
+testConfigFile, e);
 success = false;
   }
   assertTrue("Error reading test config file", success);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132f758e/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 617adbe..cf6d94a 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -130,7 +130,7 @@ function hdfscmd_case
   exit 0
 ;;
 ec)
-  HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.erasurecode.ECCli
+  HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.ECAdmin
 ;;
 fetchdt)
   HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132f758e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git 

[11/26] hadoop git commit: YARN-6211. Synchronization improvement for moveApplicationAcrossQueues and updateApplicationPriority. Contributed by Bibin A Chundatt.

2017-02-24 Thread xgong
YARN-6211. Synchronization improvement for moveApplicationAcrossQueues and 
updateApplicationPriority. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a207aa99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a207aa99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a207aa99

Branch: refs/heads/YARN-5734
Commit: a207aa9930e7ee4f10228e2db4b4e733794eb8ea
Parents: 13d4bcf
Author: Sunil G 
Authored: Thu Feb 23 14:19:07 2017 +0530
Committer: Sunil G 
Committed: Thu Feb 23 14:19:07 2017 +0530

--
 .../hadoop/yarn/server/resourcemanager/ClientRMService.java| 6 --
 .../hadoop/yarn/server/resourcemanager/RMAppManager.java   | 5 ++---
 2 files changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a207aa99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 0c87ede..48bccfb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -1228,7 +1228,8 @@ public class ClientRMService extends AbstractService 
implements
 }
 
 try {
-  this.rmAppManager.moveApplicationAcrossQueue(applicationId,
+  this.rmAppManager.moveApplicationAcrossQueue(
+  application.getApplicationId(),
   request.getTargetQueue());
 } catch (YarnException ex) {
   RMAuditLogger.logFailure(callerUGI.getShortUserName(),
@@ -1662,7 +1663,8 @@ public class ClientRMService extends AbstractService 
implements
 }
 
 try {
-  rmAppManager.updateApplicationPriority(callerUGI, applicationId,
+  rmAppManager.updateApplicationPriority(callerUGI,
+  application.getApplicationId(),
   newAppPriority);
 } catch (YarnException ex) {
   RMAuditLogger.logFailure(callerUGI.getShortUserName(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a207aa99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index cc796e3..e211867 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -53,7 +53,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRecoverEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
@@ -612,7 +611,7 @@ public class RMAppManager implements 
EventHandler,
 RMApp app = this.rmContext.getRMApps().get(applicationId);
 
 synchronized (applicationId) {
-  if (app.isAppInCompletedStates()) {
+  if (app == null || app.isAppInCompletedStates()) {
 return;
   }
 
@@ -658,7 +657,7 @@ public class RMAppManager implements 
EventHandler,
 // 2. Update this information to state-store
 // 3. Perform real move operation and update in-memory data structures.
 

[10/26] hadoop git commit: HDFS-4025. QJM: Synchronize past log segments to JNs that missed them. Contributed by Hanisha Koneru.

2017-02-24 Thread xgong
HDFS-4025. QJM: Synchronize past log segments to JNs that missed them. 
Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13d4bcfe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13d4bcfe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13d4bcfe

Branch: refs/heads/YARN-5734
Commit: 13d4bcfe3535a2df79c2a56e7578716d15497ff4
Parents: b10e962
Author: Jing Zhao 
Authored: Wed Feb 22 16:33:38 2017 -0800
Committer: Jing Zhao 
Committed: Wed Feb 22 16:33:38 2017 -0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  16 +
 .../qjournal/client/QuorumJournalManager.java   |  38 +-
 .../hadoop/hdfs/qjournal/server/JNStorage.java  |   9 +-
 .../hadoop/hdfs/qjournal/server/Journal.java|  19 +
 .../hdfs/qjournal/server/JournalNode.java   |  23 +-
 .../hdfs/qjournal/server/JournalNodeSyncer.java | 413 +++
 .../hadoop/hdfs/server/common/Storage.java  |   9 +
 .../apache/hadoop/hdfs/server/common/Util.java  |  46 ++-
 .../hadoop/hdfs/server/namenode/NNStorage.java  |   5 +-
 .../hdfs/server/namenode/TransferFsImage.java   |   3 +-
 .../src/main/resources/hdfs-default.xml |  41 ++
 .../hdfs/qjournal/MiniJournalCluster.java   |   8 +
 .../hadoop/hdfs/qjournal/MiniQJMHACluster.java  |   1 +
 .../hdfs/qjournal/TestJournalNodeSync.java  | 264 
 14 files changed, 853 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index cf1d21a..cfd16aa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -707,6 +707,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String DFS_IMAGE_TRANSFER_CHUNKSIZE_KEY = 
"dfs.image.transfer.chunksize";
   public static final int DFS_IMAGE_TRANSFER_CHUNKSIZE_DEFAULT = 64 * 1024;
 
+  // Edit Log segment transfer timeout
+  public static final String DFS_EDIT_LOG_TRANSFER_TIMEOUT_KEY =
+  "dfs.edit.log.transfer.timeout";
+  public static final int DFS_EDIT_LOG_TRANSFER_TIMEOUT_DEFAULT = 30 * 1000;
+
+  // Throttling Edit Log Segment transfer for Journal Sync
+  public static final String DFS_EDIT_LOG_TRANSFER_RATE_KEY =
+  "dfs.edit.log.transfer.bandwidthPerSec";
+  public static final long DFS_EDIT_LOG_TRANSFER_RATE_DEFAULT = 0; //no 
throttling
+
   // Datanode File IO Stats
   public static final String DFS_DATANODE_ENABLE_FILEIO_PROFILING_KEY =
   "dfs.datanode.enable.fileio.profiling";
@@ -891,6 +901,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String  DFS_JOURNALNODE_KEYTAB_FILE_KEY = 
"dfs.journalnode.keytab.file";
   public static final String  DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY = 
"dfs.journalnode.kerberos.principal";
   public static final String  
DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY = 
"dfs.journalnode.kerberos.internal.spnego.principal";
+  public static final String DFS_JOURNALNODE_ENABLE_SYNC_KEY =
+  "dfs.journalnode.enable.sync";
+  public static final boolean DFS_JOURNALNODE_ENABLE_SYNC_DEFAULT = false;
+  public static final String DFS_JOURNALNODE_SYNC_INTERVAL_KEY =
+  "dfs.journalnode.sync.interval";
+  public static final long DFS_JOURNALNODE_SYNC_INTERVAL_DEFAULT = 2*60*1000L;
 
   // Journal-node related configs for the client side.
   public static final String  DFS_QJOURNAL_QUEUE_SIZE_LIMIT_KEY = 
"dfs.qjournal.queued-edits.limit.mb";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index ae3358b..97c0050 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
 

[26/26] hadoop git commit: YARN-5951. Changes to allow CapacityScheduler to use configuration store

2017-02-24 Thread xgong
YARN-5951. Changes to allow CapacityScheduler to use configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42a814a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42a814a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42a814a6

Branch: refs/heads/YARN-5734
Commit: 42a814a669067f29563a352230442ebf1d17eaed
Parents: d2b3ba9
Author: Jonathan Hung 
Authored: Mon Jan 30 19:03:48 2017 -0800
Committer: Xuan 
Committed: Fri Feb 24 15:56:02 2017 -0800

--
 .../scheduler/capacity/CapacityScheduler.java   | 36 +--
 .../CapacitySchedulerConfiguration.java | 10 +++
 .../capacity/conf/CSConfigurationProvider.java  | 46 ++
 .../conf/FileBasedCSConfigurationProvider.java  | 67 
 .../scheduler/capacity/conf/package-info.java   | 29 +
 .../capacity/TestCapacityScheduler.java |  4 +-
 6 files changed, 170 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42a814a6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 3517764..de0f162 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -108,6 +107,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityDiagnosticConstant;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.FileBasedCSConfigurationProvider;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
@@ -166,6 +167,7 @@ public class CapacityScheduler extends
 
   private int offswitchPerHeartbeatLimit;
 
+  private CSConfigurationProvider csConfProvider;
 
   @Override
   public void setConf(Configuration conf) {
@@ -289,7 +291,18 @@ public class CapacityScheduler extends
   IOException {
 try {
   writeLock.lock();
-  this.conf = loadCapacitySchedulerConfiguration(configuration);
+  String confProviderStr = configuration.get(
+  CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
+  CapacitySchedulerConfiguration.DEFAULT_CS_CONF_PROVIDER);
+  if (confProviderStr.equals(
+  CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER)) {
+this.csConfProvider = new FileBasedCSConfigurationProvider(rmContext);
+  } else {
+throw new IOException("Invalid CS configuration provider: " +
+confProviderStr);
+  }
+  this.csConfProvider.init(configuration);
+  this.conf = this.csConfProvider.loadConfiguration(configuration);
   validateConf(this.conf);
   this.minimumAllocation = this.conf.getMinimumAllocation();
   initMaximumResourceCapability(this.conf.getMaximumAllocation());
@@ -396,7 +409,7 @@ public class CapacityScheduler extends
   writeLock.lock();
   Configuration configuration = new Configuration(newConf);
   CapacitySchedulerConfiguration oldConf = this.conf;
-  this.conf = loadCapacitySchedulerConfiguration(configuration);
+  this.conf = csConfProvider.loadConfiguration(configuration);
 

[19/26] hadoop git commit: HADOOP-14097. Remove Java6 specific code from GzipCodec.java. Contributed by Elek, Marton.

2017-02-24 Thread xgong
HADOOP-14097. Remove Java6 specific code from GzipCodec.java. Contributed by 
Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50decd36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50decd36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50decd36

Branch: refs/heads/YARN-5734
Commit: 50decd36130945e184734dcd55b8912be6f4550a
Parents: e60c654
Author: Akira Ajisaka 
Authored: Sat Feb 25 00:28:31 2017 +0900
Committer: Akira Ajisaka 
Committed: Sat Feb 25 00:28:31 2017 +0900

--
 .../apache/hadoop/io/compress/GzipCodec.java| 59 
 1 file changed, 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50decd36/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
index 01b6434..d079412 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
@@ -45,10 +45,6 @@ public class GzipCodec extends DefaultCodec {
   protected static class GzipOutputStream extends CompressorStream {
 
 private static class ResetableGZIPOutputStream extends GZIPOutputStream {
-  private static final int TRAILER_SIZE = 8;
-  public static final String JVMVersion= 
System.getProperty("java.version");
-  private static final boolean HAS_BROKEN_FINISH =
-  (IBM_JAVA && JVMVersion.contains("1.6.0"));
 
   public ResetableGZIPOutputStream(OutputStream out) throws IOException {
 super(out);
@@ -57,61 +53,6 @@ public class GzipCodec extends DefaultCodec {
   public void resetState() throws IOException {
 def.reset();
   }
-
-  /**
-   * Override this method for HADOOP-8419.
-   * Override because IBM implementation calls def.end() which
-   * causes problem when reseting the stream for reuse.
-   *
-   */
-  @Override
-  public void finish() throws IOException {
-if (HAS_BROKEN_FINISH) {
-  if (!def.finished()) {
-def.finish();
-while (!def.finished()) {
-  int i = def.deflate(this.buf, 0, this.buf.length);
-  if ((def.finished()) && (i <= this.buf.length - TRAILER_SIZE)) {
-writeTrailer(this.buf, i);
-i += TRAILER_SIZE;
-out.write(this.buf, 0, i);
-
-return;
-  }
-  if (i > 0) {
-out.write(this.buf, 0, i);
-  }
-}
-
-byte[] arrayOfByte = new byte[TRAILER_SIZE];
-writeTrailer(arrayOfByte, 0);
-out.write(arrayOfByte);
-  }
-} else {
-  super.finish();
-}
-  }
-
-  /** re-implement for HADOOP-8419 because the relative method in jdk is 
invisible */
-  private void writeTrailer(byte[] paramArrayOfByte, int paramInt)
-throws IOException {
-writeInt((int)this.crc.getValue(), paramArrayOfByte, paramInt);
-writeInt(this.def.getTotalIn(), paramArrayOfByte, paramInt + 4);
-  }
-
-  /** re-implement for HADOOP-8419 because the relative method in jdk is 
invisible */
-  private void writeInt(int paramInt1, byte[] paramArrayOfByte, int 
paramInt2)
-throws IOException {
-writeShort(paramInt1 & 0x, paramArrayOfByte, paramInt2);
-writeShort(paramInt1 >> 16 & 0x, paramArrayOfByte, paramInt2 + 2);
-  }
-
-  /** re-implement for HADOOP-8419 because the relative method in jdk is 
invisible */
-  private void writeShort(int paramInt1, byte[] paramArrayOfByte, int 
paramInt2)
-throws IOException {
-paramArrayOfByte[paramInt2] = (byte)(paramInt1 & 0xFF);
-paramArrayOfByte[(paramInt2 + 1)] = (byte)(paramInt1 >> 8 & 0xFF);
-  }
 }
 
 public GzipOutputStream(OutputStream out) throws IOException {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/26] hadoop git commit: MAPREDUCE-6825. YARNRunner#createApplicationSubmissionContext method is longer than 150 lines (Contributed by Gergely Novák via Daniel Templeton)

2017-02-24 Thread xgong
MAPREDUCE-6825. YARNRunner#createApplicationSubmissionContext method is longer 
than 150 lines (Contributed by Gergely Novák via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/732ee6f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/732ee6f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/732ee6f0

Branch: refs/heads/YARN-5734
Commit: 732ee6f0b58a12500198c0d934cc570c7490b520
Parents: d150f06
Author: Daniel Templeton 
Authored: Wed Feb 22 15:38:11 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Feb 22 15:38:11 2017 -0800

--
 .../org/apache/hadoop/mapred/YARNRunner.java| 141 +++
 1 file changed, 86 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/732ee6f0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
index 98fe553..228c6af 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
@@ -291,8 +291,7 @@ public class YARNRunner implements ClientProtocol {
   throws IOException, InterruptedException {
 
 addHistoryToken(ts);
-
-// Construct necessary information to start the MR AM
+
 ApplicationSubmissionContext appContext =
   createApplicationSubmissionContext(conf, jobSubmitDir, ts);
 
@@ -331,34 +330,15 @@ public class YARNRunner implements ClientProtocol {
 return rsrc;
   }
 
-  public ApplicationSubmissionContext createApplicationSubmissionContext(
-  Configuration jobConf,
-  String jobSubmitDir, Credentials ts) throws IOException {
-ApplicationId applicationId = resMgrDelegate.getApplicationId();
-
-// Setup resource requirements
-Resource capability = recordFactory.newRecordInstance(Resource.class);
-capability.setMemorySize(
-conf.getInt(
-MRJobConfig.MR_AM_VMEM_MB, MRJobConfig.DEFAULT_MR_AM_VMEM_MB
-)
-);
-capability.setVirtualCores(
-conf.getInt(
-MRJobConfig.MR_AM_CPU_VCORES, MRJobConfig.DEFAULT_MR_AM_CPU_VCORES
-)
-);
-LOG.debug("AppMaster capability = " + capability);
-
-// Setup LocalResources
-Map localResources =
-new HashMap();
+  private Map setupLocalResources(Configuration jobConf,
+  String jobSubmitDir) throws IOException {
+Map localResources = new HashMap<>();
 
 Path jobConfPath = new Path(jobSubmitDir, MRJobConfig.JOB_CONF_FILE);
 
-URL yarnUrlForJobSubmitDir = 
URL.fromPath(defaultFileContext.getDefaultFileSystem()
-.resolvePath(
-defaultFileContext.makeQualified(new Path(jobSubmitDir;
+URL yarnUrlForJobSubmitDir = URL.fromPath(defaultFileContext
+.getDefaultFileSystem().resolvePath(
+defaultFileContext.makeQualified(new Path(jobSubmitDir;
 LOG.debug("Creating setup context, jobSubmitDir url is "
 + yarnUrlForJobSubmitDir);
 
@@ -371,7 +351,7 @@ public class YARNRunner implements ClientProtocol {
   FileContext.getFileContext(jobJarPath.toUri(), jobConf),
   jobJarPath,
   LocalResourceType.PATTERN);
-  String pattern = conf.getPattern(JobContext.JAR_UNPACK_PATTERN, 
+  String pattern = conf.getPattern(JobContext.JAR_UNPACK_PATTERN,
   JobConf.UNPACK_JAR_PATTERN_DEFAULT).pattern();
   rc.setPattern(pattern);
   localResources.put(MRJobConfig.JOB_JAR, rc);
@@ -392,13 +372,11 @@ public class YARNRunner implements ClientProtocol {
   new Path(jobSubmitDir, s), LocalResourceType.FILE));
 }
 
-// Setup security tokens
-DataOutputBuffer dob = new DataOutputBuffer();
-ts.writeTokenStorageToStream(dob);
-ByteBuffer securityTokens  = ByteBuffer.wrap(dob.getData(), 0, 
dob.getLength());
+return localResources;
+  }
 
-// Setup the command to run the AM
-List vargs = new ArrayList(8);
+  private List setupAMCommand(Configuration jobConf) {
+List vargs = new ArrayList<>(8);
 vargs.add(MRApps.crossPlatformifyMREnv(jobConf, 

[12/26] hadoop git commit: HADOOP-13321. Deprecate FileSystem APIs that promote inefficient call patterns. Contributed by Chris Nauroth and Mingliang Liu

2017-02-24 Thread xgong
HADOOP-13321. Deprecate FileSystem APIs that promote inefficient call patterns. 
Contributed by Chris Nauroth and Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4d4a237
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4d4a237
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4d4a237

Branch: refs/heads/YARN-5734
Commit: a4d4a23785356e6a19d0db3a2dec8ae8cf861273
Parents: a207aa9
Author: Mingliang Liu 
Authored: Thu Feb 16 16:25:51 2017 -0800
Committer: Mingliang Liu 
Committed: Thu Feb 23 12:55:40 2017 -0800

--
 .../java/org/apache/hadoop/fs/ChecksumFileSystem.java|  2 ++
 .../src/main/java/org/apache/hadoop/fs/FileSystem.java   | 11 +++
 .../java/org/apache/hadoop/fs/ftp/FTPFileSystem.java |  1 +
 .../java/org/apache/hadoop/fs/s3a/S3AFileSystem.java |  2 ++
 .../hadoop/fs/swift/snative/SwiftNativeFileSystem.java   |  2 ++
 5 files changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d4a237/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index e0ce327..14c1905 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -605,6 +605,7 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
* Rename files/dirs
*/
   @Override
+  @SuppressWarnings("deprecation")
   public boolean rename(Path src, Path dst) throws IOException {
 if (fs.isDirectory(src)) {
   return fs.rename(src, dst);
@@ -721,6 +722,7 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
* If src and dst are directories, the copyCrc parameter
* determines whether to copy CRC files.
*/
+  @SuppressWarnings("deprecation")
   public void copyToLocalFile(Path src, Path dst, boolean copyCrc)
 throws IOException {
 if (!fs.isDirectory(src)) { // source is a file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d4a237/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 55cd97e..ededfa9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1624,6 +1624,11 @@ public abstract class FileSystem extends Configured 
implements Closeable {
   }
 
   /** Check if a path exists.
+   *
+   * It is highly discouraged to call this method back to back with other
+   * {@link #getFileStatus(Path)} calls, as this will involve multiple 
redundant
+   * RPC calls in HDFS.
+   *
* @param f source path
* @return true if the path exists
* @throws IOException IO failure
@@ -1639,9 +1644,12 @@ public abstract class FileSystem extends Configured 
implements Closeable {
   /** True iff the named path is a directory.
* Note: Avoid using this method. Instead reuse the FileStatus
* returned by getFileStatus() or listStatus() methods.
+   *
* @param f path to check
* @throws IOException IO failure
+   * @deprecated Use {@link #getFileStatus(Path)} instead
*/
+  @Deprecated
   public boolean isDirectory(Path f) throws IOException {
 try {
   return getFileStatus(f).isDirectory();
@@ -1653,9 +1661,12 @@ public abstract class FileSystem extends Configured 
implements Closeable {
   /** True iff the named path is a regular file.
* Note: Avoid using this method. Instead reuse the FileStatus
* returned by {@link #getFileStatus(Path)} or listStatus() methods.
+   *
* @param f path to check
* @throws IOException IO failure
+   * @deprecated Use {@link #getFileStatus(Path)} instead
*/
+  @Deprecated
   public boolean isFile(Path f) throws IOException {
 try {
   return getFileStatus(f).isFile();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d4a237/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
--
diff --git 

[22/26] hadoop git commit: HDFS-11427. Rename rs-default to rs.

2017-02-24 Thread xgong
HDFS-11427. Rename rs-default to rs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1a52b04
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1a52b04
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1a52b04

Branch: refs/heads/YARN-5734
Commit: c1a52b04d0cc5ad5c86ae93043655f313386f7f9
Parents: b32ffa2
Author: Andrew Wang 
Authored: Fri Feb 24 10:58:45 2017 -0800
Committer: Andrew Wang 
Committed: Fri Feb 24 10:58:45 2017 -0800

--
 .../apache/hadoop/io/erasurecode/CodecUtil.java | 28 +++
 .../io/erasurecode/ErasureCodeConstants.java|  8 ++---
 .../erasurecode/coder/HHXORErasureDecoder.java  |  2 +-
 .../erasurecode/coder/HHXORErasureEncoder.java  |  2 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  |  2 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |  2 +-
 .../src/main/resources/core-default.xml |  4 +--
 .../erasurecode/TestCodecRawCoderMapping.java   | 10 +++---
 .../coder/TestHHXORErasureCoder.java|  2 +-
 .../erasurecode/coder/TestRSErasureCoder.java   |  2 +-
 .../src/site/markdown/HDFSErasureCoding.md  |  6 ++--
 .../TestDFSRSDefault10x4StripedInputStream.java |  2 +-
 ...TestDFSRSDefault10x4StripedOutputStream.java |  2 +-
 ...fault10x4StripedOutputStreamWithFailure.java |  4 +--
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |  2 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  2 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |  2 +-
 .../hadoop/hdfs/TestReconstructStripedFile.java |  2 +-
 .../TestUnsetAndChangeDirectoryEcPolicy.java|  2 +-
 .../test/resources/testErasureCodingConf.xml| 36 ++--
 20 files changed, 61 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
index 977bacb..861451a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
@@ -55,9 +55,9 @@ public final class CodecUtil {
   public static final String IO_ERASURECODE_CODEC_XOR =
   XORErasureCodec.class.getCanonicalName();
   /** Erasure coder Reed-Solomon codec. */
-  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_KEY =
+  public static final String IO_ERASURECODE_CODEC_RS_KEY =
   "io.erasurecode.codec.rs";
-  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT =
+  public static final String IO_ERASURECODE_CODEC_RS =
   RSErasureCodec.class.getCanonicalName();
   /** Erasure coder hitch hiker XOR codec. */
   public static final String IO_ERASURECODE_CODEC_HHXOR_KEY =
@@ -67,10 +67,10 @@ public final class CodecUtil {
 
   /** Supported erasure codec classes. */
 
-  /** Raw coder factory for the RS default codec. */
-  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY =
-  "io.erasurecode.codec.rs-default.rawcoder";
-  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_DEFAULT =
+  /** Raw coder factory for the RS codec. */
+  public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
+  "io.erasurecode.codec.rs.rawcoder";
+  public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_DEFAULT =
   RSRawErasureCoderFactory.class.getCanonicalName();
 
   /** Raw coder factory for the RS legacy codec. */
@@ -183,10 +183,10 @@ public final class CodecUtil {
   private static String getRawCoderFactNameFromCodec(Configuration conf,
  String codec) {
 switch (codec) {
-case ErasureCodeConstants.RS_DEFAULT_CODEC_NAME:
+case ErasureCodeConstants.RS_CODEC_NAME:
   return conf.get(
-  IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
-  IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_DEFAULT);
+  IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
+  IO_ERASURECODE_CODEC_RS_RAWCODER_DEFAULT);
 case ErasureCodeConstants.RS_LEGACY_CODEC_NAME:
   return conf.get(
   IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY,
@@ -233,15 +233,15 @@ public final class CodecUtil {
 
   private static String getCodecClassName(Configuration conf, String codec) {
 switch (codec) {
-case ErasureCodeConstants.RS_DEFAULT_CODEC_NAME:
+case ErasureCodeConstants.RS_CODEC_NAME:
   return conf.get(
-  

[03/26] hadoop git commit: HDFS-11411. Avoid OutOfMemoryError in TestMaintenanceState test runs. (Manoj Govindassamy via mingma)

2017-02-24 Thread xgong
HDFS-11411. Avoid OutOfMemoryError in TestMaintenanceState test runs. (Manoj 
Govindassamy via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cfcd5273
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cfcd5273
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cfcd5273

Branch: refs/heads/YARN-5734
Commit: cfcd527323352cf2a851c5c41f5d243d375d88d0
Parents: 4f4250f
Author: Ming Ma 
Authored: Wed Feb 22 09:41:07 2017 -0800
Committer: Ming Ma 
Committed: Wed Feb 22 09:41:07 2017 -0800

--
 .../test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfcd5273/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
index bbf947f..f3e2a0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -333,6 +333,7 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
 
   private void testExpectedReplication(int replicationFactor,
   int expectedReplicasInRead) throws IOException {
+setup();
 startCluster(1, 5);
 
 final Path file = new Path("/testExpectedReplication.dat");
@@ -352,6 +353,7 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
 nodeOutofService));
 
 cleanupFile(fileSys, file);
+teardown();
   }
 
   /**
@@ -492,6 +494,7 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
 
   private void testDecommissionDifferentNodeAfterMaintenance(int repl)
   throws Exception {
+setup();
 startCluster(1, 5);
 
 final Path file =
@@ -519,6 +522,7 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
 assertNull(checkWithRetry(ns, fileSys, file, repl + 1, null));
 
 cleanupFile(fileSys, file);
+teardown();
   }
 
   /**
@@ -583,6 +587,7 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
*/
   private void testChangeReplicationFactor(int oldFactor, int newFactor,
   int expectedLiveReplicas) throws IOException {
+setup();
 LOG.info("Starting testChangeReplicationFactor {} {} {}",
 oldFactor, newFactor, expectedLiveReplicas);
 startCluster(1, 5);
@@ -615,6 +620,7 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
 assertNull(checkWithRetry(ns, fileSys, file, newFactor, null));
 
 cleanupFile(fileSys, file);
+teardown();
   }
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/26] hadoop git commit: HDFS-11295. Check storage remaining instead of node remaining in BlockPlacementPolicyDefault.chooseReplicaToDelete(). Contributed by Marton Elek.

2017-02-24 Thread xgong
HDFS-11295. Check storage remaining instead of node remaining in 
BlockPlacementPolicyDefault.chooseReplicaToDelete(). Contributed by Marton Elek.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2b3ba9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2b3ba9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2b3ba9b

Branch: refs/heads/YARN-5734
Commit: d2b3ba9b8fb76753fa1b51661dacbde74aa5c6df
Parents: 289bc50
Author: Arpit Agarwal 
Authored: Fri Feb 24 15:44:11 2017 -0800
Committer: Arpit Agarwal 
Committed: Fri Feb 24 15:44:11 2017 -0800

--
 .../BlockPlacementPolicyDefault.java|  2 +-
 .../blockmanagement/DatanodeStorageInfo.java|  5 +++
 .../blockmanagement/TestReplicationPolicy.java  | 35 ++--
 .../TestReplicationPolicyWithNodeGroup.java | 23 ++---
 4 files changed, 49 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b3ba9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index eb54667..7676334 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -968,7 +968,7 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
   }
 
   final DatanodeDescriptor node = storage.getDatanodeDescriptor();
-  long free = node.getRemaining();
+  long free = storage.getRemaining();
   long lastHeartbeat = node.getLastUpdateMonotonic();
   if (lastHeartbeat < oldestHeartbeat) {
 oldestHeartbeat = lastHeartbeat;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b3ba9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index b4c8aaa..ab666b7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -388,6 +388,11 @@ public class DatanodeStorageInfo {
 return null;
   }
 
+  @VisibleForTesting
+  void setRemainingForTests(int remaining) {
+this.remaining = remaining;
+  }
+
   static enum AddBlockResult {
 ADDED, REPLACED, ALREADY_EXIST
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b3ba9b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 1af013d..27dcbf1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -950,24 +950,31 @@ public class TestReplicationPolicy extends 
BaseReplicationPolicyTest {
 List replicaList = new ArrayList<>();
 final Map rackMap
 = new HashMap();
-
-dataNodes[0].setRemaining(4*1024*1024);
+
+storages[0].setRemainingForTests(4*1024*1024);
+dataNodes[0].setRemaining(calculateRemaining(dataNodes[0]));
 replicaList.add(storages[0]);
-
-dataNodes[1].setRemaining(3*1024*1024);
+
+storages[1].setRemainingForTests(3*1024*1024);
+dataNodes[1].setRemaining(calculateRemaining(dataNodes[1]));
 replicaList.add(storages[1]);
-
-dataNodes[2].setRemaining(2*1024*1024);
+
+storages[2].setRemainingForTests(2*1024*1024);
+dataNodes[2].setRemaining(calculateRemaining(dataNodes[2]));
 

[14/26] hadoop git commit: HADOOP-14100. Upgrade Jsch jar to latest version to fix vulnerability in old versions. Contributed by Vinayakumar B

2017-02-24 Thread xgong
HADOOP-14100. Upgrade Jsch jar to latest version to fix vulnerability in old 
versions. Contributed by Vinayakumar B


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/159d6c56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/159d6c56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/159d6c56

Branch: refs/heads/YARN-5734
Commit: 159d6c56e7f3aa3ebe45750cf88735287f047b42
Parents: 82607fc
Author: Arpit Agarwal 
Authored: Thu Feb 23 14:25:08 2017 -0800
Committer: Arpit Agarwal 
Committed: Thu Feb 23 14:25:08 2017 -0800

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/159d6c56/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 47e21d8..c8aa857 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -963,7 +963,7 @@
   
 com.jcraft
 jsch
-0.1.51
+0.1.54
   
   
 org.apache.htrace


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/26] hadoop git commit: HADOOP-14102. Relax error message assertion in S3A test ITestS3AEncryptionSSEC. Contributed by Mingliang Liu

2017-02-24 Thread xgong
HADOOP-14102. Relax error message assertion in S3A test ITestS3AEncryptionSSEC. 
Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0013090f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0013090f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0013090f

Branch: refs/heads/YARN-5734
Commit: 0013090fb4340eadf147054e65a73de20a62c1c1
Parents: 1a6ca75
Author: Mingliang Liu 
Authored: Tue Feb 21 17:30:39 2017 -0800
Committer: Mingliang Liu 
Committed: Wed Feb 22 13:34:20 2017 -0800

--
 .../test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0013090f/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java
index 71586b8..a8cf70b 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java
@@ -58,7 +58,7 @@ public class ITestS3AEncryptionSSEC extends 
AbstractTestS3AEncryption {
 Exception {
 final Path[] path = new Path[1];
 intercept(java.nio.file.AccessDeniedException.class,
-"Forbidden (Service: Amazon S3; Status Code: 403;", () -> {
+"Service: Amazon S3; Status Code: 403;", () -> {
 
 int len = 2048;
 skipIfEncryptionTestsDisabled(getConfiguration());


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14116:FailoverOnNetworkExceptionRetry does not wait when failover on certain exception. Contributed by Jian He

2017-02-24 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk 53d372a25 -> 289bc50e6


HADOOP-14116:FailoverOnNetworkExceptionRetry does not wait when failover
on certain exception. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/289bc50e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/289bc50e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/289bc50e

Branch: refs/heads/trunk
Commit: 289bc50e663b882956878eeaefe0eaa1ef4ed39e
Parents: 53d372a
Author: Xuan 
Authored: Fri Feb 24 11:42:23 2017 -0800
Committer: Xuan 
Committed: Fri Feb 24 11:42:23 2017 -0800

--
 .../src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/289bc50e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 0c523a5..d6f3e04 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -683,7 +683,8 @@ public class RetryPolicies {
   } else if (e instanceof SocketException
   || (e instanceof IOException && !(e instanceof RemoteException))) {
 if (isIdempotentOrAtMostOnce) {
-  return RetryAction.FAILOVER_AND_RETRY;
+  return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
+  getFailoverOrRetrySleepTime(retries));
 } else {
   return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
   "the invoked method is not idempotent, and unable to determine "


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14116:FailoverOnNetworkExceptionRetry does not wait when failover on certain exception. Contributed by Jian He

2017-02-24 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bd3c2a2ee -> 5c509f5f0


HADOOP-14116:FailoverOnNetworkExceptionRetry does not wait when failover
on certain exception. Contributed by Jian He

(cherry picked from commit 289bc50e663b882956878eeaefe0eaa1ef4ed39e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c509f5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c509f5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c509f5f

Branch: refs/heads/branch-2
Commit: 5c509f5f0c9a39de96812e2a865ce5544f10aa29
Parents: bd3c2a2
Author: Xuan 
Authored: Fri Feb 24 11:42:23 2017 -0800
Committer: Xuan 
Committed: Fri Feb 24 11:43:22 2017 -0800

--
 .../src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c509f5f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 0c523a5..d6f3e04 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -683,7 +683,8 @@ public class RetryPolicies {
   } else if (e instanceof SocketException
   || (e instanceof IOException && !(e instanceof RemoteException))) {
 if (isIdempotentOrAtMostOnce) {
-  return RetryAction.FAILOVER_AND_RETRY;
+  return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
+  getFailoverOrRetrySleepTime(retries));
 } else {
   return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
   "the invoked method is not idempotent, and unable to determine "


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6228: EntityGroupFSTimelineStore should allow configurable cache stores. Contributed by Li Lu

2017-02-24 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c097d0343 -> 19c5b3d62


YARN-6228: EntityGroupFSTimelineStore should allow configurable cache
stores. Contributed by Li Lu

(cherry picked from commit 53d372a2550c970f3dd3c49738af3c1789ae589b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19c5b3d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19c5b3d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19c5b3d6

Branch: refs/heads/branch-2
Commit: 19c5b3d622c618ab64ddc86b84353cbd350f32dc
Parents: c097d03
Author: Xuan 
Authored: Fri Feb 24 10:58:48 2017 -0800
Committer: Xuan 
Committed: Fri Feb 24 11:00:16 2017 -0800

--
 .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 3 +++
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml  | 5 +
 .../apache/hadoop/yarn/server/timeline/EntityCacheItem.java | 9 +++--
 3 files changed, 15 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19c5b3d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6b4d0f4..8d64c2b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1846,6 +1846,9 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX =
   TIMELINE_SERVICE_PREFIX + "entity-group-fs-store.";
 
+  public static final String TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_CACHE_STORE 
=
+  TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX + "cache-store-class";
+
   public static final String TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR =
   TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX + "active-dir";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19c5b3d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 9f2af10..3b869e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2080,6 +2080,11 @@
   
 
   
+  
+yarn.timeline-service.entity-group-fs-store.cache-store-class
+org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore
+Caching storage timeline server v1.5 is using. 
+  
 
   
 yarn.timeline-service.entity-group-fs-store.active-dir

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19c5b3d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
index 7ed7c4a..8df60ab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
@@ -17,8 +17,10 @@
 package org.apache.hadoop.yarn.server.timeline;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -95,8 +97,11 @@ public class EntityCacheItem {
   }
   if (!appLogs.getDetailLogs().isEmpty()) {
 if 

hadoop git commit: YARN-6228: EntityGroupFSTimelineStore should allow configurable cache stores. Contributed by Li Lu

2017-02-24 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk c1a52b04d -> 53d372a25


YARN-6228: EntityGroupFSTimelineStore should allow configurable cache
stores. Contributed by Li Lu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53d372a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53d372a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53d372a2

Branch: refs/heads/trunk
Commit: 53d372a2550c970f3dd3c49738af3c1789ae589b
Parents: c1a52b0
Author: Xuan 
Authored: Fri Feb 24 10:58:48 2017 -0800
Committer: Xuan 
Committed: Fri Feb 24 10:59:35 2017 -0800

--
 .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 3 +++
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml  | 5 +
 .../apache/hadoop/yarn/server/timeline/EntityCacheItem.java | 9 +++--
 3 files changed, 15 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53d372a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 094a424..cdccec6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1890,6 +1890,9 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX =
   TIMELINE_SERVICE_PREFIX + "entity-group-fs-store.";
 
+  public static final String TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_CACHE_STORE 
=
+  TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX + "cache-store-class";
+
   public static final String TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR =
   TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX + "active-dir";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53d372a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 53beb5e..368946e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2096,6 +2096,11 @@
   
 
   
+  
+yarn.timeline-service.entity-group-fs-store.cache-store-class
+org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore
+Caching storage timeline server v1.5 is using. 
+  
 
   
 yarn.timeline-service.entity-group-fs-store.active-dir

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53d372a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
index 7ed7c4a..8df60ab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
@@ -17,8 +17,10 @@
 package org.apache.hadoop.yarn.server.timeline;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -95,8 +97,11 @@ public class EntityCacheItem {
   }
   if (!appLogs.getDetailLogs().isEmpty()) {
 if (store == null) {
-  store = new 

[49/50] [abbrv] hadoop git commit: HDFS-11430. Separate class InnerNode from class NetworkTopology and make it extendable. Contributed by Tsz Wo Nicholas Sze

2017-02-21 Thread xgong
HDFS-11430. Separate class InnerNode from class NetworkTopology and make it 
extendable. Contributed by Tsz Wo Nicholas Sze


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/003ae006
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/003ae006
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/003ae006

Branch: refs/heads/YARN-5734
Commit: 003ae00693d079799c4dcf02705379bcf34b8c79
Parents: 8ef7ebb
Author: Mingliang Liu 
Authored: Tue Feb 21 15:29:20 2017 -0800
Committer: Mingliang Liu 
Committed: Tue Feb 21 15:32:46 2017 -0800

--
 .../java/org/apache/hadoop/net/InnerNode.java   |  67 
 .../org/apache/hadoop/net/InnerNodeImpl.java| 304 +
 .../org/apache/hadoop/net/NetworkTopology.java  | 326 +--
 .../net/NetworkTopologyWithNodeGroup.java   |  43 +--
 .../apache/hadoop/net/TestNetworkTopology.java  |   2 +-
 5 files changed, 388 insertions(+), 354 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/003ae006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNode.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNode.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNode.java
new file mode 100644
index 000..d07929b
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNode.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.util.List;
+
+
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public interface InnerNode extends Node {
+  interface Factory {
+/** Construct an InnerNode from a path-like string */
+N newInnerNode(String path);
+  }
+
+  /** Add node n to the subtree of this node
+   * @param n node to be added
+   * @return true if the node is added; false otherwise
+   */
+  boolean add(Node n);
+
+  /** Given a node's string representation, return a reference to the node
+   * @param loc string location of the form /rack/node
+   * @return null if the node is not found or the childnode is there but
+   * not an instance of {@link InnerNodeImpl}
+   */
+  Node getLoc(String loc);
+
+  /** @return its children */
+  List getChildren();
+
+  /** @return the number of leave nodes. */
+  int getNumOfLeaves();
+
+  /** Remove node n from the subtree of this node
+   * @param n node to be deleted
+   * @return true if the node is deleted; false otherwise
+   */
+  boolean remove(Node n);
+
+  /** get leafIndex leaf of this subtree
+   * if it is not in the excludedNode
+   *
+   * @param leafIndex an indexed leaf of the node
+   * @param excludedNode an excluded node (can be null)
+   * @return
+   */
+  Node getLeaf(int leafIndex, Node excludedNode);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/003ae006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
new file mode 100644
index 000..e6aa0f7
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
@@ -0,0 +1,304 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use 

[43/50] [abbrv] hadoop git commit: HDFS-11177. 'storagepolicies -getStoragePolicy' command should accept URI based path. (Contributed by Surendra Singh Lilhore)

2017-02-21 Thread xgong
HDFS-11177. 'storagepolicies -getStoragePolicy' command should accept URI based 
path. (Contributed by Surendra Singh Lilhore)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48040506
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48040506
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48040506

Branch: refs/heads/YARN-5734
Commit: 480405063063f564ae0cdb34e0757ac3990569aa
Parents: 6ba61d2
Author: Vinayakumar B 
Authored: Tue Feb 21 18:13:19 2017 +0530
Committer: Vinayakumar B 
Committed: Tue Feb 21 18:13:19 2017 +0530

--
 .../apache/hadoop/hdfs/tools/AdminHelper.java   | 12 ++
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   | 23 +++-
 .../hdfs/tools/TestStoragePolicyCommands.java   | 14 +++-
 3 files changed, 33 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48040506/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java
index 153fb36..8bab550 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.tools;
 
 import com.google.common.base.Preconditions;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -26,6 +27,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.tools.TableListing;
 
 import java.io.IOException;
+import java.net.URI;
 import java.util.List;
 
 /**
@@ -48,6 +50,16 @@ public class AdminHelper {
 return (DistributedFileSystem)fs;
   }
 
+  static DistributedFileSystem getDFS(URI uri, Configuration conf)
+  throws IOException {
+FileSystem fs = FileSystem.get(uri, conf);
+if (!(fs instanceof DistributedFileSystem)) {
+  throw new IllegalArgumentException("FileSystem " + fs.getUri()
+  + " is not an HDFS file system");
+}
+return (DistributedFileSystem) fs;
+  }
+
   /**
* NN exceptions contain the stack trace as part of the exception message.
* When it's a known error, pretty-print the error and squish the stack 
trace.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48040506/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index d99b88a..4e4f018 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -148,9 +148,11 @@ public class StoragePolicyAdmin extends Configured 
implements Tool {
 return 1;
   }
 
-  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+  Path p = new Path(path);
+  final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
   try {
-HdfsFileStatus status = dfs.getClient().getFileInfo(path);
+HdfsFileStatus status = dfs.getClient().getFileInfo(
+Path.getPathWithoutSchemeAndAuthority(p).toString());
 if (status == null) {
   System.err.println("File/Directory does not exist: " + path);
   return 2;
@@ -161,9 +163,9 @@ public class StoragePolicyAdmin extends Configured 
implements Tool {
   return 0;
 }
 Collection policies = dfs.getAllStoragePolicies();
-for (BlockStoragePolicy p : policies) {
-  if (p.getId() == storagePolicyId) {
-System.out.println("The storage policy of " + path + ":\n" + p);
+for (BlockStoragePolicy policy : policies) {
+  if (policy.getId() == storagePolicyId) {
+System.out.println("The storage policy of " + path + ":\n" + 
policy);
 return 0;
   }
 }
@@ -215,10 +217,10 @@ public class StoragePolicyAdmin extends Configured 
implements Tool {
 getLongUsage());
 return 1;
   }
-
-  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+  Path p = new Path(path);
+  final DistributedFileSystem dfs = 

[46/50] [abbrv] hadoop git commit: HADOOP-14017. User friendly name for ADLS user and group. Contributed by Vishwajeet Dusane

2017-02-21 Thread xgong
HADOOP-14017. User friendly name for ADLS user and group. Contributed by 
Vishwajeet Dusane


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/924def78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/924def78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/924def78

Branch: refs/heads/YARN-5734
Commit: 924def78544a64449785f305cb6984c3559aea4d
Parents: 2158496
Author: Mingliang Liu 
Authored: Tue Feb 21 13:44:42 2017 -0800
Committer: Mingliang Liu 
Committed: Tue Feb 21 13:44:42 2017 -0800

--
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |  4 +++
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java | 23 +++---
 .../src/site/markdown/index.md  | 26 +++
 .../fs/adl/TestValidateConfiguration.java   |  9 ++
 .../apache/hadoop/fs/adl/live/TestMetadata.java | 33 
 5 files changed, 91 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/924def78/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 21120df..7d31103 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -87,6 +87,10 @@ public final class AdlConfKeys {
   "adl.feature.support.acl.bit";
   static final boolean ADL_SUPPORT_ACL_BIT_IN_FSPERMISSION_DEFAULT = true;
 
+  static final String ADL_ENABLEUPN_FOR_OWNERGROUP_KEY =
+  "adl.feature.ownerandgroup.enableupn";
+  static final boolean ADL_ENABLEUPN_FOR_OWNERGROUP_DEFAULT = false;
+
   private AdlConfKeys() {
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/924def78/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index fb0feda..e0e273e 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -32,6 +32,7 @@ import com.microsoft.azure.datalake.store.DirectoryEntry;
 import com.microsoft.azure.datalake.store.DirectoryEntryType;
 import com.microsoft.azure.datalake.store.IfExists;
 import com.microsoft.azure.datalake.store.LatencyTracker;
+import com.microsoft.azure.datalake.store.UserGroupRepresentation;
 import com.microsoft.azure.datalake.store.oauth2.AccessTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider;
 import 
com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
@@ -80,6 +81,8 @@ public class AdlFileSystem extends FileSystem {
   private ADLStoreClient adlClient;
   private Path workingDirectory;
   private boolean aclBitStatus;
+  private UserGroupRepresentation oidOrUpn;
+
 
   // retained for tests
   private AccessTokenProvider tokenProvider;
@@ -181,6 +184,11 @@ public class AdlFileSystem extends FileSystem {
 if (!trackLatency) {
   LatencyTracker.disable();
 }
+
+boolean enableUPN = conf.getBoolean(ADL_ENABLEUPN_FOR_OWNERGROUP_KEY,
+ADL_ENABLEUPN_FOR_OWNERGROUP_DEFAULT);
+oidOrUpn = enableUPN ? UserGroupRepresentation.UPN :
+UserGroupRepresentation.OID;
   }
 
   /**
@@ -439,7 +447,8 @@ public class AdlFileSystem extends FileSystem {
   @Override
   public FileStatus getFileStatus(final Path f) throws IOException {
 statistics.incrementReadOps(1);
-DirectoryEntry entry = adlClient.getDirectoryEntry(toRelativeFilePath(f));
+DirectoryEntry entry =
+adlClient.getDirectoryEntry(toRelativeFilePath(f), oidOrUpn);
 return toFileStatus(entry, f);
   }
 
@@ -456,7 +465,7 @@ public class AdlFileSystem extends FileSystem {
   public FileStatus[] listStatus(final Path f) throws IOException {
 statistics.incrementReadOps(1);
 List<DirectoryEntry> entries =
-adlClient.enumerateDirectory(toRelativeFilePath(f));
+adlClient.enumerateDirectory(toRelativeFilePath(f), oidOrUpn);
 return toFileStatuses(entries, f);
   }
 
@@ -749,8 +758,8 @@ public class AdlFileSystem extends FileSystem {
   @Override
   public AclStatus getAclStatus(final Path path) throws IOException {
 

[40/50] [abbrv] hadoop git commit: HADOOP-14077. Add ability to access jmx via proxy. Contributed by Yuanbo Liu.

2017-02-21 Thread xgong
HADOOP-14077. Add ability to access jmx via proxy.  Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/172b23af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/172b23af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/172b23af

Branch: refs/heads/YARN-5734
Commit: 172b23af33554b7d58fd41b022d983bcc2433da7
Parents: 3a2e30f
Author: Eric Yang 
Authored: Sat Feb 18 18:34:13 2017 -0800
Committer: Eric Yang 
Committed: Sat Feb 18 18:34:13 2017 -0800

--
 .../AuthenticationWithProxyUserFilter.java  |  43 +++---
 .../hadoop/http/TestHttpServerWithSpengo.java   |  15 ++-
 .../mapreduce/v2/app/webapp/AppController.java  |   7 +-
 .../hadoop/yarn/server/webapp/AppBlock.java | 135 ++-
 4 files changed, 114 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/172b23af/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
index ea9b282..751cf02 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
@@ -17,10 +17,11 @@
  */
 package org.apache.hadoop.security;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.http.NameValuePair;
 import org.apache.http.client.utils.URLEncodedUtils;
 
@@ -41,6 +42,9 @@ import java.util.List;
  */
 public class AuthenticationWithProxyUserFilter extends AuthenticationFilter {
 
+  public static final Log LOG =
+  LogFactory.getLog(AuthenticationWithProxyUserFilter.class);
+
   /**
* Constant used in URL's query string to perform a proxy user request, the
* value of the DO_AS parameter is the user the request will be
@@ -66,29 +70,30 @@ public class AuthenticationWithProxyUserFilter extends 
AuthenticationFilter {
   protected void doFilter(FilterChain filterChain, HttpServletRequest request,
   HttpServletResponse response) throws IOException, ServletException {
 
-// authorize proxy user before calling next filter.
-String proxyUser = getDoAs(request);
+final String proxyUser = getDoAs(request);
 if (proxyUser != null) {
-  UserGroupInformation realUser =
-  UserGroupInformation.createRemoteUser(request.getRemoteUser());
-  UserGroupInformation proxyUserInfo =
-  UserGroupInformation.createProxyUser(proxyUser, realUser);
-
-  try {
-ProxyUsers.authorize(proxyUserInfo, request.getRemoteAddr());
-  } catch (AuthorizationException ex) {
-HttpExceptionUtils.createServletExceptionResponse(response,
-HttpServletResponse.SC_FORBIDDEN, ex);
-// stop filter chain if there is an Authorization Exception.
-return;
-  }
 
-  final UserGroupInformation finalProxyUser = proxyUserInfo;
   // Change the remote user after proxy user is authorized.
-  request = new HttpServletRequestWrapper(request) {
+  final HttpServletRequest finalReq = request;
+  request = new HttpServletRequestWrapper(finalReq) {
+
+private String getRemoteOrProxyUser() throws AuthorizationException {
+  UserGroupInformation realUser =
+  UserGroupInformation.createRemoteUser(finalReq.getRemoteUser());
+  UserGroupInformation proxyUserInfo =
+  UserGroupInformation.createProxyUser(proxyUser, realUser);
+  ProxyUsers.authorize(proxyUserInfo, finalReq.getRemoteAddr());
+  return proxyUserInfo.getUserName();
+}
+
 @Override
 public String getRemoteUser() {
-  return finalProxyUser.getUserName();
+  try {
+return getRemoteOrProxyUser();
+  } catch (AuthorizationException ex) {
+LOG.error("Unable to verify proxy user: " + ex.getMessage(), ex);
+  }
+  return null;
 }
   };
 


[45/50] [abbrv] hadoop git commit: HADOOP-13826. S3A Deadlock in multipart copy due to thread pool limits. Contributed by Sean Mackrory.

2017-02-21 Thread xgong
HADOOP-13826. S3A Deadlock in multipart copy due to thread pool limits. 
Contributed by  Sean Mackrory.

(cherry picked from commit e3a74e0369e6e2217d1280179b390227fe1b1684)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2158496f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2158496f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2158496f

Branch: refs/heads/YARN-5734
Commit: 2158496f6bed5f9d14751b82bd5d43b9fd786b95
Parents: a07ddef
Author: Steve Loughran 
Authored: Tue Feb 21 17:54:43 2017 +
Committer: Steve Loughran 
Committed: Tue Feb 21 18:28:49 2017 +

--
 .../s3a/BlockingThreadPoolExecutorService.java  |   2 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  21 ++-
 .../fs/s3a/scale/ITestS3AConcurrentOps.java | 167 +++
 3 files changed, 184 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2158496f/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BlockingThreadPoolExecutorService.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BlockingThreadPoolExecutorService.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BlockingThreadPoolExecutorService.java
index 5ff96a5..5b25730 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BlockingThreadPoolExecutorService.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BlockingThreadPoolExecutorService.java
@@ -86,7 +86,7 @@ final class BlockingThreadPoolExecutorService
* @return a thread factory that creates named, daemon threads with
* the supplied exception handler and normal priority
*/
-  private static ThreadFactory newDaemonThreadFactory(final String prefix) {
+  static ThreadFactory newDaemonThreadFactory(final String prefix) {
 final ThreadFactory namedFactory = getNamedThreadFactory(prefix);
 return new ThreadFactory() {
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2158496f/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index bffc210..8b1a6d0 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -29,7 +29,10 @@ import java.util.Date;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ExecutorService;
 import java.util.Objects;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -131,7 +134,8 @@ public class S3AFileSystem extends FileSystem {
   private long partSize;
   private boolean enableMultiObjectsDelete;
   private TransferManager transfers;
-  private ListeningExecutorService threadPoolExecutor;
+  private ListeningExecutorService boundedThreadPool;
+  private ExecutorService unboundedThreadPool;
   private long multiPartThreshold;
   public static final Logger LOG = 
LoggerFactory.getLogger(S3AFileSystem.class);
   private static final Logger PROGRESS =
@@ -216,11 +220,17 @@ public class S3AFileSystem extends FileSystem {
   MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS, 1);
   long keepAliveTime = longOption(conf, KEEPALIVE_TIME,
   DEFAULT_KEEPALIVE_TIME, 0);
-  threadPoolExecutor = BlockingThreadPoolExecutorService.newInstance(
+  boundedThreadPool = BlockingThreadPoolExecutorService.newInstance(
   maxThreads,
   maxThreads + totalTasks,
   keepAliveTime, TimeUnit.SECONDS,
   "s3a-transfer-shared");
+  unboundedThreadPool = new ThreadPoolExecutor(
+  maxThreads, Integer.MAX_VALUE,
+  keepAliveTime, TimeUnit.SECONDS,
+  new LinkedBlockingQueue<Runnable>(),
+  BlockingThreadPoolExecutorService.newDaemonThreadFactory(
+  "s3a-transfer-unbounded"));
 
   initTransferManager();
 
@@ -307,7 +317,7 @@ public class S3AFileSystem extends FileSystem {
 transferConfiguration.setMultipartCopyPartSize(partSize);
 transferConfiguration.setMultipartCopyThreshold(multiPartThreshold);
 
-transfers = new TransferManager(s3, threadPoolExecutor);
+transfers = new TransferManager(s3, unboundedThreadPool);
 transfers.setConfiguration(transferConfiguration);
   }
 
@@ -585,7 +595,7 

[47/50] [abbrv] hadoop git commit: HDFS-11405. Rename "erasurecode" CLI subcommand to "ec". Contributed by Manoj Govindassamy.

2017-02-21 Thread xgong
HDFS-11405. Rename "erasurecode" CLI subcommand to "ec". Contributed by Manoj 
Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc9ad3ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc9ad3ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc9ad3ce

Branch: refs/heads/YARN-5734
Commit: fc9ad3ce3aa7d28974b0ac3b554089507c124783
Parents: 924def7
Author: Andrew Wang 
Authored: Tue Feb 21 13:55:27 2017 -0800
Committer: Andrew Wang 
Committed: Tue Feb 21 13:55:27 2017 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs| 4 ++--
 .../java/org/apache/hadoop/hdfs/tools/erasurecode/ECCli.java | 2 +-
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md| 4 ++--
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md   | 4 ++--
 .../hadoop-hdfs/src/test/resources/testErasureCodingConf.xml | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc9ad3ce/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index f095e9b..617adbe 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -41,7 +41,7 @@ function hadoop_usage
   hadoop_add_subcommand "dfsadmin" "run a DFS admin client"
   hadoop_add_subcommand "diskbalancer" "Distributes data evenly among disks on 
a given node"
   hadoop_add_subcommand "envvars" "display computed Hadoop environment 
variables"
-  hadoop_add_subcommand "erasurecode" "run a HDFS ErasureCoding CLI"
+  hadoop_add_subcommand "ec" "run a HDFS ErasureCoding CLI"
   hadoop_add_subcommand "fetchdt" "fetch a delegation token from the NameNode"
   hadoop_add_subcommand "fsck" "run a DFS filesystem checking utility"
   hadoop_add_subcommand "getconf" "get config values from configuration"
@@ -129,7 +129,7 @@ function hdfscmd_case
   echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
   exit 0
 ;;
-erasurecode)
+ec)
   HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.erasurecode.ECCli
 ;;
 fetchdt)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc9ad3ce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCli.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCli.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCli.java
index 48574d3..89dd4ee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCli.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCli.java
@@ -32,7 +32,7 @@ import java.io.IOException;
 public class ECCli extends FsShell {
 
   private final static String usagePrefix =
-  "Usage: hdfs erasurecode [generic options]";
+  "Usage: hdfs ec [generic options]";
 
   @Override
   protected String getUsagePrefix() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc9ad3ce/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 1a4465c..b65cc78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -431,11 +431,11 @@ Usage:
 
 Runs the diskbalancer CLI. See [HDFS Diskbalancer](./HDFSDiskbalancer.html) 
for more information on this command.
 
-### `erasurecode`
+### `ec`
 
 Usage:
 
-   hdfs erasurecode [generic options]
+   hdfs ec [generic options]
 [-setPolicy [-p <policyName>] <path>]
 [-getPolicy <path>]
  [-listPolicies]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc9ad3ce/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 8c504ac..6e4891f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -116,9 +116,9 @@ Deployment
 
 ### Administrative commands
 
-  HDFS provides an `erasurecode` subcommand 

[48/50] [abbrv] hadoop git commit: HDFS-11406. Remove unused getStartInstance and getFinalizeInstance in FSEditLogOp. Contributed by Alison Yu.

2017-02-21 Thread xgong
HDFS-11406. Remove unused getStartInstance and getFinalizeInstance in 
FSEditLogOp. Contributed by Alison Yu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ef7ebbc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ef7ebbc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ef7ebbc

Branch: refs/heads/YARN-5734
Commit: 8ef7ebbc7112e1868c9b12ff1df4a40fe7afa8af
Parents: fc9ad3c
Author: Andrew Wang 
Authored: Tue Feb 21 14:54:20 2017 -0800
Committer: Andrew Wang 
Committed: Tue Feb 21 14:54:20 2017 -0800

--
 .../org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java  | 8 
 1 file changed, 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ef7ebbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 6293557..f93e867 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -4351,14 +4351,6 @@ public abstract class FSEditLogOp {
   this.name = StringUtils.toUpperCase(name);
 }
 
-static RollingUpgradeOp getStartInstance(OpInstanceCache cache) {
-  return (RollingUpgradeOp) cache.get(OP_ROLLING_UPGRADE_START);
-}
-
-static RollingUpgradeOp getFinalizeInstance(OpInstanceCache cache) {
-  return (RollingUpgradeOp) cache.get(OP_ROLLING_UPGRADE_FINALIZE);
-}
-
 @Override
 void resetSubFields() {
   time = 0L;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[50/50] [abbrv] hadoop git commit: YARN-5951. Changes to allow CapacityScheduler to use configuration store

2017-02-21 Thread xgong
YARN-5951. Changes to allow CapacityScheduler to use configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e1a5440
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e1a5440
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e1a5440

Branch: refs/heads/YARN-5734
Commit: 6e1a54403e8e4d43ce96c9c0cda4cb465ac8e2f3
Parents: 003ae00
Author: Jonathan Hung 
Authored: Mon Jan 30 19:03:48 2017 -0800
Committer: Xuan 
Committed: Tue Feb 21 15:56:59 2017 -0800

--
 .../scheduler/capacity/CapacityScheduler.java   | 36 +--
 .../CapacitySchedulerConfiguration.java | 10 +++
 .../capacity/conf/CSConfigurationProvider.java  | 46 ++
 .../conf/FileBasedCSConfigurationProvider.java  | 67 
 .../scheduler/capacity/conf/package-info.java   | 29 +
 .../capacity/TestCapacityScheduler.java |  4 +-
 6 files changed, 170 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e1a5440/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 3517764..de0f162 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -108,6 +107,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityDiagnosticConstant;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.FileBasedCSConfigurationProvider;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
@@ -166,6 +167,7 @@ public class CapacityScheduler extends
 
   private int offswitchPerHeartbeatLimit;
 
+  private CSConfigurationProvider csConfProvider;
 
   @Override
   public void setConf(Configuration conf) {
@@ -289,7 +291,18 @@ public class CapacityScheduler extends
   IOException {
 try {
   writeLock.lock();
-  this.conf = loadCapacitySchedulerConfiguration(configuration);
+  String confProviderStr = configuration.get(
+  CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
+  CapacitySchedulerConfiguration.DEFAULT_CS_CONF_PROVIDER);
+  if (confProviderStr.equals(
+  CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER)) {
+this.csConfProvider = new FileBasedCSConfigurationProvider(rmContext);
+  } else {
+throw new IOException("Invalid CS configuration provider: " +
+confProviderStr);
+  }
+  this.csConfProvider.init(configuration);
+  this.conf = this.csConfProvider.loadConfiguration(configuration);
   validateConf(this.conf);
   this.minimumAllocation = this.conf.getMinimumAllocation();
   initMaximumResourceCapability(this.conf.getMaximumAllocation());
@@ -396,7 +409,7 @@ public class CapacityScheduler extends
   writeLock.lock();
   Configuration configuration = new Configuration(newConf);
   CapacitySchedulerConfiguration oldConf = this.conf;
-  this.conf = loadCapacitySchedulerConfiguration(configuration);
+  this.conf = csConfProvider.loadConfiguration(configuration);
 

[42/50] [abbrv] hadoop git commit: YARN-6159. Documentation changes for TimelineV2Client (Naganarasimha G R via Varun Saxena)

2017-02-21 Thread xgong
YARN-6159. Documentation changes for TimelineV2Client (Naganarasimha G R via 
Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ba61d20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ba61d20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ba61d20

Branch: refs/heads/YARN-5734
Commit: 6ba61d20d3f65e40ea8e3a49d5beebe34f04aab4
Parents: 8035749
Author: Varun Saxena 
Authored: Tue Feb 21 12:25:37 2017 +0530
Committer: Varun Saxena 
Committed: Tue Feb 21 12:25:37 2017 +0530

--
 .../src/site/markdown/TimelineServiceV2.md  | 44 
 1 file changed, 18 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba61d20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 90c7a89..dc16803 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -257,56 +257,48 @@ To write MapReduce framework data to Timeline Service 
v.2, enable the following
 
 This section is for YARN application developers that want to integrate with 
Timeline Service v.2.
 
-Developers can continue to use the `TimelineClient` API to publish 
per-framework data to the
-Timeline Service v.2. You only need to instantiate the right type of the 
client to write to v.2.
-On the other hand, the entity/object API for v.2 is different than v.1 as the 
object model is
-significantly changed. The v.2 timeline entity class is
-`org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity` whereas 
the v.1 class is
-`org.apache.hadoop.yarn.api.records.timeline.TimelineEntity`. The methods on 
`TimelineClient`
-suitable for writing to Timeline Service v.2 are clearly delineated, and they 
use the v.2
-types as arguments.
+Developers need to use the `TimelineV2Client` API to publish per-framework 
data to the
+Timeline Service v.2. The entity/object API for v.2 is different than v.1 as
+the object model is significantly changed. The v.2 timeline entity class is
+`org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity`.
 
 Timeline Service v.2 `putEntities` methods come in 2 varieties: `putEntities` 
and
 `putEntitiesAsync`. The former is a blocking operation which must be used for 
writing more
 critical data (e.g. lifecycle events). The latter is a non-blocking operation. 
Note that neither
 has a return value.
 
-Creating a `TimelineClient` for v.2 involves passing in the application id to 
the factory method.
+Creating a `TimelineV2Client` involves passing in the application id to the 
static method
+`TimelineV2Client.createTimelineClient`.
 
 For example:
 
 
 // Create and start the Timeline client v.2
-TimelineClient client = TimelineClient.createTimelineClient(appId);
-client.init(conf);
-client.start();
+TimelineV2Client timelineClient =
+TimelineV2Client.createTimelineClient(appId);
+timelineClient.init(conf);
+timelineClient.start();
 
 try {
   TimelineEntity myEntity = new TimelineEntity();
-  myEntity.setEntityType("MY_APPLICATION");
-  myEntity.setEntityId("MyApp1")
+  myEntity.setType("MY_APPLICATION");
+  myEntity.setId("MyApp1");
   // Compose other entity info
 
   // Blocking write
-  client.putEntities(entity);
+  timelineClient.putEntities(myEntity);
 
   TimelineEntity myEntity2 = new TimelineEntity();
   // Compose other info
 
   // Non-blocking write
-  timelineClient.putEntitiesAsync(entity);
+  timelineClient.putEntitiesAsync(myEntity2);
 
-} catch (IOException e) {
-  // Handle the exception
-} catch (RuntimeException e) {
-  // In Hadoop 2.6, if attempts submit information to the Timeline Server 
fail more than the retry limit,
-  // a RuntimeException will be raised. This may change in future 
releases, being
-  // replaced with a IOException that is (or wraps) that which triggered 
retry failures.
-} catch (YarnException e) {
+} catch (IOException | YarnException e) {
   // Handle the exception
 } finally {
   // Stop the Timeline client
-  client.stop();
+  timelineClient.stop();
 }
 
 As evidenced above, you need to specify the YARN application id to be able to 
write to the Timeline
@@ -314,9 +306,9 @@ Service v.2. Note that currently you need to be on the 
cluster to be able to wri
 

[44/50] [abbrv] hadoop git commit: HDFS-11404. Increase timeout on TestShortCircuitLocalRead.testDeprecatedGetBlockLocalPathInfoRpc. Contributed by Eric Badger

2017-02-21 Thread xgong
HDFS-11404. Increase timeout on 
TestShortCircuitLocalRead.testDeprecatedGetBlockLocalPathInfoRpc. Contributed 
by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a07ddef1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a07ddef1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a07ddef1

Branch: refs/heads/YARN-5734
Commit: a07ddef10115fd0082832f1c338b2484507a8f49
Parents: 4804050
Author: Eric Payne 
Authored: Tue Feb 21 12:04:25 2017 -0600
Committer: Eric Payne 
Committed: Tue Feb 21 12:04:25 2017 -0600

--
 .../apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a07ddef1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index a7132b8..55e9795 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -353,7 +353,7 @@ public class TestShortCircuitLocalRead {
 });
   }
 
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
 final Configuration conf = new Configuration();
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[13/50] [abbrv] hadoop git commit: YARN-6156. AM blacklisting to consider node label partition (Bibin A Chundatt via Varun Saxena)

2017-02-21 Thread xgong
YARN-6156. AM blacklisting to consider node label partition (Bibin A Chundatt 
via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b7613e0f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b7613e0f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b7613e0f

Branch: refs/heads/YARN-5734
Commit: b7613e0f406fb2b9bd5b1b3c79658e801f63c587
Parents: cd3e59a
Author: Varun Saxena 
Authored: Wed Feb 15 14:48:17 2017 +0530
Committer: Varun Saxena 
Committed: Wed Feb 15 14:48:17 2017 +0530

--
 .../server/resourcemanager/RMServerUtils.java   | 22 ++
 .../nodelabels/RMNodeLabelsManager.java | 16 +
 .../server/resourcemanager/rmapp/RMAppImpl.java | 12 ++--
 .../rmapp/attempt/RMAppAttemptImpl.java |  4 +-
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 73 
 5 files changed, 118 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7613e0f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index 224a1da..e98141b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
+import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt
 .RMAppAttemptState;
@@ -561,4 +562,25 @@ public class RMServerUtils {
 }
 return newApplicationTimeout;
   }
+
+  /**
+   * Get applicable Node count for AM.
+   *
+   * @param rmContext context
+   * @param conf configuration
+   * @param amreq am resource request
+   * @return applicable node count
+   */
+  public static int getApplicableNodeCountForAM(RMContext rmContext,
+  Configuration conf, ResourceRequest amreq) {
+if (YarnConfiguration.areNodeLabelsEnabled(conf)) {
+  RMNodeLabelsManager labelManager = rmContext.getNodeLabelManager();
+  String amNodeLabelExpression = amreq.getNodeLabelExpression();
+  amNodeLabelExpression = (amNodeLabelExpression == null
+  || amNodeLabelExpression.trim().isEmpty())
+  ? RMNodeLabelsManager.NO_LABEL : amNodeLabelExpression;
+  return labelManager.getActiveNMCountPerLabel(amNodeLabelExpression);
+}
+return rmContext.getScheduler().getNumClusterNodes();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7613e0f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
index 5dc8392..effe422 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
@@ -350,6 +350,22 @@ public class RMNodeLabelsManager extends 
CommonNodeLabelsManager {
 }
   }
   
+  /*
+   * Get active node count based on label.
+   */
+  public int getActiveNMCountPerLabel(String label) {
+if (label == null) {
+   

[12/50] [abbrv] hadoop git commit: HADOOP-14072. AliyunOSS: Failed to read from stream when seek beyond the download size. Contributed by Genmao Yu

2017-02-21 Thread xgong
HADOOP-14072. AliyunOSS: Failed to read from stream when seek beyond the 
download size. Contributed by Genmao Yu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd3e59a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd3e59a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd3e59a3

Branch: refs/heads/YARN-5734
Commit: cd3e59a3dcc69f68711777d448da5228a55846b3
Parents: 8acb376
Author: Kai Zheng 
Authored: Wed Feb 15 16:34:30 2017 +0800
Committer: Kai Zheng 
Committed: Wed Feb 15 16:34:30 2017 +0800

--
 .../fs/aliyun/oss/AliyunOSSInputStream.java |  4 ++-
 .../oss/contract/TestAliyunOSSContractSeek.java | 26 
 2 files changed, 29 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd3e59a3/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
index a3af7ce..72ba619 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
@@ -224,8 +224,10 @@ public class AliyunOSSInputStream extends FSInputStream {
 if (position == pos) {
   return;
 } else if (pos > position && pos < position + partRemaining) {
-  AliyunOSSUtils.skipFully(wrappedStream, pos - position);
+  long len = pos - position;
+  AliyunOSSUtils.skipFully(wrappedStream, len);
   position = pos;
+  partRemaining -= len;
 } else {
   reopen(pos);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd3e59a3/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java
 
b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java
index b247ab1..d9b3674 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java
@@ -19,8 +19,15 @@
 package org.apache.hadoop.fs.aliyun.oss.contract;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.Test;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
 
 /**
  * Aliyun OSS contract seeking tests.
@@ -31,4 +38,23 @@ public class TestAliyunOSSContractSeek extends 
AbstractContractSeekTest {
   protected AbstractFSContract createContract(Configuration conf) {
 return new AliyunOSSContract(conf);
   }
+
+  @Test
+  public void testSeekBeyondDownloadSize() throws Throwable {
+describe("seek and read beyond download size.");
+
+Path byteFile = path("byte_file.txt");
+// 'fs.oss.multipart.download.size' = 100 * 1024
+byte[] block = dataset(100 * 1024 + 10, 0, 255);
+FileSystem fs = getFileSystem();
+createFile(fs, byteFile, true, block);
+
+FSDataInputStream instream = getFileSystem().open(byteFile);
+instream.seek(100 * 1024 - 1);
+assertEquals(100 * 1024 - 1, instream.getPos());
+assertEquals(144, instream.read());
+instream.seek(100 * 1024 + 1);
+assertEquals(100 * 1024 + 1, instream.getPos());
+assertEquals(146, instream.read());
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[39/50] [abbrv] hadoop git commit: HADOOP-14092. Typo in hadoop-aws index.md. Contributed by John Zhuge

2017-02-21 Thread xgong
HADOOP-14092. Typo in hadoop-aws index.md. Contributed by John Zhuge

(cherry picked from commit b1c1f05b1dc997906390d653dfafb4f0d7e193c4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a2e30fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a2e30fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a2e30fa

Branch: refs/heads/YARN-5734
Commit: 3a2e30fa9fe692fe44666c78fbaa04e8469f9d17
Parents: dbbfcf7
Author: Steve Loughran 
Authored: Sat Feb 18 18:16:19 2017 +
Committer: Steve Loughran 
Committed: Sat Feb 18 18:17:11 2017 +

--
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a2e30fa/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 0ff314c..7815bcf 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -2250,7 +2250,7 @@ like `ITestS3A*` shown above, it may cause unpredictable 
test failures.
 ### Testing against different regions
 
 S3A can connect to different regions —the tests support this. Simply
-define the target region in `contract-tests.xml` or any `auth-keys.xml`
+define the target region in `contract-test-options.xml` or any `auth-keys.xml`
 file referenced.
 
 ```xml


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[27/50] [abbrv] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-21 Thread xgong
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fa1afdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
new file mode 100644
index 000..b5b5f77
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
@@ -0,0 +1,440 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api.impl;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.ConnectException;
+import java.net.HttpURLConnection;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URL;
+import java.net.URLConnection;
+import java.security.GeneralSecurityException;
+import java.security.PrivilegedExceptionAction;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import 
org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
+import 
org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientRequest;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.api.client.filter.ClientFilter;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
+
+/**
+ * Utility Connector class which is used by timeline clients to securely get
+ * connected to the timeline server.
+ *
+ */
+public class TimelineConnector extends AbstractService {
+
+  private static final Joiner JOINER = Joiner.on("");
+  private static final Log LOG = LogFactory.getLog(TimelineConnector.class);
+  public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
+
+  private SSLFactory sslFactory;
+  private Client client;
+  private ConnectionConfigurator connConfigurator;
+  private DelegationTokenAuthenticator authenticator;
+  private DelegationTokenAuthenticatedURL.Token token;
+  private UserGroupInformation authUgi;
+  private String doAsUser;
+  @VisibleForTesting
+  TimelineClientConnectionRetry connectionRetry;
+  private boolean requireConnectionRetry;
+
+  public TimelineConnector(boolean requireConnectionRetry,
+  UserGroupInformation authUgi, String doAsUser,
+  DelegationTokenAuthenticatedURL.Token token) {
+super("TimelineConnector");
+this.requireConnectionRetry = 

[32/50] [abbrv] hadoop git commit: HADOOP-14019. Fix some typos in the s3a docs. Contributed by Steve Loughran

2017-02-21 Thread xgong
HADOOP-14019. Fix some typos in the s3a docs. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bdad8b7b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bdad8b7b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bdad8b7b

Branch: refs/heads/YARN-5734
Commit: bdad8b7b97d7f48119f016d68f32982d680c8796
Parents: f432999
Author: Mingliang Liu 
Authored: Thu Feb 16 16:41:31 2017 -0800
Committer: Mingliang Liu 
Committed: Thu Feb 16 16:41:31 2017 -0800

--
 .../src/site/markdown/tools/hadoop-aws/index.md | 16 +---
 1 file changed, 13 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdad8b7b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 2471a52..0ff314c 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -970,7 +970,7 @@ This is because the property values are kept in these 
files, and cannot be
 dynamically patched.
 
 Instead, callers need to create different configuration files for each
-bucket, setting the base secrets (`fs.s3a.bucket.nightly.access.key`, etc),
+bucket, setting the base secrets (`fs.s3a.access.key`, etc),
 then declare the path to the appropriate credential file in
 a bucket-specific version of the property 
`fs.s3a.security.credential.provider.path`.
 
@@ -1044,7 +1044,7 @@ declaration. For example:
 ### Stabilizing: S3A Fast Upload
 
 
-**New in Hadoop 2.7; significantly enhanced in Hadoop 2.9**
+**New in Hadoop 2.7; significantly enhanced in Hadoop 2.8**
 
 
 Because of the nature of the S3 object store, data written to an S3A 
`OutputStream`
@@ -1204,8 +1204,18 @@ consumed, and so eliminates heap size as the limiting 
factor in queued uploads
   disk
 
 
+
+  fs.s3a.buffer.dir
+  
+  Comma separated list of temporary directories use for
+  storing blocks of data prior to their being uploaded to S3.
+  When unset, the Hadoop temporary directory hadoop.tmp.dir is 
used
+
+
 ```
 
+This is the default buffer mechanism. The amount of data which can
+be buffered is limited by the amount of available disk space.
 
  Fast Upload with ByteBuffers: 
`fs.s3a.fast.upload.buffer=bytebuffer`
 
@@ -1219,7 +1229,7 @@ The amount of data which can be buffered is
 limited by the Java runtime, the operating system, and, for YARN applications,
 the amount of memory requested for each container.
 
-The slower the write bandwidth to S3, the greater the risk of running out
+The slower the upload bandwidth to S3, the greater the risk of running out
 of memory —and so the more care is needed in
 [tuning the upload settings](#s3a_fast_upload_thread_tuning).
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[06/50] [abbrv] hadoop git commit: YARN-6061. Add an UncaughtExceptionHandler for critical threads in RM. (Yufei Gu via kasha)

2017-02-21 Thread xgong
YARN-6061. Add an UncaughtExceptionHandler for critical threads in RM. (Yufei 
Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/652679aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/652679aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/652679aa

Branch: refs/heads/YARN-5734
Commit: 652679aa8ad6f9e61b8ed8e2b04b3e0332025e94
Parents: aaf2713
Author: Karthik Kambatla 
Authored: Tue Feb 14 13:39:34 2017 -0800
Committer: Karthik Kambatla 
Committed: Tue Feb 14 13:39:41 2017 -0800

--
 .../hadoop/yarn/client/TestRMFailover.java  | 100 ++-
 .../yarn/server/resourcemanager/RMContext.java  |   2 +
 .../server/resourcemanager/RMContextImpl.java   |  10 ++
 ...MCriticalThreadUncaughtExceptionHandler.java |  58 +++
 .../resourcemanager/RMFatalEventType.java   |   5 +-
 .../server/resourcemanager/ResourceManager.java |  65 +---
 .../resourcemanager/recovery/RMStateStore.java  |  13 +--
 .../DominantResourceFairnessPolicy.java |   2 +-
 8 files changed, 226 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/652679aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index b58a775..4bf6a78 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -22,7 +22,10 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
 
 import java.io.IOException;
 import java.net.HttpURLConnection;
@@ -37,14 +40,18 @@ import org.apache.hadoop.ha.ClientBaseWithFixes;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
 import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.RMCriticalThreadUncaughtExceptionHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.junit.After;
@@ -174,7 +181,7 @@ public class TestRMFailover extends ClientBaseWithFixes {
 // so it transitions to standby.
 ResourceManager rm = cluster.getResourceManager(
 cluster.getActiveRMIndex());
-rm.handleTransitionToStandBy();
+rm.handleTransitionToStandByInNewThread();
 int maxWaitingAttempts = 2000;
 while (maxWaitingAttempts-- > 0 ) {
   if (rm.getRMContext().getHAServiceState() == HAServiceState.STANDBY) {
@@ -349,4 +356,95 @@ public class TestRMFailover extends ClientBaseWithFixes {
 }
 return redirectUrl;
   }
+
+  /**
+   * Throw {@link RuntimeException} inside a thread of
+   * {@link ResourceManager} with HA enabled and check if the
+   * {@link ResourceManager} is transited to standby state.
+   *
+   * @throws InterruptedException if any
+   */
+  @Test
+  public void testUncaughtExceptionHandlerWithHAEnabled()
+  throws InterruptedException {
+conf.set(YarnConfiguration.RM_CLUSTER_ID, "yarn-test-cluster");
+conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
+cluster.init(conf);
+cluster.start();
+assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
+
+ResourceManager resourceManager = cluster.getResourceManager(
+cluster.getActiveRMIndex());
+
+final RMCriticalThreadUncaughtExceptionHandler exHandler =
+new 

[20/50] [abbrv] hadoop git commit: HDFS-11265. Extend visualization for Maintenance Mode under Datanode tab in the NameNode UI. (Marton Elek via mingma)

2017-02-21 Thread xgong
HDFS-11265. Extend visualization for Maintenance Mode under Datanode tab in the 
NameNode UI. (Marton Elek via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a136936d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a136936d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a136936d

Branch: refs/heads/YARN-5734
Commit: a136936d018b5cebb7aad9a01ea0dcc366e1c3b8
Parents: 0741dd3
Author: Ming Ma 
Authored: Wed Feb 15 20:24:07 2017 -0800
Committer: Ming Ma 
Committed: Wed Feb 15 20:24:07 2017 -0800

--
 .../hadoop-hdfs/src/main/webapps/static/hadoop.css  | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a136936d/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
index 0901125..341e1f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
@@ -236,8 +236,8 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
 }
 
 .dfshealth-node-decommissioned:before {
-color: #eea236;
-content: "\e136";
+color: #bc5f04;
+content: "\e090";
 }
 
 .dfshealth-node-down:before {
@@ -250,6 +250,11 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
 content: "\e017";
 }
 
+.dfshealth-node-down-maintenance:before {
+color: #eea236;
+content: "\e136";
+}
+
 .dfshealth-node-legend {
 list-style-type: none;
 text-align: right;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[33/50] [abbrv] hadoop git commit: HADOOP-14040. Use shaded aws-sdk uber-JAR 1.11.86. Contributed by Steve Loughran and Sean Mackrory

2017-02-21 Thread xgong
HADOOP-14040. Use shaded aws-sdk uber-JAR 1.11.86. Contributed by Steve 
Loughran and Sean Mackrory


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/658702ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/658702ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/658702ef

Branch: refs/heads/YARN-5734
Commit: 658702efffdf52cf5ddf8e92f959f1157c95a348
Parents: bdad8b7
Author: Mingliang Liu 
Authored: Thu Feb 16 16:51:03 2017 -0800
Committer: Mingliang Liu 
Committed: Thu Feb 16 16:51:03 2017 -0800

--
 hadoop-project/pom.xml  | 9 ++---
 hadoop-tools/hadoop-aws/pom.xml | 7 +--
 2 files changed, 3 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/658702ef/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 606f7fc..47e21d8 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -126,7 +126,7 @@
 1.0-beta-1
 1.0-alpha-8
 900
-1.11.45
+1.11.86
 
 ${project.version}
@@ -791,12 +791,7 @@
   
   
 com.amazonaws
-aws-java-sdk-s3
-${aws-java-sdk.version}
-  
-  
-com.amazonaws
-aws-java-sdk-sts
+aws-java-sdk-bundle
 ${aws-java-sdk.version}
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/658702ef/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 1f64b02..0fdbc5d 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -331,7 +331,7 @@
 
 
   com.amazonaws
-  aws-java-sdk-s3
+  aws-java-sdk-bundle
   compile
 
 
@@ -355,11 +355,6 @@
   joda-time
 
 
-  com.amazonaws
-  aws-java-sdk-sts
-  test
-
-
   junit
   junit
   test


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[29/50] [abbrv] hadoop git commit: YARN-6177. Yarn client should exit with an informative error message if an incompatible Jersey library is used at client. Contributed by Weiwei Yang.

2017-02-21 Thread xgong
YARN-6177. Yarn client should exit with an informative error message if an 
incompatible Jersey library is used at client. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d339c46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d339c46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d339c46

Branch: refs/heads/YARN-5734
Commit: 5d339c46f5b16b951afd82afd9e907b9aa2ded9a
Parents: 4fa1afd
Author: Li Lu 
Authored: Thu Feb 16 13:40:26 2017 -0800
Committer: Li Lu 
Committed: Thu Feb 16 13:41:42 2017 -0800

--
 .../yarn/client/api/impl/YarnClientImpl.java|  10 ++
 .../yarn/client/api/impl/TestYarnClient.java| 165 ---
 2 files changed, 155 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d339c46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 4a27fee..23b128c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -381,6 +381,16 @@ public class YarnClientImpl extends YarnClient {
 return null;
   }
   throw e;
+} catch (NoClassDefFoundError e) {
+  NoClassDefFoundError wrappedError = new NoClassDefFoundError(
+  e.getMessage() + ". It appears that the timeline client "
+  + "failed to initiate because an incompatible dependency "
+  + "in classpath. If timeline service is optional to this "
+  + "client, try to work around by setting "
+  + YarnConfiguration.TIMELINE_SERVICE_ENABLED
+  + " to false in client configuration.");
+  wrappedError.setStackTrace(e.getStackTrace());
+  throw wrappedError;
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d339c46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 240f31c..c2c9665 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -156,26 +156,6 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testStartWithTimelineV15Failure() throws Exception{
-Configuration conf = new Configuration();
-conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
-conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5f);
-conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT,
-true);
-YarnClient client = YarnClient.createYarnClient();
-if(client instanceof YarnClientImpl) {
-  YarnClientImpl impl = (YarnClientImpl) client;
-  YarnClientImpl spyClient = spy(impl);
-  when(spyClient.createTimelineClient()).thenThrow(
-  new IOException("ATS v1.5 client initialization failed. "));
-  spyClient.init(conf);
-  spyClient.start();
-  spyClient.getTimelineDelegationToken();
-  spyClient.stop();
-}
-  }
-
-  @Test
   public void testStartWithTimelineV15() throws Exception {
 Configuration conf = new Configuration();
 conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
@@ -186,6 +166,89 @@ public class TestYarnClient {
 client.stop();
   }
 
+  @Test
+  public void testStartTimelineClientWithErrors()
+  throws Exception {
+// If timeline client failed to init with a NoClassDefFoundError
+// it should be wrapped with an informative error message
+testCreateTimelineClientWithError(
+1.5f,
+true,
+false,
+new NoClassDefFoundError("Mock a NoClassDefFoundError"),
+new CreateTimelineClientErrorVerifier(1) {
+  @Override
+  public void verifyError(Throwable e) {
+  

[35/50] [abbrv] hadoop git commit: HADOOP-13805. UGI.getCurrentUser() fails if user does not have a keytab associated. Contributed by Xiao Chen, Wei-Chiu Chuang, Yongjun Zhang.

2017-02-21 Thread xgong
HADOOP-13805. UGI.getCurrentUser() fails if user does not have a keytab 
associated. Contributed by Xiao Chen, Wei-Chiu Chuang, Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c26c241
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c26c241
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c26c241

Branch: refs/heads/YARN-5734
Commit: 4c26c241ad2b907dc02cecefa9846cbe2b0465ba
Parents: 02c5494
Author: Yongjun Zhang 
Authored: Thu Feb 16 22:25:37 2017 -0800
Committer: Yongjun Zhang 
Committed: Fri Feb 17 09:18:50 2017 -0800

--
 .../hadoop/fs/CommonConfigurationKeys.java  |  11 ++
 .../hadoop/security/UserGroupInformation.java   | 129 +++
 .../hadoop/security/TestUGIWithMiniKdc.java |   1 +
 .../security/TestUserGroupInformation.java  |  16 ++-
 4 files changed, 127 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c26c241/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index b8a60d6..e53f71e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -353,6 +353,17 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   public static final String HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS =
 "hadoop.user.group.metrics.percentiles.intervals";
 
+  /* When creating UGI with UserGroupInformation(Subject), treat the passed
+   * subject external if set to true, and assume the owner of the subject
+   * should do the credential renewal.
+   *
+   * This is a temporary config to solve the compatibility issue with
+   * HADOOP-13558 and HADOOP-13805 fix, see the jiras for discussions.
+   */
+  public static final String HADOOP_TREAT_SUBJECT_EXTERNAL_KEY =
+  "hadoop.treat.subject.external";
+  public static final boolean HADOOP_TREAT_SUBJECT_EXTERNAL_DEFAULT = false;
+
   public static final String RPC_METRICS_QUANTILE_ENABLE =
   "rpc.metrics.quantile.enable";
   public static final boolean RPC_METRICS_QUANTILE_ENABLE_DEFAULT = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c26c241/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 6574e55..a5c6226 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.security;
 
 import static 
org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_TREAT_SUBJECT_EXTERNAL_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_TREAT_SUBJECT_EXTERNAL_DEFAULT;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_TOKEN_FILES;
@@ -79,6 +81,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -273,6 +276,29 @@ public class UserGroupInformation {
   /** Min time (in seconds) before relogin for Kerberos */
   private static long kerberosMinSecondsBeforeRelogin;
   /** The configuration to use */
+
+  /*
+   * This config is a temporary one for backward compatibility.
+   * It means whether to treat the subject passed to
+   * UserGroupInformation(Subject) as external. If true,
+   * -  no renewal thread will be created to do the renew credential
+   * -  reloginFromKeytab() and reloginFromTicketCache will not renew
+   *credential.
+   * and it assumes that the owner 

[36/50] [abbrv] hadoop git commit: YARN-6188. Fix OOM issue with decommissioningNodesWatcher in the case of clusters with large number of nodes (Contributed by Ajay Jadhav via Daniel Templeton)

2017-02-21 Thread xgong
YARN-6188. Fix OOM issue with decommissioningNodesWatcher in the case of 
clusters with
large number of nodes (Contributed by Ajay Jadhav via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a928377
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a928377
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a928377

Branch: refs/heads/YARN-5734
Commit: 9a928377868dfb2dc846c340501b3248eb6ad77f
Parents: 4c26c24
Author: Daniel Templeton 
Authored: Fri Feb 17 13:11:43 2017 -0800
Committer: Daniel Templeton 
Committed: Fri Feb 17 13:13:46 2017 -0800

--
 .../yarn/server/resourcemanager/DecommissioningNodesWatcher.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a928377/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java
index 376b503..9631803 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java
@@ -385,9 +385,9 @@ public class DecommissioningNodesWatcher {
 if (!LOG.isDebugEnabled() || decomNodes.size() == 0) {
   return;
 }
-StringBuilder sb = new StringBuilder();
 long now = mclock.getTime();
 for (DecommissioningNodeContext d : decomNodes.values()) {
+  StringBuilder sb = new StringBuilder();
   DecommissioningNodeStatus s = checkDecommissioningStatus(d.nodeId);
   sb.append(String.format(
   "%n  %-34s %4ds fresh:%3ds containers:%2d %14s",
@@ -413,8 +413,8 @@ public class DecommissioningNodesWatcher {
   (mclock.getTime() - rmApp.getStartTime()) / 1000));
 }
   }
+  LOG.debug("Decommissioning node: " + sb.toString());
 }
-LOG.info("Decommissioning Nodes: " + sb.toString());
   }
 
   // Read possible new DECOMMISSIONING_TIMEOUT_KEY from yarn-site.xml.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[37/50] [abbrv] hadoop git commit: YARN-6125. The application attempt's diagnostic message should have a maximum size (Contributed by Andras Piros via Daniel Templeton)

2017-02-21 Thread xgong
YARN-6125. The application attempt's diagnostic message should have a maximum 
size
(Contributed by Andras Piros via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7a36e61
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7a36e61
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7a36e61

Branch: refs/heads/YARN-5734
Commit: c7a36e613053ec8b46004b887c2f13535469
Parents: 9a92837
Author: Daniel Templeton 
Authored: Fri Feb 17 13:40:58 2017 -0800
Committer: Daniel Templeton 
Committed: Fri Feb 17 13:40:58 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   5 +
 .../src/main/resources/yarn-default.xml |  15 ++
 .../hadoop-yarn-server-resourcemanager/pom.xml  |  16 +-
 .../rmapp/attempt/RMAppAttemptImpl.java | 172 ++-
 .../rmapp/attempt/TestBoundedAppender.java  | 116 +
 .../TestRMAppAttemptImplDiagnostics.java| 111 
 6 files changed, 422 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7a36e61/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 136227a..094a424 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2619,6 +2619,11 @@ public class YarnConfiguration extends Configuration {
 
   public static final int DEFAULT_CLUSTER_LEVEL_APPLICATION_PRIORITY = 0;
 
+  public static final String APP_ATTEMPT_DIAGNOSTICS_LIMIT_KC =
+  YARN_PREFIX + "app.attempt.diagnostics.limit.kc";
+
+  public static final int DEFAULT_APP_ATTEMPT_DIAGNOSTICS_LIMIT_KC = 64;
+
   @Private
   public static boolean isDistributedNodeLabelConfiguration(Configuration 
conf) {
 return DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE.equals(conf.get(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7a36e61/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 4ca46f9..53beb5e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3022,4 +3022,19 @@
 3000
   
 
+  
+
+  Defines the limit of the diagnostics message of an application
+  attempt, in kilo characters (character count * 1024).
+  When using ZooKeeper to store application state behavior, it's
+  important to limit the size of the diagnostic messages to
+  prevent YARN from overwhelming ZooKeeper. In cases where
+  yarn.resourcemanager.state-store.max-completed-applications is set to
+  a large number, it may be desirable to reduce the value of this property
+  to limit the total data stored.
+
+yarn.app.attempt.diagnostics.limit.kc
+64
+  
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7a36e61/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index 6985d65..0a85d0c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -52,8 +52,17 @@
   org.apache.hadoop
   hadoop-annotations
 
+
 
-  org.mockito
+  junit
+  junit
+  test
+
+
+org.mockito
   mockito-all
   test
 
@@ -73,11 +82,6 @@
   protobuf-java
 
 
-  junit
-  junit
-  test
-
-
   commons-io
   commons-io
 


[38/50] [abbrv] hadoop git commit: YARN-6193. FairScheduler might not trigger preemption when using DRF. (kasha)

2017-02-21 Thread xgong
YARN-6193. FairScheduler might not trigger preemption when using DRF. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dbbfcf74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dbbfcf74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dbbfcf74

Branch: refs/heads/YARN-5734
Commit: dbbfcf74ab44d7bfdc805b63affd0defc57182b8
Parents: c7a36e6
Author: Karthik Kambatla 
Authored: Fri Feb 17 14:07:31 2017 -0800
Committer: Karthik Kambatla 
Committed: Fri Feb 17 14:07:31 2017 -0800

--
 .../scheduler/fair/FSAppAttempt.java|  9 +++--
 .../fair/TestFairSchedulerPreemption.java   | 36 ++--
 2 files changed, 30 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbbfcf74/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index b1bb9a0..6ed0660 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -602,12 +602,11 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 
 // Check if the app's allocation will be over its fairshare even
 // after preempting this container
-Resource currentUsage = getResourceUsage();
-Resource fairshare = getFairShare();
-Resource overFairShareBy = Resources.subtract(currentUsage, fairshare);
+Resource usageAfterPreemption = Resources.subtract(
+getResourceUsage(), container.getAllocatedResource());
 
-return (Resources.fitsIn(container.getAllocatedResource(),
-overFairShareBy));
+return !Resources.lessThan(fsQueue.getPolicy().getResourceCalculator(),
+scheduler.getClusterResource(), usageAfterPreemption, getFairShare());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbbfcf74/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
index a4d69bf..480a329 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
@@ -57,6 +57,7 @@ public class TestFairSchedulerPreemption extends 
FairSchedulerTestBase {
   private static final int NODE_CAPACITY_MULTIPLE = 4;
 
   private final boolean fairsharePreemption;
+  private final boolean drf;
 
   // App that takes up the entire cluster
   private FSAppAttempt greedyApp;
@@ -67,13 +68,17 @@ public class TestFairSchedulerPreemption extends 
FairSchedulerTestBase {
   @Parameterized.Parameters(name = "{0}")
   public static Collection getParameters() {
 return Arrays.asList(new Object[][] {
-{"FairSharePreemption", true},
-{"MinSharePreemption", false}});
+{"MinSharePreemption", 0},
+{"MinSharePreemptionWithDRF", 1},
+{"FairSharePreemption", 2},
+{"FairSharePreemptionWithDRF", 3}
+});
   }
 
-  public TestFairSchedulerPreemption(String name, boolean fairshare)
+  public TestFairSchedulerPreemption(String name, int mode)
   throws IOException {
-fairsharePreemption = fairshare;
+fairsharePreemption = (mode > 1); // 2 and 

[31/50] [abbrv] hadoop git commit: HADOOP-14049. Honour AclBit flag associated to file/folder permission for Azure datalake account. Contributed by Vishwajeet Dusane

2017-02-21 Thread xgong
HADOOP-14049. Honour AclBit flag associated to file/folder permission for Azure 
datalake account. Contributed by Vishwajeet Dusane


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4329990
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4329990
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4329990

Branch: refs/heads/YARN-5734
Commit: f4329990250bed62efdebe3ce2bc740092cf9573
Parents: a77f432
Author: Mingliang Liu 
Authored: Thu Feb 16 15:14:25 2017 -0800
Committer: Mingliang Liu 
Committed: Thu Feb 16 15:14:25 2017 -0800

--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  6 +++-
 .../hadoop/fs/adl/TestADLResponseData.java  | 21 +
 .../apache/hadoop/fs/adl/TestGetFileStatus.java | 25 +++
 .../apache/hadoop/fs/adl/TestListStatus.java| 32 
 4 files changed, 83 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4329990/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 303b7bc..fb0feda 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -593,7 +593,11 @@ public class AdlFileSystem extends FileSystem {
 boolean isDirectory = entry.type == DirectoryEntryType.DIRECTORY;
 long lastModificationData = entry.lastModifiedTime.getTime();
 long lastAccessTime = entry.lastAccessTime.getTime();
-FsPermission permission = new AdlPermission(aclBitStatus,
+// set aclBit from ADLS backend response if
+// ADL_SUPPORT_ACL_BIT_IN_FSPERMISSION is true.
+final boolean aclBit = aclBitStatus ? entry.aclBit : false;
+
+FsPermission permission = new AdlPermission(aclBit,
 Short.valueOf(entry.permission, 8));
 String user = entry.user;
 String group = entry.group;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4329990/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
index 24eb314..788242e 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
@@ -66,6 +66,15 @@ public final class TestADLResponseData {
 "\"owner\":\"NotSupportYet\",\"group\":\"NotSupportYet\"}}";
   }
 
+  public static String getGetFileStatusJSONResponse(boolean aclBit) {
+return "{\"FileStatus\":{\"length\":1024," +
+"\"pathSuffix\":\"\",\"type\":\"FILE\",\"blockSize\":268435456," +
+"\"accessTime\":1452103827023,\"modificationTime\":1452103827023," +
+"\"replication\":0,\"permission\":\"777\"," +
+"\"owner\":\"NotSupportYet\",\"group\":\"NotSupportYet\",\"aclBit\":\""
++ aclBit + "\"}}";
+  }
+
   public static String getListFileStatusJSONResponse(int dirSize) {
 String list = "";
 for (int i = 0; i < dirSize; ++i) {
@@ -81,6 +90,18 @@ public final class TestADLResponseData {
 return "{\"FileStatuses\":{\"FileStatus\":[" + list + "]}}";
   }
 
+  public static String getListFileStatusJSONResponse(boolean aclBit) {
+return "{\"FileStatuses\":{\"FileStatus\":[{\"length\":0,\"pathSuffix\":\""
++ java.util.UUID.randomUUID()
++ "\",\"type\":\"DIRECTORY\",\"blockSize\":0,"
++ "\"accessTime\":1481184513488,"
++ "\"modificationTime\":1481184513488,\"replication\":0,"
++ "\"permission\":\"770\","
++ "\"owner\":\"4b27fe1a-d9ab-4a04-ad7a-4bba72cd9e6c\","
++ "\"group\":\"4b27fe1a-d9ab-4a04-ad7a-4bba72cd9e6c\",\"aclBit\":\""
++ aclBit + "\"}]}}";
+  }
+
   public static String getJSONResponse(boolean status) {
 return "{\"boolean\":" + status + "}";
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4329990/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java
--
diff --git 

[24/50] [abbrv] hadoop git commit: YARN-6200. Reverting since the same functionality achieved by YARN-1623. Revert "YARN-5068. Expose scheduler queue to application master. (Harish Jaiprakash via rohi

2017-02-21 Thread xgong
YARN-6200. Reverting since the same functionality achieved by YARN-1623.
Revert "YARN-5068. Expose scheduler queue to application master. (Harish 
Jaiprakash via rohithsharmaks)"

This reverts commit b7ac85259c7d20c33bef9c9cb40b8aabcab70755.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a393e84c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a393e84c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a393e84c

Branch: refs/heads/YARN-5734
Commit: a393e84c6f57d50a471fc902dfd07ca1b4128a0e
Parents: 74dd142
Author: Rohith Sharma K S 
Authored: Thu Feb 16 18:00:25 2017 +0530
Committer: Rohith Sharma K S 
Committed: Thu Feb 16 18:00:25 2017 +0530

--
 .../apache/hadoop/yarn/api/ApplicationConstants.java  |  7 ---
 .../server/resourcemanager/amlauncher/AMLauncher.java | 14 --
 .../TestApplicationMasterLauncher.java|  5 -
 3 files changed, 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a393e84c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index 760e251..64bcc44 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -160,13 +160,6 @@ public interface ApplicationConstants {
 LD_LIBRARY_PATH("LD_LIBRARY_PATH"),
 
 /**
- * $YARN_RESOURCEMANAGER_APPLICATION_QUEUE
- * The queue into which the app was submitted/launched.
- */
-YARN_RESOURCEMANAGER_APPLICATION_QUEUE(
-"YARN_RESOURCEMANAGER_APPLICATION_QUEUE"),
-
-/**
  * $HADOOP_CONF_DIR
  * Final, non-modifiable.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a393e84c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
index d33360b..05f9f47 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.client.NMProxy;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
@@ -189,9 +188,6 @@ public class AMLauncher implements Runnable {
 ContainerLaunchContext container =
 applicationMasterContext.getAMContainerSpec();
 
-// Populate the current queue name in the environment variable.
-setupQueueNameEnv(container, applicationMasterContext);
-
 // Finalize the container
 setupTokens(container, containerID);
 // set the flow context optionally for timeline service v.2
@@ -200,16 +196,6 @@ public class AMLauncher implements Runnable {
 return container;
   }
 
-  private void setupQueueNameEnv(ContainerLaunchContext container,
-  ApplicationSubmissionContext applicationMasterContext) {
-String queueName = applicationMasterContext.getQueue();
-if (queueName == null) {
-  queueName = YarnConfiguration.DEFAULT_QUEUE_NAME;
-}
-container.getEnvironment().put(ApplicationConstants.Environment
-.YARN_RESOURCEMANAGER_APPLICATION_QUEUE.key(), queueName);
-  }
-
   @Private
   @VisibleForTesting
   protected void setupTokens(


[05/50] [abbrv] hadoop git commit: HDFS-11409. DatanodeInfo getNetworkLocation and setNetworkLocation should use volatile instead of synchronized. Contributed by Chen Liang.

2017-02-21 Thread xgong
HDFS-11409. DatanodeInfo getNetworkLocation and setNetworkLocation should use 
volatile instead of synchronized. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aaf27132
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aaf27132
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aaf27132

Branch: refs/heads/YARN-5734
Commit: aaf27132350547fcde1fdb372f19626838f44bc4
Parents: 0cf5993
Author: Xiaoyu Yao 
Authored: Tue Feb 14 12:52:34 2017 -0800
Committer: Xiaoyu Yao 
Committed: Tue Feb 14 12:52:34 2017 -0800

--
 .../java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf27132/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 41735b1..acbcffa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -51,7 +51,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private long lastUpdate;
   private long lastUpdateMonotonic;
   private int xceiverCount;
-  private String location = NetworkTopology.DEFAULT_RACK;
+  private volatile String location = NetworkTopology.DEFAULT_RACK;
   private String softwareVersion;
   private List dependentHostNames = new LinkedList<>();
   private String upgradeDomain;
@@ -293,11 +293,11 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 
   /** network location */
   @Override
-  public synchronized String getNetworkLocation() {return location;}
+  public String getNetworkLocation() {return location;}
 
   /** Sets the network location */
   @Override
-  public synchronized void setNetworkLocation(String location) {
+  public void setNetworkLocation(String location) {
 this.location = NodeBase.normalize(location);
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/50] [abbrv] hadoop git commit: YARN-6174. Log files pattern should be same for both running and finished container. Contributed by Xuan Gong.

2017-02-21 Thread xgong
YARN-6174. Log files pattern should be same for both running and finished 
container. Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce2d5bfa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce2d5bfa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce2d5bfa

Branch: refs/heads/YARN-5734
Commit: ce2d5bfa5f84e7e563980796549b56ef1e4bbf1e
Parents: 859bd15
Author: Junping Du 
Authored: Wed Feb 15 09:05:14 2017 -0800
Committer: Junping Du 
Committed: Wed Feb 15 09:05:14 2017 -0800

--
 .../ContainerLogAggregationType.java| 31 
 .../yarn/logaggregation/LogToolUtils.java   | 11 +++
 .../webapp/AHSWebServices.java  | 13 
 .../webapp/TestAHSWebServices.java  | 26 +---
 .../server/webapp/dao/ContainerLogsInfo.java|  8 ++---
 .../nodemanager/webapp/NMWebServices.java   | 11 +++
 .../webapp/dao/NMContainerLogsInfo.java |  4 +--
 .../nodemanager/webapp/TestNMWebServices.java   | 19 ++--
 8 files changed, 82 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce2d5bfa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogAggregationType.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogAggregationType.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogAggregationType.java
new file mode 100644
index 000..664448b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogAggregationType.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.logaggregation;
+
+/**
+ * Enumeration of various aggregation type of a container log.
+ */
+public enum ContainerLogAggregationType {
+
+  /** The log is from NodeManager local log directory. */
+  LOCAL,
+
+  /** The log is from Remote FileSystem application log directory. */
+  AGGREGATED
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce2d5bfa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
index d83a8ae..ae2517a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
@@ -141,7 +141,7 @@ public final class LogToolUtils {
   public static void outputContainerLog(String containerId, String nodeId,
   String fileName, long fileLength, long outputSize,
   String lastModifiedTime, InputStream fis, OutputStream os,
-  byte[] buf, ContainerLogType logType) throws IOException {
+  byte[] buf, ContainerLogAggregationType logType) throws IOException {
 long toSkip = 0;
 long totalBytesToRead = fileLength;
 long skipAfterRead = 0;
@@ -171,9 +171,9 @@ public final class LogToolUtils {
   LogToolUtils.CONTAINER_ON_NODE_PATTERN,
   containerId, nodeId);
   sb.append(containerStr + "\n");
-  sb.append("LogType: " + logType + "\n");
+  sb.append("LogAggregationType: " + logType + "\n");
   sb.append(StringUtils.repeat("=", containerStr.length()) + "\n");
-  sb.append("FileName:" + fileName + "\n");
+  

[04/50] [abbrv] hadoop git commit: HDFS-11084. Add a regression test for sticky bit support of OIV ReverseXML processor. Contributed by Wei-Chiu Chuang.

2017-02-21 Thread xgong
HDFS-11084. Add a regression test for sticky bit support of OIV ReverseXML 
processor. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cf59937
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cf59937
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cf59937

Branch: refs/heads/YARN-5734
Commit: 0cf5993712a01993bd701bd9664e6af284378b55
Parents: 1fa084c
Author: Wei-Chiu Chuang 
Authored: Tue Feb 14 08:59:12 2017 -0800
Committer: Wei-Chiu Chuang 
Committed: Tue Feb 14 09:11:55 2017 -0800

--
 .../tools/offlineImageViewer/TestOfflineImageViewer.java | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf59937/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 740a8ab..dacbb85 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -69,6 +69,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -158,6 +160,15 @@ public class TestOfflineImageViewer {
   hdfs.mkdirs(invalidXMLDir);
   dirCount++;
 
+  //Create a directory with sticky bits
+  Path stickyBitDir = new Path("/stickyBit");
+  hdfs.mkdirs(stickyBitDir);
+  hdfs.setPermission(stickyBitDir, new FsPermission(FsAction.ALL,
+  FsAction.ALL, FsAction.ALL, true));
+  dirCount++;
+  writtenFiles.put(stickyBitDir.toString(),
+  hdfs.getFileStatus(stickyBitDir));
+
   // Get delegation tokens so we log the delegation token op
   Token[] delegationTokens = hdfs
   .addDelegationTokens(TEST_RENEWER, null);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/50] [abbrv] hadoop git commit: HDFS-11391. Numeric usernames do not work with WebHDFS FS write access. (Pierre Villard via Yongjun Zhang)

2017-02-21 Thread xgong
HDFS-11391. Numeric usernames do not work with WebHDFS FS write access. (Pierre 
Villard via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e53f2b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e53f2b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e53f2b9

Branch: refs/heads/YARN-5734
Commit: 8e53f2b9b08560bf4f8e81e697063277dbdc68f9
Parents: 652679a
Author: Yongjun Zhang 
Authored: Tue Feb 14 12:47:06 2017 -0800
Committer: Yongjun Zhang 
Committed: Tue Feb 14 13:40:53 2017 -0800

--
 .../hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java   | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e53f2b9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index 095f41d..f8c15fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -55,6 +56,7 @@ import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -108,6 +110,10 @@ public class WebHdfsHandler extends 
SimpleChannelInboundHandler {
 throws IOException {
 this.conf = conf;
 this.confForCreate = confForCreate;
+/** set user pattern based on configuration file */
+UserParam.setUserPattern(
+conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
   }
 
   @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[03/50] [abbrv] hadoop git commit: YARN-5912. Fix breadcrumb issues for various pages in new YARN UI. Contributed by Akhil P B.

2017-02-21 Thread xgong
YARN-5912. Fix breadcrumb issues for various pages in new YARN UI. Contributed 
by Akhil P B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fa084c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fa084c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fa084c4

Branch: refs/heads/YARN-5734
Commit: 1fa084c4254b89cd45210727ccb68725d583ff62
Parents: b9f8491
Author: Sunil G 
Authored: Tue Feb 14 22:29:21 2017 +0530
Committer: Sunil G 
Committed: Tue Feb 14 22:29:21 2017 +0530

--
 .../webapp/app/controllers/yarn-app-attempt.js  |  2 +-
 .../webapp/app/controllers/yarn-app-attempts.js |  2 +-
 .../src/main/webapp/app/controllers/yarn-app.js |  2 +-
 .../main/webapp/app/controllers/yarn-apps.js|  2 +-
 .../app/controllers/yarn-container-log.js   |  7 +++-
 .../webapp/app/controllers/yarn-node-app.js |  7 +++-
 .../webapp/app/controllers/yarn-node-apps.js|  2 +-
 .../app/controllers/yarn-node-container.js  | 39 
 .../app/controllers/yarn-node-containers.js |  2 +-
 .../main/webapp/app/controllers/yarn-node.js|  2 +-
 .../webapp/app/controllers/yarn-services.js |  2 +-
 .../src/main/webapp/app/models/yarn-app.js  |  2 +-
 .../src/main/webapp/app/routes/yarn-node-app.js |  2 +-
 .../webapp/app/routes/yarn-node-container.js|  2 +-
 .../controllers/yarn-node-container-test.js | 30 +++
 15 files changed, 90 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fa084c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
index a458842..4c02361 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
@@ -27,7 +27,7 @@ export default Ember.Controller.extend({
   routeName: 'application'
 },{
   text: "Applications",
-  routeName: 'yarn-apps'
+  routeName: 'yarn-apps.apps'
 }, {
   text: `App [${appId}]`,
   routeName: 'yarn-app',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fa084c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
index 9ebc2a6..92de2f9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
@@ -27,7 +27,7 @@ export default Ember.Controller.extend({
   routeName: 'application'
 },{
   text: "Applications",
-  routeName: 'yarn-apps'
+  routeName: 'yarn-apps.apps'
 }, {
   text: `App [${appId}]`,
   routeName: 'yarn-app',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fa084c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
index 309c895..9c1cb5d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
@@ -27,7 +27,7 @@ export default Ember.Controller.extend({
   routeName: 'application'
 },{
   text: "Applications",
-  routeName: 'yarn-apps'
+  routeName: 'yarn-apps.apps'
 }, {
   text: `App [${appId}]`,
   routeName: 'yarn-app',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fa084c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-apps.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-apps.js
index 396f83b..18bf682 100644
--- 

[30/50] [abbrv] hadoop git commit: YARN-6171. ConcurrentModificationException on FSAppAttempt.containersToPreempt. (Miklos Szegedi via kasha)

2017-02-21 Thread xgong
YARN-6171. ConcurrentModificationException on FSAppAttempt.containersToPreempt. 
(Miklos Szegedi via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a77f4324
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a77f4324
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a77f4324

Branch: refs/heads/YARN-5734
Commit: a77f432449aad67da31bd8bf8644b71def741bde
Parents: 5d339c4
Author: Karthik Kambatla 
Authored: Thu Feb 16 14:54:51 2017 -0800
Committer: Karthik Kambatla 
Committed: Thu Feb 16 14:54:58 2017 -0800

--
 .../scheduler/fair/FSAppAttempt.java| 49 +++-
 .../scheduler/fair/FairScheduler.java   | 15 +++---
 2 files changed, 34 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a77f4324/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 563b892..b1bb9a0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -83,8 +83,10 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
   private Resource fairShare = Resources.createResource(0, 0);
 
   // Preemption related variables
+  private final Object preemptionVariablesLock = new Object();
   private final Resource preemptedResources = 
Resources.clone(Resources.none());
   private final Set containersToPreempt = new HashSet<>();
+
   private Resource fairshareStarvation = Resources.none();
   private long lastTimeAtFairShare;
   private long nextStarvationCheck;
@@ -552,29 +554,29 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   }
 
   void trackContainerForPreemption(RMContainer container) {
-if (containersToPreempt.add(container)) {
-  synchronized (preemptedResources) {
+synchronized (preemptionVariablesLock) {
+  if (containersToPreempt.add(container)) {
 Resources.addTo(preemptedResources, container.getAllocatedResource());
   }
 }
   }
 
   private void untrackContainerForPreemption(RMContainer container) {
-if (containersToPreempt.remove(container)) {
-  synchronized (preemptedResources) {
+synchronized (preemptionVariablesLock) {
+  if (containersToPreempt.remove(container)) {
 Resources.subtractFrom(preemptedResources,
 container.getAllocatedResource());
   }
 }
   }
 
-  Set getPreemptionContainers() {
-return containersToPreempt;
-  }
-
-  private Resource getPreemptedResources() {
-synchronized (preemptedResources) {
-  return preemptedResources;
+  Set getPreemptionContainerIds() {
+synchronized (preemptionVariablesLock) {
+  Set preemptionContainerIds = new HashSet<>();
+  for (RMContainer container : containersToPreempt) {
+preemptionContainerIds.add(container.getContainerId());
+  }
+  return preemptionContainerIds;
 }
   }
 
@@ -591,9 +593,11 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   return false;
 }
 
-if (containersToPreempt.contains(container)) {
-  // The container is already under consideration for preemption
-  return false;
+synchronized (preemptionVariablesLock) {
+  if (containersToPreempt.contains(container)) {
+// The container is already under consideration for preemption
+return false;
+  }
 }
 
 // Check if the app's allocation will be over its fairshare even
@@ -969,7 +973,8 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 if (LOG.isTraceEnabled()) {
   LOG.trace("Assign container on " + node.getNodeName()
   + " node, assignType: OFF_SWITCH" + ", allowedLocality: "
-  + allowedLocality + ", priority: " + 
schedulerKey.getPriority()
+  + allowedLocality + ", priority: "
+  + schedulerKey.getPriority()
   + ", app attempt id: " + this.attemptId);
   

[11/50] [abbrv] hadoop git commit: HDFS-11238. Fix checkstyle warnings in NameNode#createNameNode. Contributed by Ethan Li.

2017-02-21 Thread xgong
HDFS-11238. Fix checkstyle warnings in NameNode#createNameNode. Contributed by 
Ethan Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8acb376c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8acb376c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8acb376c

Branch: refs/heads/YARN-5734
Commit: 8acb376c9c5f7f52a097be221ed18877a403bece
Parents: 1e11080
Author: Akira Ajisaka 
Authored: Wed Feb 15 16:53:50 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Feb 15 16:53:50 2017 +0900

--
 .../hadoop/hdfs/server/namenode/NameNode.java   | 101 +--
 1 file changed, 46 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8acb376c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index df5ee0f..1752cf7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -1579,62 +1579,53 @@ public class NameNode extends ReconfigurableBase 
implements
 }
 setStartupOption(conf, startOpt);
 
+boolean aborted = false;
 switch (startOpt) {
-  case FORMAT: {
-boolean aborted = format(conf, startOpt.getForceFormat(),
-startOpt.getInteractiveFormat());
-terminate(aborted ? 1 : 0);
-return null; // avoid javac warning
-  }
-  case GENCLUSTERID: {
-System.err.println("Generating new cluster id:");
-System.out.println(NNStorage.newClusterID());
-terminate(0);
-return null;
-  }
-  case ROLLBACK: {
-boolean aborted = doRollback(conf, true);
-terminate(aborted ? 1 : 0);
-return null; // avoid warning
-  }
-  case BOOTSTRAPSTANDBY: {
-String toolArgs[] = Arrays.copyOfRange(argv, 1, argv.length);
-int rc = BootstrapStandby.run(toolArgs, conf);
-terminate(rc);
-return null; // avoid warning
-  }
-  case INITIALIZESHAREDEDITS: {
-boolean aborted = initializeSharedEdits(conf,
-startOpt.getForceFormat(),
-startOpt.getInteractiveFormat());
-terminate(aborted ? 1 : 0);
-return null; // avoid warning
-  }
-  case BACKUP:
-  case CHECKPOINT: {
-NamenodeRole role = startOpt.toNodeRole();
-DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
-return new BackupNode(conf, role);
-  }
-  case RECOVER: {
-NameNode.doRecovery(startOpt, conf);
-return null;
-  }
-  case METADATAVERSION: {
-printMetadataVersion(conf);
-terminate(0);
-return null; // avoid javac warning
-  }
-  case UPGRADEONLY: {
-DefaultMetricsSystem.initialize("NameNode");
-new NameNode(conf);
-terminate(0);
-return null;
-  }
-  default: {
-DefaultMetricsSystem.initialize("NameNode");
-return new NameNode(conf);
-  }
+case FORMAT:
+  aborted = format(conf, startOpt.getForceFormat(),
+  startOpt.getInteractiveFormat());
+  terminate(aborted ? 1 : 0);
+  return null; // avoid javac warning
+case GENCLUSTERID:
+  System.err.println("Generating new cluster id:");
+  System.out.println(NNStorage.newClusterID());
+  terminate(0);
+  return null;
+case ROLLBACK:
+  aborted = doRollback(conf, true);
+  terminate(aborted ? 1 : 0);
+  return null; // avoid warning
+case BOOTSTRAPSTANDBY:
+  String[] toolArgs = Arrays.copyOfRange(argv, 1, argv.length);
+  int rc = BootstrapStandby.run(toolArgs, conf);
+  terminate(rc);
+  return null; // avoid warning
+case INITIALIZESHAREDEDITS:
+  aborted = initializeSharedEdits(conf,
+  startOpt.getForceFormat(),
+  startOpt.getInteractiveFormat());
+  terminate(aborted ? 1 : 0);
+  return null; // avoid warning
+case BACKUP:
+case CHECKPOINT:
+  NamenodeRole role = startOpt.toNodeRole();
+  DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
+  return new BackupNode(conf, role);
+case RECOVER:
+  NameNode.doRecovery(startOpt, conf);
+  return null;
+case METADATAVERSION:
+  printMetadataVersion(conf);
+  terminate(0);
+  return null; // avoid javac warning
+case UPGRADEONLY:
+  

[28/50] [abbrv] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-21 Thread xgong
YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate 
classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4fa1afdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4fa1afdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4fa1afdb

Branch: refs/heads/YARN-5734
Commit: 4fa1afdb883dab8786d2fb5c72a195dd2e87d711
Parents: 5690b51
Author: Sangjin Lee 
Authored: Thu Feb 16 11:41:04 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Feb 16 11:41:04 2017 -0800

--
 .../jobhistory/JobHistoryEventHandler.java  |  57 +-
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  14 +-
 .../v2/app/rm/RMContainerAllocator.java |   4 +-
 .../jobhistory/TestJobHistoryEventHandler.java  |   8 +-
 .../distributedshell/ApplicationMaster.java |  98 ++-
 .../hadoop/yarn/client/api/AMRMClient.java  |  40 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  21 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |   5 +-
 .../yarn/client/api/impl/YarnClientImpl.java|  15 +-
 .../hadoop/yarn/client/api/TimelineClient.java  |  94 +--
 .../yarn/client/api/TimelineV2Client.java   |  92 +++
 .../client/api/impl/TimelineClientImpl.java | 825 ++-
 .../yarn/client/api/impl/TimelineConnector.java | 440 ++
 .../client/api/impl/TimelineV2ClientImpl.java   | 459 +++
 .../client/api/impl/TestTimelineClient.java |  39 +-
 .../api/impl/TestTimelineClientV2Impl.java  |   4 +-
 .../timelineservice/NMTimelinePublisher.java|  22 +-
 .../TestNMTimelinePublisher.java|  10 +-
 .../TestTimelineServiceClientIntegration.java   |  10 +-
 19 files changed, 1272 insertions(+), 985 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fa1afdb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 0cc605c..285d36e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -72,13 +72,12 @@ import 
org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
+import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.node.JsonNodeFactory;
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.ClientHandlerException;
 
@@ -90,8 +89,6 @@ import com.sun.jersey.api.client.ClientHandlerException;
  */
 public class JobHistoryEventHandler extends AbstractService
 implements EventHandler {
-  private static final JsonNodeFactory FACTORY =
-  new ObjectMapper().getNodeFactory();
 
   private final AppContext context;
   private final int startCount;
@@ -133,9 +130,10 @@ public class JobHistoryEventHandler extends AbstractService
   // should job completion be force when the AM shuts down?
   protected volatile boolean forceJobCompletion = false;
 
+  @VisibleForTesting
   protected TimelineClient timelineClient;
-
-  private boolean timelineServiceV2Enabled = false;
+  @VisibleForTesting
+  protected TimelineV2Client timelineV2Client;
 
   private static String MAPREDUCE_JOB_ENTITY_TYPE = "MAPREDUCE_JOB";
   private static String MAPREDUCE_TASK_ENTITY_TYPE = "MAPREDUCE_TASK";
@@ -268,12 +266,17 @@ public class JobHistoryEventHandler extends 
AbstractService
 MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)) {
   LOG.info("Emitting job history data to the timeline service is enabled");
   if (YarnConfiguration.timelineServiceEnabled(conf)) {
-
-

[22/50] [abbrv] hadoop git commit: YARN-4212. FairScheduler: Can't create a DRF queue under a FAIR policy queue. (Yufei Gu via kasha)

2017-02-21 Thread xgong
YARN-4212. FairScheduler: Can't create a DRF queue under a FAIR policy queue. 
(Yufei Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11be3f70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11be3f70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11be3f70

Branch: refs/heads/YARN-5734
Commit: 11be3f70e029c2324b167563168c8a254d234aef
Parents: 6c25dbc
Author: Karthik Kambatla 
Authored: Wed Feb 15 23:51:22 2017 -0800
Committer: Karthik Kambatla 
Committed: Wed Feb 15 23:51:22 2017 -0800

--
 .../scheduler/fair/AllocationConfiguration.java |  11 +-
 .../scheduler/fair/FSLeafQueue.java |   9 -
 .../scheduler/fair/FSParentQueue.java   |  13 -
 .../resourcemanager/scheduler/fair/FSQueue.java |  50 ++-
 .../scheduler/fair/QueueManager.java|  28 +-
 .../scheduler/fair/SchedulingPolicy.java|  36 +--
 .../DominantResourceFairnessPolicy.java |   5 -
 .../fair/policies/FairSharePolicy.java  |  15 +-
 .../scheduler/fair/policies/FifoPolicy.java |  14 +-
 .../scheduler/fair/TestFSAppStarvation.java |  10 +-
 .../scheduler/fair/TestFairScheduler.java   |   1 -
 .../scheduler/fair/TestSchedulingPolicy.java| 302 +++
 12 files changed, 340 insertions(+), 154 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11be3f70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index 7bd2616..f143aa6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -408,9 +408,8 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
* Initialize a {@link FSQueue} with queue-specific properties and its
* metrics.
* @param queue the FSQueue needed to be initialized
-   * @param scheduler the scheduler which the queue belonged to
*/
-  public void initFSQueue(FSQueue queue, FairScheduler scheduler){
+  public void initFSQueue(FSQueue queue){
 // Set queue-specific properties.
 String name = queue.getName();
 queue.setWeights(getQueueWeight(name));
@@ -419,14 +418,6 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 queue.setMaxRunningApps(getQueueMaxApps(name));
 queue.setMaxAMShare(getQueueMaxAMShare(name));
 queue.setMaxChildQueueResource(getMaxChildResources(name));
-try {
-  SchedulingPolicy policy = getSchedulingPolicy(name);
-  policy.initialize(scheduler.getClusterResource());
-  queue.setPolicy(policy);
-} catch (AllocationConfigurationException ex) {
-  LOG.warn("Failed to set the scheduling policy "
-  + getDefaultSchedulingPolicy(), ex);
-}
 
 // Set queue metrics.
 queue.getMetrics().setMinShare(getMinResources(name));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/11be3f70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index c4b2de6..59bde5b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 

[02/50] [abbrv] hadoop git commit: HADOOP-14058. Fix NativeS3FileSystemContractBaseTest#testDirWithDifferentMarkersWorks. Contributed by Yiqun Lin.

2017-02-21 Thread xgong
HADOOP-14058. Fix 
NativeS3FileSystemContractBaseTest#testDirWithDifferentMarkersWorks. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9f84912
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9f84912
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9f84912

Branch: refs/heads/YARN-5734
Commit: b9f8491252f5a23a91a1d695d748556a0fd803ae
Parents: aaf106f
Author: Akira Ajisaka 
Authored: Wed Feb 15 01:45:56 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Feb 15 01:45:56 2017 +0900

--
 .../hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f84912/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
index ef223ac..261f79b 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
@@ -85,7 +85,7 @@ public abstract class NativeS3FileSystemContractBaseTest
 
   public void testDirWithDifferentMarkersWorks() throws Exception {
 
-for (int i = 0; i < 3; i++) {
+for (int i = 0; i <= 3; i++) {
   String base = "test/hadoop" + i;
   Path path = path("/" + base);
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[15/50] [abbrv] hadoop git commit: HDFS-11333. Print a user friendly error message when plugins are not found. Contributed by Wei-Chiu Chuang.

2017-02-21 Thread xgong
HDFS-11333. Print a user friendly error message when plugins are not found. 
Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/859bd159
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/859bd159
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/859bd159

Branch: refs/heads/YARN-5734
Commit: 859bd159ae554174200334b5eb1d7e8dbef958ad
Parents: ff75132
Author: Wei-Chiu Chuang 
Authored: Wed Feb 15 02:50:35 2017 -0800
Committer: Wei-Chiu Chuang 
Committed: Wed Feb 15 02:50:35 2017 -0800

--
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java | 10 +-
 .../org/apache/hadoop/hdfs/server/namenode/NameNode.java | 11 +--
 2 files changed, 18 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/859bd159/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 9ed80ef..5db41bd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -921,7 +921,15 @@ public class DataNode extends ReconfigurableBase
   }
 
   private void startPlugins(Configuration conf) {
-plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
+try {
+  plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY,
+  ServicePlugin.class);
+} catch (RuntimeException e) {
+  String pluginsValue = conf.get(DFS_DATANODE_PLUGINS_KEY);
+  LOG.error("Unable to load DataNode plugins. Specified list of plugins: " 
+
+  pluginsValue, e);
+  throw e;
+}
 for (ServicePlugin p: plugins) {
   try {
 p.start(this);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/859bd159/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 1752cf7..e7841f0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -809,8 +809,15 @@ public class NameNode extends ReconfigurableBase implements
   httpServer.setFSImage(getFSImage());
 }
 rpcServer.start();
-plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
-ServicePlugin.class);
+try {
+  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
+  ServicePlugin.class);
+} catch (RuntimeException e) {
+  String pluginsValue = conf.get(DFS_NAMENODE_PLUGINS_KEY);
+  LOG.error("Unable to load NameNode plugins. Specified list of plugins: " 
+
+  pluginsValue, e);
+  throw e;
+}
 for (ServicePlugin p: plugins) {
   try {
 p.start(this);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[23/50] [abbrv] hadoop git commit: YARN-5798. Set UncaughtExceptionHandler for all FairScheduler threads. (Yufei Gu via kasha)

2017-02-21 Thread xgong
YARN-5798. Set UncaughtExceptionHandler for all FairScheduler threads. (Yufei 
Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74dd1422
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74dd1422
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74dd1422

Branch: refs/heads/YARN-5734
Commit: 74dd14225059322825f706120aa57cf673820daf
Parents: 11be3f7
Author: Karthik Kambatla 
Authored: Thu Feb 16 00:03:09 2017 -0800
Committer: Karthik Kambatla 
Committed: Thu Feb 16 00:03:09 2017 -0800

--
 .../server/resourcemanager/scheduler/fair/FairScheduler.java  | 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74dd1422/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 18806bc..c5bf02a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -51,6 +51,7 @@ import 
org.apache.hadoop.yarn.security.PrivilegedEntity.EntityType;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import 
org.apache.hadoop.yarn.server.resourcemanager.RMCriticalThreadUncaughtExceptionHandler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationConstants;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
@@ -1268,12 +1269,16 @@ public class FairScheduler extends
 
   updateThread = new UpdateThread();
   updateThread.setName("FairSchedulerUpdateThread");
+  updateThread.setUncaughtExceptionHandler(
+  new RMCriticalThreadUncaughtExceptionHandler(rmContext));
   updateThread.setDaemon(true);
 
   if (continuousSchedulingEnabled) {
 // start continuous scheduling thread
 schedulingThread = new ContinuousSchedulingThread();
 schedulingThread.setName("FairSchedulerContinuousScheduling");
+schedulingThread.setUncaughtExceptionHandler(
+new RMCriticalThreadUncaughtExceptionHandler(rmContext));
 schedulingThread.setDaemon(true);
   }
 
@@ -1299,6 +1304,8 @@ public class FairScheduler extends
   @VisibleForTesting
   protected void createPreemptionThread() {
 preemptionThread = new FSPreemptionThread(this);
+preemptionThread.setUncaughtExceptionHandler(
+new RMCriticalThreadUncaughtExceptionHandler(rmContext));
   }
 
   private void updateReservationThreshold() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/50] [abbrv] hadoop git commit: YARN-4753. Use doxia macro to generate in-page TOC of YARN site documentation. (iwasakims)

2017-02-21 Thread xgong
YARN-4753. Use doxia macro to generate in-page TOC of YARN site documentation. 
(iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbc0c2bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbc0c2bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbc0c2bd

Branch: refs/heads/YARN-5734
Commit: fbc0c2bd763e3a3aad914eb9d60b05ad4ab2825f
Parents: 353a9b2
Author: Masatake Iwasaki 
Authored: Wed Feb 15 13:09:10 2017 +0900
Committer: Masatake Iwasaki 
Committed: Wed Feb 15 13:09:10 2017 +0900

--
 hadoop-project/src/site/site.xml|   2 +-
 .../src/site/markdown/CapacityScheduler.md  |  14 +--
 .../src/site/markdown/DockerContainers.md   |   8 +-
 .../src/site/markdown/FairScheduler.md  |  14 +--
 .../src/site/markdown/NodeLabel.md  |  14 +--
 .../src/site/markdown/NodeManager.md|   8 +-
 .../src/site/markdown/NodeManagerCgroups.md |   3 +-
 .../src/site/markdown/NodeManagerRest.md|   8 +-
 .../site/markdown/OpportunisticContainers.md|  17 +--
 .../src/site/markdown/ReservationSystem.md  |   6 +-
 .../src/site/markdown/ResourceManagerHA.md  |  10 +-
 .../src/site/markdown/ResourceManagerRest.md|  29 +
 .../src/site/markdown/ResourceManagerRestart.md |  13 +--
 .../src/site/markdown/SecureContainer.md|   2 +-
 .../src/site/markdown/TimelineServer.md |  16 +--
 .../src/site/markdown/TimelineServiceV2.md  | 110 ---
 .../src/site/markdown/WebApplicationProxy.md|   7 +-
 .../src/site/markdown/WebServicesIntro.md   |  13 +--
 .../site/markdown/WritingYarnApplications.md|  14 +--
 .../site/markdown/YarnApplicationSecurity.md|   2 +
 .../src/site/markdown/YarnCommands.md   |  26 +
 21 files changed, 69 insertions(+), 267 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc0c2bd/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 618ad4c..ae3aef5 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -145,7 +145,7 @@
   
   
   
-  
+  
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc0c2bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 9c9b03e..737bdc2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -15,19 +15,7 @@
 Hadoop: Capacity Scheduler
 ==
 
-* [Purpose](#Purpose)
-* [Overview](#Overview)
-* [Features](#Features)
-* [Configuration](#Configuration)
-* [Setting up `ResourceManager` to use 
`CapacityScheduler`](#Setting_up_ResourceManager_to_use_CapacityScheduler`)
-* [Setting up queues](#Setting_up_queues)
-* [Queue Properties](#Queue_Properties)
-* [Setup for application priority](#Setup_for_application_priority.)
-* [Capacity Scheduler container 
preemption](#Capacity_Scheduler_container_preemption)
-* [Configuring `ReservationSystem` with 
`CapacityScheduler`](#Configuring_ReservationSystem_with_CapacityScheduler)
-* [Other Properties](#Other_Properties)
-* [Reviewing the configuration of the 
CapacityScheduler](#Reviewing_the_configuration_of_the_CapacityScheduler)
-* [Changing Queue Configuration](#Changing_Queue_Configuration)
+
 
 Purpose
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc0c2bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index b74fa7b..e66d079 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -15,13 +15,7 @@
 Launching Applications Using Docker Containers
 ==
 
-* [Overview](#Overview)
-* [Cluster Configuration](#Cluster_Configuration)
-* [Docker Image Requirements](#Docker_Image_Requirements)
-* [Application 

[18/50] [abbrv] hadoop git commit: HDFS-8498. Blocks can be committed with wrong size. Contributed by Jing Zhao.

2017-02-21 Thread xgong
HDFS-8498. Blocks can be committed with wrong size. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/627da6f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/627da6f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/627da6f7

Branch: refs/heads/YARN-5734
Commit: 627da6f7178e18aa41996969c408b6f344e297d1
Parents: 0fc6f38
Author: Jing Zhao 
Authored: Wed Feb 15 10:44:37 2017 -0800
Committer: Jing Zhao 
Committed: Wed Feb 15 10:44:37 2017 -0800

--
 .../org/apache/hadoop/hdfs/DataStreamer.java| 100 +--
 .../apache/hadoop/hdfs/StripedDataStreamer.java |   8 +-
 .../apache/hadoop/hdfs/TestDFSOutputStream.java |   3 +-
 3 files changed, 72 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/627da6f7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 8e6eb63..0268537 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -142,8 +142,6 @@ class DataStreamer extends Daemon {
 
 /**
  * Record a connection exception.
- * @param e
- * @throws InvalidEncryptionKeyException
  */
 void recordFailure(final InvalidEncryptionKeyException e)
 throws InvalidEncryptionKeyException {
@@ -178,9 +176,8 @@ class DataStreamer extends Daemon {
 final StorageType[] targetStorageTypes,
 final Token blockToken) throws IOException {
   //send the TRANSFER_BLOCK request
-  new Sender(out)
-  .transferBlock(block, blockToken, dfsClient.clientName, targets,
-  targetStorageTypes);
+  new Sender(out).transferBlock(block.getCurrentBlock(), blockToken,
+  dfsClient.clientName, targets, targetStorageTypes);
   out.flush();
   //ack
   BlockOpResponseProto transferResponse = BlockOpResponseProto
@@ -199,6 +196,42 @@ class DataStreamer extends Daemon {
 }
   }
 
+  static class BlockToWrite {
+private ExtendedBlock currentBlock;
+
+BlockToWrite(ExtendedBlock block) {
+  setCurrentBlock(block);
+}
+
+synchronized ExtendedBlock getCurrentBlock() {
+  return currentBlock == null ? null : new ExtendedBlock(currentBlock);
+}
+
+synchronized long getNumBytes() {
+  return currentBlock == null ? 0 : currentBlock.getNumBytes();
+}
+
+synchronized void setCurrentBlock(ExtendedBlock block) {
+  currentBlock = (block == null || block.getLocalBlock() == null) ?
+  null : new ExtendedBlock(block);
+}
+
+synchronized void setNumBytes(long numBytes) {
+  assert currentBlock != null;
+  currentBlock.setNumBytes(numBytes);
+}
+
+synchronized void setGenerationStamp(long generationStamp) {
+  assert currentBlock != null;
+  currentBlock.setGenerationStamp(generationStamp);
+}
+
+@Override
+public synchronized String toString() {
+  return currentBlock == null ? "null" : currentBlock.toString();
+}
+  }
+
   /**
* Create a socket for a write pipeline
*
@@ -440,7 +473,7 @@ class DataStreamer extends Daemon {
   }
 
   private volatile boolean streamerClosed = false;
-  protected volatile ExtendedBlock block; // its length is number of bytes 
acked
+  protected final BlockToWrite block; // its length is number of bytes acked
   protected Token accessToken;
   private DataOutputStream blockStream;
   private DataInputStream blockReplyStream;
@@ -508,7 +541,7 @@ class DataStreamer extends Daemon {
ByteArrayManager byteArrayManage,
boolean isAppend, String[] favoredNodes,
EnumSet flags) {
-this.block = block;
+this.block = new BlockToWrite(block);
 this.dfsClient = dfsClient;
 this.src = src;
 this.progress = progress;
@@ -1322,7 +1355,7 @@ class DataStreamer extends Daemon {
   LocatedBlock lb;
   //get a new datanode
   lb = dfsClient.namenode.getAdditionalDatanode(
-  src, stat.getFileId(), block, nodes, storageIDs,
+  src, stat.getFileId(), block.getCurrentBlock(), nodes, storageIDs,
   exclude.toArray(new DatanodeInfo[exclude.size()]),
   1, dfsClient.clientName);
   // a new node was allocated by the namenode. Update nodes.
@@ -1440,7 +1473,7 @@ class DataStreamer extends Daemon {
 } // while
 

[17/50] [abbrv] hadoop git commit: Addendum patch for YARN-6174 - remove deleted file.

2017-02-21 Thread xgong
Addendum patch for YARN-6174 - remove deleted file.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fc6f383
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fc6f383
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fc6f383

Branch: refs/heads/YARN-5734
Commit: 0fc6f38379f0047afd23ac14abcb5086d65a7f67
Parents: ce2d5bf
Author: Junping Du 
Authored: Wed Feb 15 09:07:49 2017 -0800
Committer: Junping Du 
Committed: Wed Feb 15 09:07:49 2017 -0800

--
 .../yarn/logaggregation/ContainerLogType.java   | 31 
 1 file changed, 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fc6f383/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogType.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogType.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogType.java
deleted file mode 100644
index c101499..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogType.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.logaggregation;
-
-/**
- * Enumeration of various type of a container log.
- */
-public enum ContainerLogType {
-
-  /** The log is from NodeManager local log directory. */
-  LOCAL,
-
-  /** The log is from Remote FileSystem application log directory. */
-  AGGREGATED
-}


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[10/50] [abbrv] hadoop git commit: HADOOP-13942. Update checkstyle and checkstyle plugin version to handle indentation of JDK8 Lambdas.

2017-02-21 Thread xgong
HADOOP-13942. Update checkstyle and checkstyle plugin version to handle 
indentation of JDK8 Lambdas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e11080b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e11080b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e11080b

Branch: refs/heads/YARN-5734
Commit: 1e11080b7825a2d0bafce91432009f585b7b5d21
Parents: fbc0c2b
Author: Akira Ajisaka 
Authored: Wed Feb 15 16:33:30 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Feb 15 16:35:08 2017 +0900

--
 hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml | 4 +---
 pom.xml | 4 ++--
 2 files changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e11080b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
--
diff --git a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml 
b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
index 851b57d..1b968ae 100644
--- a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
+++ b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
@@ -123,9 +123,7 @@
 
 
 
-
-  
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e11080b/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 2ca27c1..3eeba1e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -107,8 +107,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 3.3.0
 2.5.0
 1.0.0
-2.15
-6.6
+2.17
+7.5.1
 1.4.3
 
 bash


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/50] [abbrv] hadoop git commit: YARN-6200. addendum to fix compilation error caused by reverting YARN-5068.

2017-02-21 Thread xgong
YARN-6200. addendum to fix compilation error caused by reverting YARN-5068.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e63a7814
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e63a7814
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e63a7814

Branch: refs/heads/YARN-5734
Commit: e63a7814d21c6469adb01a3a93cfb3ed7613437d
Parents: a393e84
Author: Rohith Sharma K S 
Authored: Thu Feb 16 18:19:57 2017 +0530
Committer: Rohith Sharma K S 
Committed: Thu Feb 16 18:19:57 2017 +0530

--
 .../hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e63a7814/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
index 05f9f47..7051f8c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.client.NMProxy;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[19/50] [abbrv] hadoop git commit: HDFS-11375. Display the volume storage type in datanode UI. Contributed by Surendra Singh Lilhore

2017-02-21 Thread xgong
HDFS-11375. Display the volume storage type in datanode UI. Contributed by 
Surendra Singh Lilhore


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0741dd3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0741dd3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0741dd3b

Branch: refs/heads/YARN-5734
Commit: 0741dd3b9abdeb65bb783c1a8b01f078c4bdba17
Parents: 627da6f
Author: Mingliang Liu 
Authored: Wed Feb 15 11:37:26 2017 -0800
Committer: Mingliang Liu 
Committed: Wed Feb 15 11:37:26 2017 -0800

--
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java | 3 +++
 .../hadoop-hdfs/src/main/webapps/datanode/datanode.html   | 2 ++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0741dd3b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index d1f8f05..6d00d75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2606,6 +2606,7 @@ class FsDatasetImpl implements FsDatasetSpi 
{
 final long reservedSpaceForReplicas; // size of space reserved RBW or
 // re-replication
 final long numBlocks;
+final StorageType storageType;
 
 VolumeInfo(FsVolumeImpl v, long usedSpace, long freeSpace) {
   this.directory = v.toString();
@@ -2614,6 +2615,7 @@ class FsDatasetImpl implements FsDatasetSpi 
{
   this.reservedSpace = v.getReserved();
   this.reservedSpaceForReplicas = v.getReservedForReplicas();
   this.numBlocks = v.getNumBlocks();
+  this.storageType = v.getStorageType();
 }
   }  
 
@@ -2649,6 +2651,7 @@ class FsDatasetImpl implements FsDatasetSpi 
{
   innerInfo.put("reservedSpace", v.reservedSpace);
   innerInfo.put("reservedSpaceForReplicas", v.reservedSpaceForReplicas);
   innerInfo.put("numBlocks", v.numBlocks);
+  innerInfo.put("storageType", v.storageType);
   info.put(v.directory, innerInfo);
 }
 return info;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0741dd3b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
index b35a0a7..e474ab5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
@@ -100,6 +100,7 @@
   
 
   Directory
+  StorageType
   Capacity Used
   Capacity Left
   Capacity Reserved
@@ -110,6 +111,7 @@
   {#dn.VolumeInfo}
 
   {name}
+  {storageType}
   {usedSpace|fmt_bytes}
   {freeSpace|fmt_bytes}
   {reservedSpace|fmt_bytes}


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[34/50] [abbrv] hadoop git commit: HDFS-11410. Use the cached instance when edit logging SetAclOp, SetXAttrOp and RemoveXAttrOp.

2017-02-21 Thread xgong
HDFS-11410. Use the cached instance when edit logging SetAclOp, SetXAttrOp and 
RemoveXAttrOp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02c54948
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02c54948
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02c54948

Branch: refs/heads/YARN-5734
Commit: 02c549484a4fe6215c7f1a18d89389dbba6ea723
Parents: 658702e
Author: Xiao Chen 
Authored: Thu Feb 16 18:07:55 2017 -0800
Committer: Xiao Chen 
Committed: Thu Feb 16 18:07:55 2017 -0800

--
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  6 +++---
 .../hdfs/server/namenode/FSEditLogOp.java   | 21 +---
 2 files changed, 12 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02c54948/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 8454a46..d3f4447 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -1206,14 +1206,14 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   void logSetAcl(String src, List entries) {
-SetAclOp op = SetAclOp.getInstance();
+final SetAclOp op = SetAclOp.getInstance(cache.get());
 op.src = src;
 op.aclEntries = entries;
 logEdit(op);
   }
   
   void logSetXAttrs(String src, List xAttrs, boolean toLogRpcIds) {
-final SetXAttrOp op = SetXAttrOp.getInstance();
+final SetXAttrOp op = SetXAttrOp.getInstance(cache.get());
 op.src = src;
 op.xAttrs = xAttrs;
 logRpcIds(op, toLogRpcIds);
@@ -1221,7 +1221,7 @@ public class FSEditLog implements LogsPurgeable {
   }
   
   void logRemoveXAttrs(String src, List xAttrs, boolean toLogRpcIds) {
-final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
+final RemoveXAttrOp op = RemoveXAttrOp.getInstance(cache.get());
 op.src = src;
 op.xAttrs = xAttrs;
 logRpcIds(op, toLogRpcIds);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02c54948/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index a3285a9..6293557 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -3745,8 +3745,7 @@ public abstract class FSEditLogOp {
 }
 
 static AddCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
-  return (AddCacheDirectiveInfoOp) cache
-  .get(OP_ADD_CACHE_DIRECTIVE);
+  return (AddCacheDirectiveInfoOp) cache.get(OP_ADD_CACHE_DIRECTIVE);
 }
 
 @Override
@@ -3816,8 +3815,7 @@ public abstract class FSEditLogOp {
 }
 
 static ModifyCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
-  return (ModifyCacheDirectiveInfoOp) cache
-  .get(OP_MODIFY_CACHE_DIRECTIVE);
+  return (ModifyCacheDirectiveInfoOp) cache.get(OP_MODIFY_CACHE_DIRECTIVE);
 }
 
 @Override
@@ -3893,8 +3891,7 @@ public abstract class FSEditLogOp {
 }
 
 static RemoveCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
-  return (RemoveCacheDirectiveInfoOp) cache
-  .get(OP_REMOVE_CACHE_DIRECTIVE);
+  return (RemoveCacheDirectiveInfoOp) cache.get(OP_REMOVE_CACHE_DIRECTIVE);
 }
 
 @Override
@@ -4146,8 +4143,8 @@ public abstract class FSEditLogOp {
   super(OP_REMOVE_XATTR);
 }
 
-static RemoveXAttrOp getInstance() {
-  return new RemoveXAttrOp();
+static RemoveXAttrOp getInstance(OpInstanceCache cache) {
+  return (RemoveXAttrOp) cache.get(OP_REMOVE_XATTR);
 }
 
 @Override
@@ -4199,8 +4196,8 @@ public abstract class FSEditLogOp {
   super(OP_SET_XATTR);
 }
 
-static SetXAttrOp getInstance() {
-  return new SetXAttrOp();
+static SetXAttrOp getInstance(OpInstanceCache cache) {
+  return (SetXAttrOp) cache.get(OP_SET_XATTR);
 }
 
 @Override
@@ -4252,8 +4249,8 @@ public abstract class FSEditLogOp {
   super(OP_SET_ACL);
 

[41/50] [abbrv] hadoop git commit: HADOOP-14081. S3A: Consider avoiding array copy in S3ABlockOutputStream (ByteArrayBlock). Contributed by Rajesh Balamohan

2017-02-21 Thread xgong
HADOOP-14081. S3A: Consider avoiding array copy in S3ABlockOutputStream 
(ByteArrayBlock). Contributed by Rajesh Balamohan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8035749c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8035749c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8035749c

Branch: refs/heads/YARN-5734
Commit: 8035749c26947dc641ef87dac041050d439a16d1
Parents: 172b23a
Author: Steve Loughran 
Authored: Mon Feb 20 16:21:00 2017 +
Committer: Steve Loughran 
Committed: Mon Feb 20 16:21:46 2017 +

--
 .../org/apache/hadoop/fs/s3a/S3ADataBlocks.java | 26 +---
 1 file changed, 22 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8035749c/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
index 0fe2af7..05f8efe 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
@@ -298,6 +298,25 @@ final class S3ADataBlocks {
 
   }
 
+  static class S3AByteArrayOutputStream extends ByteArrayOutputStream {
+
+S3AByteArrayOutputStream(int size) {
+  super(size);
+}
+
+/**
+ * InputStream backed by the internal byte array
+ *
+ * @return
+ */
+ByteArrayInputStream getInputStream() {
+  ByteArrayInputStream bin = new ByteArrayInputStream(this.buf, 0, count);
+  this.reset();
+  this.buf = null;
+  return bin;
+}
+  }
+
   /**
* Stream to memory via a {@code ByteArrayOutputStream}.
*
@@ -310,14 +329,14 @@ final class S3ADataBlocks {
*/
 
   static class ByteArrayBlock extends DataBlock {
-private ByteArrayOutputStream buffer;
+private S3AByteArrayOutputStream buffer;
 private final int limit;
 // cache data size so that it is consistent after the buffer is reset.
 private Integer dataSize;
 
 ByteArrayBlock(int limit) {
   this.limit = limit;
-  buffer = new ByteArrayOutputStream();
+  buffer = new S3AByteArrayOutputStream(limit);
 }
 
 /**
@@ -333,8 +352,7 @@ final class S3ADataBlocks {
 InputStream startUpload() throws IOException {
   super.startUpload();
   dataSize = buffer.size();
-  ByteArrayInputStream bufferData = new ByteArrayInputStream(
-  buffer.toByteArray());
+  ByteArrayInputStream bufferData = buffer.getInputStream();
   buffer = null;
   return bufferData;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[21/50] [abbrv] hadoop git commit: YARN-6163. FS Preemption is a trickle for severely starved applications. (kasha)

2017-02-21 Thread xgong
YARN-6163. FS Preemption is a trickle for severely starved applications. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c25dbcd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c25dbcd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c25dbcd

Branch: refs/heads/YARN-5734
Commit: 6c25dbcdc0517a825b92fb16444aa1d3761e160c
Parents: a136936
Author: Karthik Kambatla 
Authored: Wed Feb 15 23:16:01 2017 -0800
Committer: Karthik Kambatla 
Committed: Wed Feb 15 23:16:12 2017 -0800

--
 .../hadoop/yarn/util/resource/Resources.java|  18 +++
 .../scheduler/AbstractYarnScheduler.java|   4 +
 .../scheduler/fair/FSAppAttempt.java| 110 --
 .../scheduler/fair/FSLeafQueue.java | 111 +-
 .../scheduler/fair/FSPreemptionThread.java  | 132 -
 .../scheduler/fair/FairScheduler.java   |   4 +
 .../fair/FairSchedulerConfiguration.java|  23 ++-
 .../fair/VisitedResourceRequestTracker.java | 146 +++
 .../fair/FairSchedulerWithMockPreemption.java   |   5 +-
 .../scheduler/fair/TestFSAppStarvation.java |  20 ++-
 .../fair/TestFairSchedulerPreemption.java   |  45 +++---
 .../fair/TestVisitedResourceRequestTracker.java | 112 ++
 12 files changed, 585 insertions(+), 145 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c25dbcd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 044a232..57b3a46 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -182,6 +182,24 @@ public class Resources {
 return subtractFrom(clone(lhs), rhs);
   }
 
+  /**
+   * Subtract rhs from lhs and reset any negative
+   * values to zero.
+   * @param lhs {@link Resource} to subtract from
+   * @param rhs {@link Resource} to subtract
+   * @return the value of lhs after subtraction
+   */
+  public static Resource subtractFromNonNegative(Resource lhs, Resource rhs) {
+subtractFrom(lhs, rhs);
+if (lhs.getMemorySize() < 0) {
+  lhs.setMemorySize(0);
+}
+if (lhs.getVirtualCores() < 0) {
+  lhs.setVirtualCores(0);
+}
+return lhs;
+  }
+
   public static Resource negate(Resource resource) {
 return subtract(NONE, resource);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c25dbcd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 64427b7..ce6d2a2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -127,6 +127,7 @@ public abstract class AbstractYarnScheduler
*/
   protected ConcurrentMap applications;
   protected int nmExpireInterval;
+  protected long nmHeartbeatInterval;
 
   protected final static List EMPTY_CONTAINER_LIST =
   new ArrayList();
@@ -163,6 +164,9 @@ public abstract class AbstractYarnScheduler
 nmExpireInterval =
 conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
   YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS);
+nmHeartbeatInterval =
+conf.getLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS,
+YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS);
 long configuredMaximumAllocationWaitTime =
 

[01/50] [abbrv] hadoop git commit: YARN-5966. AMRMClient changes to support ExecutionType update. (asuresh) [Forced Update!]

2017-02-21 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 b59c20d17 -> 6e1a54403 (forced update)


YARN-5966. AMRMClient changes to support ExecutionType update. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aaf106fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aaf106fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aaf106fd

Branch: refs/heads/YARN-5734
Commit: aaf106fde35ec97e2e2ea4d7a67434038c4273ac
Parents: 4164a20
Author: Arun Suresh 
Authored: Tue Feb 14 06:08:27 2017 -0800
Committer: Arun Suresh 
Committed: Tue Feb 14 06:09:10 2017 -0800

--
 .../yarn/api/records/UpdateContainerError.java  |  19 +-
 .../src/main/proto/yarn_service_protos.proto|   1 +
 .../hadoop/yarn/client/api/AMRMClient.java  |  33 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  33 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |   7 +-
 .../yarn/client/api/impl/AMRMClientImpl.java| 111 +++--
 .../yarn/client/api/impl/TestAMRMClient.java|  60 ++-
 .../api/impl/TestAMRMClientOnRMRestart.java |   8 +-
 .../TestOpportunisticContainerAllocation.java   | 400 +--
 .../impl/pb/UpdateContainerErrorPBImpl.java |  16 +
 .../server/resourcemanager/RMServerUtils.java   |  14 +-
 ...pportunisticContainerAllocatorAMService.java |   5 +-
 .../capacity/TestIncreaseAllocationExpirer.java |   4 +-
 13 files changed, 587 insertions(+), 124 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf106fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
index e7458cf..4d184cb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
@@ -59,6 +59,22 @@ public abstract class UpdateContainerError {
   public abstract void setReason(String reason);
 
   /**
+   * Get current container version.
+   * @return Current container Version.
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract int getCurrentContainerVersion();
+
+  /**
+   * Set current container version.
+   * @param currentVersion Current container version.
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract void setCurrentContainerVersion(int currentVersion);
+
+  /**
* Get the {@code UpdateContainerRequest} that was not satisfiable.
* @return UpdateContainerRequest
*/
@@ -89,6 +105,7 @@ public abstract class UpdateContainerError {
   @Override
   public String toString() {
 return "UpdateContainerError{reason=" + getReason() + ", "
++ "currentVersion=" + getCurrentContainerVersion() + ", "
 + "req=" + getUpdateContainerRequest() + "}";
   }
 
@@ -120,6 +137,6 @@ public abstract class UpdateContainerError {
 } else if (!req.equals(other.getUpdateContainerRequest())) {
   return false;
 }
-return true;
+return getCurrentContainerVersion() == other.getCurrentContainerVersion();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf106fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index df3c852..c6647c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -78,6 +78,7 @@ message UpdateContainerRequestProto {
 message UpdateContainerErrorProto {
   optional string reason = 1;
   optional UpdateContainerRequestProto update_request = 2;
+  optional int32 current_container_version = 3;
 }
 
 message AllocateRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf106fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
--
diff --git 

[14/50] [abbrv] hadoop git commit: YARN-6183. Few missing informations in Application and Application Attempt pages for new YARN UI. Contributed by Akhil PB.

2017-02-21 Thread xgong
YARN-6183. Few missing informations in Application and Application Attempt 
pages for new YARN UI. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff751323
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff751323
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff751323

Branch: refs/heads/YARN-5734
Commit: ff7513235579bd9ff48d59127864ceb8eda3c509
Parents: b7613e0
Author: Sunil G 
Authored: Wed Feb 15 15:52:57 2017 +0530
Committer: Sunil G 
Committed: Wed Feb 15 15:52:57 2017 +0530

--
 .../webapp/app/components/app-attempt-table.js  |  9 -
 .../src/main/webapp/app/controllers/yarn-app.js |  6 +++---
 .../src/main/webapp/app/models/yarn-app.js  |  2 +-
 .../src/main/webapp/app/serializers/yarn-app.js |  6 +++---
 .../src/main/webapp/app/styles/app.css  | 20 ++--
 .../webapp/app/templates/cluster-overview.hbs   | 10 +-
 .../templates/components/app-attempt-table.hbs  |  6 +++---
 .../templates/components/container-table.hbs|  6 +++---
 .../src/main/webapp/app/templates/yarn-app.hbs  |  8 
 9 files changed, 44 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff751323/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-attempt-table.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-attempt-table.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-attempt-table.js
index 4b741b8..3c43037 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-attempt-table.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-attempt-table.js
@@ -19,4 +19,11 @@
 import Ember from 'ember';
 
 export default Ember.Component.extend({
-});
\ No newline at end of file
+  nodeHttpAddressFormatted: Ember.computed('attempt.nodeHttpAddress', 
function() {
+var nodeHttpAddress = this.get('attempt.nodeHttpAddress');
+if (nodeHttpAddress && nodeHttpAddress.indexOf('://') < 0) {
+  nodeHttpAddress = 'http://' + nodeHttpAddress;
+}
+return nodeHttpAddress;
+  })
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff751323/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
index 9c1cb5d..f699a22 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
@@ -35,11 +35,11 @@ export default Ember.Controller.extend({
 }];
   }),
 
-  amHostHttpAddressFormatted: function() {
+  amHostHttpAddressFormatted: Ember.computed('model.app.amHostHttpAddress', 
function() {
 var amHostAddress = this.get('model.app.amHostHttpAddress');
-if (amHostAddress.indexOf('http://') < 0) {
+if (amHostAddress && amHostAddress.indexOf('://') < 0) {
   amHostAddress = 'http://' + amHostAddress;
 }
 return amHostAddress;
-  }.property('model.app.amHostHttpAddress')
+  })
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff751323/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
index 1366357..638e5b0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
@@ -37,7 +37,7 @@ export default DS.Model.extend({
   amNodeLabelExpression: DS.attr('string'),
   applicationTags: DS.attr('string'),
   applicationType: DS.attr('string'),
-  priority: DS.attr('number'),
+  priority: DS.attr('string'),
   allocatedMB: DS.attr('number'),
   allocatedVCores: DS.attr('number'),
   runningContainers: DS.attr('number'),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff751323/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js
--
diff --git 

[08/50] [abbrv] hadoop git commit: YARN-6061. Addendum. Remove extraneous change.

2017-02-21 Thread xgong
YARN-6061. Addendum. Remove extraneous change.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/353a9b2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/353a9b2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/353a9b2d

Branch: refs/heads/YARN-5734
Commit: 353a9b2d9165a221491395edbadf8acc3a39990b
Parents: 8e53f2b
Author: Karthik Kambatla 
Authored: Tue Feb 14 15:19:52 2017 -0800
Committer: Karthik Kambatla 
Committed: Tue Feb 14 15:19:52 2017 -0800

--
 .../scheduler/fair/policies/DominantResourceFairnessPolicy.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/353a9b2d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
index 7a29735..ad41b11 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
@@ -90,7 +90,7 @@ public class DominantResourceFairnessPolicy extends 
SchedulingPolicy {
 
   @Override
   public boolean checkIfUsageOverFairShare(Resource usage, Resource fairShare) 
{
-return Resources.greaterThan(CALCULATOR, null, usage, fairShare);
+return !Resources.fitsIn(usage, fairShare);
   }
 
   @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[26/50] [abbrv] hadoop git commit: HDFS-11100. Recursively deleting file protected by sticky bit should fail. Contributed by John Zhuge.

2017-02-21 Thread xgong
HDFS-11100. Recursively deleting file protected by sticky bit should fail. 
Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5690b51e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5690b51e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5690b51e

Branch: refs/heads/YARN-5734
Commit: 5690b51ef7c708c0a71162ddaff04466bc71cdcc
Parents: e63a781
Author: Wei-Chiu Chuang 
Authored: Thu Feb 16 05:39:37 2017 -0800
Committer: Wei-Chiu Chuang 
Committed: Thu Feb 16 05:39:37 2017 -0800

--
 .../apache/hadoop/fs/FSExceptionMessages.java   |  3 +
 .../server/namenode/FSPermissionChecker.java| 87 +---
 .../hadoop/fs/permission/TestStickyBit.java | 63 ++
 3 files changed, 142 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5690b51e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
index 1511bb0..a8e7b71 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
@@ -48,4 +48,7 @@ public class FSExceptionMessages {
   = "Requested more bytes than destination buffer size";
 
   public static final String PERMISSION_DENIED = "Permission denied";
+
+  public static final String PERMISSION_DENIED_BY_STICKY_BIT =
+  "Permission denied by sticky bit";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5690b51e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 107d563..f1250dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -18,11 +18,15 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
 import java.util.Stack;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
@@ -280,9 +284,20 @@ class FSPermissionChecker implements AccessControlEnforcer 
{
   return;
 }
 
+// Each inode in the subtree has a level. The root inode has level 0.
+// List subINodePath tracks the inode path in the subtree during
+// traversal. The root inode is not stored because it is already in array
+// components. The list index is (level - 1).
+ArrayList subINodePath = new ArrayList<>();
+
+// The stack of levels matches the stack of directory inodes.
+Stack levels = new Stack<>();
+levels.push(0);// Level 0 is the root
+
 Stack directories = new Stack();
 for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
   INodeDirectory d = directories.pop();
+  int level = levels.pop();
   ReadOnlyList cList = d.getChildrenList(snapshotId);
   if (!(cList.isEmpty() && ignoreEmptyDir)) {
 //TODO have to figure this out with inodeattribute provider
@@ -292,11 +307,44 @@ class FSPermissionChecker implements 
AccessControlEnforcer {
   throw new AccessControlException(
   toAccessControlString(inodeAttr, d.getFullPathName(), access));
 }
+
+if (level > 0) {
+  if (level - 1 < subINodePath.size()) {
+subINodePath.set(level - 1, d);
+  } else {
+Preconditions.checkState(level - 1 == subINodePath.size());
+subINodePath.add(d);
+  }
+}
+
+if (inodeAttr.getFsPermission().getStickyBit()) {
+  for (INode child : cList) {
+INodeAttributes childInodeAttr =
+getINodeAttrs(components, pathIdx, child, snapshotId);
+if 

[40/57] [abbrv] hadoop git commit: HDFS-10534. NameNode WebUI should display DataNode usage histogram. Contributed by Kai Sasaki.

2017-01-26 Thread xgong
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18e1d682/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
index 5021fb5..0901125 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
@@ -292,4 +292,13 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
   min-width: 75px;
   float: right;
   left: 75px;
+}
+
+.bar rect {
+fill: #5FA33F;
+}
+
+.bar text {
+fill: #fff;
+font: 10px sans-serif;
 }
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[31/57] [abbrv] hadoop git commit: YARN-6117. SharedCacheManager does not start up. Contributed by Chris Trezzo.

2017-01-26 Thread xgong
YARN-6117. SharedCacheManager does not start up. Contributed by Chris Trezzo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc6ec970
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc6ec970
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc6ec970

Branch: refs/heads/YARN-5734
Commit: dc6ec9704829f180ce0e182c436fe1a435744c88
Parents: a2c5012
Author: Sangjin Lee 
Authored: Mon Jan 23 21:07:25 2017 -0800
Committer: Sangjin Lee 
Committed: Mon Jan 23 21:07:25 2017 -0800

--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml  | 1 +
 .../hadoop-yarn-common/src/main/resources/webapps/sharedcache/.keep | 0
 2 files changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc6ec970/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index ca410f6..5707444 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -220,6 +220,7 @@
 src/main/resources/webapps/yarn/.keep
 
src/main/resources/webapps/applicationhistory/.keep
 src/main/resources/webapps/timeline/.keep
+src/main/resources/webapps/sharedcache/.keep
 src/main/resources/webapps/cluster/.keep
 src/main/resources/webapps/test/.keep
 src/main/resources/webapps/proxy/.keep

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc6ec970/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/sharedcache/.keep
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/sharedcache/.keep
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/sharedcache/.keep
new file mode 100644
index 000..e69de29


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[21/57] [abbrv] hadoop git commit: YARN-6106. Document FairScheduler 'allowPreemptionFrom' queue property. (Yufei Gu via rchiang)

2017-01-26 Thread xgong
YARN-6106.  Document FairScheduler 'allowPreemptionFrom' queue property. (Yufei 
Gu via rchiang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9bab85ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9bab85ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9bab85ca

Branch: refs/heads/YARN-5734
Commit: 9bab85cadfdfa0d9071303452f72c0a5d008480a
Parents: 355b907
Author: Ray Chiang 
Authored: Fri Jan 20 14:04:10 2017 -0800
Committer: Ray Chiang 
Committed: Fri Jan 20 14:08:25 2017 -0800

--
 .../hadoop-yarn-site/src/site/markdown/FairScheduler.md| 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9bab85ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index ae4c3ab..5d7b3dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -119,6 +119,8 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 * fairSharePreemptionThreshold: the fair share preemption threshold for 
the queue. If the queue waits fairSharePreemptionTimeout without receiving 
fairSharePreemptionThreshold\*fairShare resources, it is allowed to preempt 
containers to take resources from other queues. If not set, the queue will 
inherit the value from its parent queue.
 
+* allowPreemptionFrom: determines whether the scheduler is allowed to 
preempt resources from the queue. The default is true. If a queue has this 
property set to false, this property will apply recursively to all child queues.
+
 * **User elements**: which represent settings governing the behavior of 
individual users. They can contain a single property: maxRunningApps, a limit 
on the number of running apps for a particular user.
 
 * **A userMaxAppsDefault element**: which sets the default running app limit 
for any users whose limit is not otherwise specified.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[35/57] [abbrv] hadoop git commit: YARN-6099. Improve webservice to list aggregated log files. Contributed by Xuan Gong.

2017-01-26 Thread xgong
YARN-6099. Improve webservice to list aggregated log files. Contributed by Xuan 
Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8528d85a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8528d85a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8528d85a

Branch: refs/heads/YARN-5734
Commit: 8528d85a68c0e6ea71026df4d3026e7edc206b2d
Parents: a33ce45
Author: Junping Du 
Authored: Tue Jan 24 15:26:53 2017 -0800
Committer: Junping Du 
Committed: Tue Jan 24 15:26:53 2017 -0800

--
 .../apache/hadoop/yarn/client/cli/LogsCLI.java  |  76 +++
 .../logaggregation/AggregatedLogFormat.java |   2 +-
 .../yarn/logaggregation/ContainerLogMeta.java   |  61 ++
 .../yarn/logaggregation/ContainerLogType.java   |  31 +++
 .../logaggregation/LogAggregationUtils.java |  64 +-
 .../yarn/logaggregation/LogCLIHelpers.java  | 119 ---
 .../yarn/logaggregation/LogToolUtils.java   | 117 +++
 .../logaggregation/PerContainerLogFileInfo.java |  93 +
 .../logaggregation/TestContainerLogsUtils.java  | 122 +++
 .../webapp/AHSWebServices.java  | 148 +-
 .../webapp/TestAHSWebServices.java  | 200 +++
 .../server/webapp/dao/ContainerLogsInfo.java|  78 +++-
 .../nodemanager/webapp/NMWebServices.java   |  56 +-
 .../webapp/dao/ContainerLogsInfo.java   | 112 ---
 .../webapp/dao/NMContainerLogsInfo.java |  79 
 .../nodemanager/webapp/TestNMWebServices.java   |  64 +-
 16 files changed, 897 insertions(+), 525 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8528d85a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index a9ca96c..b8119e5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest;
 import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers;
+import org.apache.hadoop.yarn.logaggregation.PerContainerLogFileInfo;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.codehaus.jettison.json.JSONArray;
@@ -409,9 +410,10 @@ public class LogsCLI extends Configured implements Tool {
 return false;
   }
 
-  private List getContainerLogFiles(Configuration conf,
-  String containerIdStr, String nodeHttpAddress) throws IOException {
-List logFileInfos = new ArrayList<>();
+  private List getContainerLogFiles(
+  Configuration conf, String containerIdStr, String nodeHttpAddress)
+  throws IOException {
+List logFileInfos = new ArrayList<>();
 Client webServiceClient = Client.create();
 try {
   WebResource webResource = webServiceClient
@@ -425,11 +427,20 @@ public class LogsCLI extends Configured implements Tool {
   ClientResponse.Status.OK.getStatusCode()) {
 try {
   JSONObject json = response.getEntity(JSONObject.class);
-  JSONArray array = json.getJSONArray("containerLogInfo");
+  JSONArray array = json.getJSONArray("containerLogsInfo");
   for (int i = 0; i < array.length(); i++) {
-String fileName = array.getJSONObject(i).getString("fileName");
-String fileSize = array.getJSONObject(i).getString("fileSize");
-logFileInfos.add(new PerLogFileInfo(fileName, fileSize));
+JSONObject log = array.getJSONObject(i);
+Object ob = log.get("containerLogInfo");
+if (ob instanceof JSONArray) {
+  JSONArray obArray = (JSONArray)ob;
+  for (int j = 0; j < obArray.length(); j++) {
+logFileInfos.add(generatePerContainerLogFileInfoFromJSON(
+obArray.getJSONObject(j)));
+  }
+} else if (ob instanceof JSONObject) {
+  logFileInfos.add(generatePerContainerLogFileInfoFromJSON(
+  (JSONObject)ob));
+}
   }
 } catch (Exception e) {
   System.err.println("Unable to parse json from webservice. Error:");
@@ 

[30/57] [abbrv] hadoop git commit: YARN-6012. Remove node label (removeFromClusterNodeLabels) document is missing (Contributed by Ying Zhang via Daniel Templeton)

2017-01-26 Thread xgong
YARN-6012. Remove node label (removeFromClusterNodeLabels) document is missing 
(Contributed by Ying Zhang via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2c50127
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2c50127
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2c50127

Branch: refs/heads/YARN-5734
Commit: a2c50127d12b0aefcf1b050e0ab0d2df2abb30b3
Parents: ce83205
Author: Daniel Templeton 
Authored: Mon Jan 23 17:40:44 2017 -0800
Committer: Daniel Templeton 
Committed: Mon Jan 23 17:42:54 2017 -0800

--
 .../hadoop-yarn-site/src/site/markdown/NodeLabel.md  | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2c50127/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
index a87658d..73c9f41 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
@@ -20,6 +20,7 @@ YARN Node Labels
 * [Configuration](#Configuration)
 * [Setting up ResourceManager to enable Node 
Labels](#Setting_up_ResourceManager_to_enable_Node_Labels)
 * [Add/modify node labels list to 
YARN](#Add/modify_node_labels_list_to_YARN)
+* [Remove node labels from YARN](#Remove_node_labels_from_YARN)
 * [Add/modify node-to-labels mapping to 
YARN](#Add/modify_node-to-labels_mapping_to_YARN)
 * [Configuration of Schedulers for node 
labels](#Configuration_of_Schedulers_for_node_labels)
 * [Specifying node label for 
application](#Specifying_node_label_for_application)
@@ -86,6 +87,13 @@ Notes:
 * If the user doesn’t specify “(exclusive=…)”, exclusive will be 
```true``` by default.
 * Run ```yarn cluster --list-node-labels``` to check added node labels are 
visible in the cluster.
 
+###Remove node labels from YARN
+
+* Remove cluster node labels:
+* To remove one or more node labels, execute the following command: 
```yarn rmadmin -removeFromClusterNodeLabels "<label>[,<label>,...]"```. The 
command argument should be a comma-separated list of node labels to remove.
+* It is not allowed to remove a label which has been associated with 
queues, i.e., one or more queues have access to this label.
+* To verify if specified node labels have been successfully removed, run 
```yarn cluster --list-node-labels```.
+
 ###Add/modify node-to-labels mapping to YARN
 
 * Configuring nodes to labels mapping in **Centralized** NodeLabel setup


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/57] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-26 Thread xgong
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for flow run
+ * table. Looks through the list of cells per row, checks their tags and does
+ * operation on those cells as per the cell tags. Transforms reads of the 
stored
+ * metrics into calculated sums for each column Also, finds the min and max for
+ * start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * use a special application id to represent the flow id this is needed since
+   * TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner 
internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation 

[13/57] [abbrv] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-26 Thread xgong
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 000..42488f4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the application table.
+ */
+public enum ApplicationColumnPrefix implements ColumnPrefix {
+
+  /**
+   * To store TimelineEntity getIsRelatedToEntities values.
+   */
+  IS_RELATED_TO(ApplicationColumnFamily.INFO, "s"),
+
+  /**
+   * To store TimelineEntity getRelatesToEntities values.
+   */
+  RELATES_TO(ApplicationColumnFamily.INFO, "r"),
+
+  /**
+   * To store TimelineEntity info values.
+   */
+  INFO(ApplicationColumnFamily.INFO, "i"),
+
+  /**
+   * Lifecycle events for an application.
+   */
+  EVENT(ApplicationColumnFamily.INFO, "e"),
+
+  /**
+   * Config column stores configuration with config key as the column name.
+   */
+  CONFIG(ApplicationColumnFamily.CONFIGS, null),
+
+  /**
+   * Metrics are stored with the metric name as the column name.
+   */
+  METRIC(ApplicationColumnFamily.METRICS, null, new LongConverter());
+
+  private final ColumnHelper column;
+  private final ColumnFamily columnFamily;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix) {
+this(columnFamily, columnPrefix, GenericConverter.getInstance());
+  }
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   * @param converter used to encode/decode values to be stored in HBase for
+   * this column prefix.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix, ValueConverter converter) {
+column = new ColumnHelper(columnFamily, converter);
+this.columnFamily = 

[32/57] [abbrv] hadoop git commit: YARN-6082. Invalid REST api response for getApps since queueUsagePercentage is coming as INF. Contributed by Sunil G.

2017-01-26 Thread xgong
YARN-6082. Invalid REST api response for getApps since queueUsagePercentage is 
coming as INF. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0101267d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0101267d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0101267d

Branch: refs/heads/YARN-5734
Commit: 0101267d9d801eab4cb3b4df289c402ecb591685
Parents: dc6ec97
Author: Rohith Sharma K S 
Authored: Tue Jan 24 10:59:55 2017 +0530
Committer: Rohith Sharma K S 
Committed: Tue Jan 24 10:59:55 2017 +0530

--
 .../scheduler/SchedulerApplicationAttempt.java  |  9 ++---
 .../scheduler/TestSchedulerApplicationAttempt.java  | 12 
 2 files changed, 18 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0101267d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 7558eac..3e41cb4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -1037,9 +1037,12 @@ public class SchedulerApplicationAttempt implements 
SchedulableEntity {
   float queueUsagePerc = 0.0f;
   float clusterUsagePerc = 0.0f;
   if (!calc.isInvalidDivisor(cluster)) {
-queueUsagePerc = calc.divide(cluster, usedResourceClone, Resources
-.multiply(cluster, queue.getQueueInfo(false, false).getCapacity()))
-* 100;
+float queueCapacityPerc = queue.getQueueInfo(false, false)
+.getCapacity();
+if (queueCapacityPerc != 0) {
+  queueUsagePerc = calc.divide(cluster, usedResourceClone,
+  Resources.multiply(cluster, queueCapacityPerc)) * 100;
+}
 clusterUsagePerc = calc.divide(cluster, usedResourceClone, cluster)
 * 100;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0101267d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
index 9a6c8d4..c5e5183 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
@@ -227,6 +227,18 @@ public class TestSchedulerApplicationAttempt {
 0.01f);
 assertEquals(60.0f,
 app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
+
+queue = createQueue("test3", null, 0.0f);
+app = new SchedulerApplicationAttempt(appAttId, user, queue,
+queue.getActiveUsersManager(), rmContext);
+
+// Resource request
+app.attemptResourceUsage.incUsed(requestedResource);
+
+assertEquals(0.0f, app.getResourceUsageReport().getQueueUsagePercentage(),
+0.01f);
+assertEquals(15.0f,
+app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
   }
 
   @Test


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[48/57] [abbrv] hadoop git commit: MAPREDUCE-6808. Log map attempts as part of shuffle handler audit log (Contributed by Gergő Pásztor via Daniel Templeton)

2017-01-26 Thread xgong
MAPREDUCE-6808. Log map attempts as part of shuffle handler audit log 
(Contributed by Gergő Pásztor via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7463b6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7463b6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7463b6c

Branch: refs/heads/YARN-5734
Commit: a7463b6c88f698950a2f326030261001aa51b35e
Parents: 9e19f75
Author: Daniel Templeton 
Authored: Wed Jan 25 14:30:50 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Jan 25 14:32:40 2017 -0800

--
 .../src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7463b6c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 9547062..15a1b89 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -947,7 +947,7 @@ public class ShuffleHandler extends AuxiliaryService {
   // to turn it on please enable this audit log
   // on log4j.properties by uncommenting the setting
   if (AUDITLOG.isDebugEnabled()) {
-AUDITLOG.debug("shuffle for " + jobQ.get(0) +
+AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
  " reducer " + reduceQ.get(0));
   }
   int reduceId;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[55/57] [abbrv] hadoop git commit: YARN-4975. Fair Scheduler: exception thrown when a parent queue marked 'parent' has configured child queues (Contributed by Yufei Gu via Daniel Templeton)

2017-01-26 Thread xgong
YARN-4975. Fair Scheduler: exception thrown when a parent queue marked 'parent' 
has configured child queues
(Contributed by Yufei Gu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f85b74cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f85b74cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f85b74cc

Branch: refs/heads/YARN-5734
Commit: f85b74ccf9f1c1c1444cc00750b03468cbf40fb9
Parents: 7c1cc30
Author: Daniel Templeton 
Authored: Thu Jan 26 10:31:09 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Jan 26 10:31:09 2017 -0800

--
 .../fair/AllocationFileLoaderService.java   | 26 +++---
 .../fair/TestAllocationFileLoaderService.java   | 88 
 2 files changed, 101 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f85b74cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index cd4a19b..163a265 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -487,6 +487,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
 Map racls = new HashMap<>();
 NodeList fields = element.getChildNodes();
 boolean isLeaf = true;
+boolean isReservable = false;
 
 for (int j = 0; j < fields.getLength(); j++) {
   Node fieldNode = fields.item(j);
@@ -558,7 +559,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
 racls.put(ReservationACL.SUBMIT_RESERVATIONS,
 new AccessControlList(text));
   } else if ("reservation".equals(field.getTagName())) {
-isLeaf = false;
+isReservable = true;
 reservableQueues.add(queueName);
 configuredQueues.get(FSQueueType.PARENT).add(queueName);
   } else if ("allowPreemptionFrom".equals(field.getTagName())) {
@@ -577,22 +578,21 @@ public class AllocationFileLoaderService extends 
AbstractService {
 isLeaf = false;
   }
 }
-if (isLeaf) {
-  // if a leaf in the alloc file is marked as type='parent'
-  // then store it under 'parent'
-  if ("parent".equals(element.getAttribute("type"))) {
-configuredQueues.get(FSQueueType.PARENT).add(queueName);
-  } else {
-configuredQueues.get(FSQueueType.LEAF).add(queueName);
-  }
+
+// if a leaf in the alloc file is marked as type='parent'
+// then store it as a parent queue
+if (isLeaf && !"parent".equals(element.getAttribute("type"))) {
+  configuredQueues.get(FSQueueType.LEAF).add(queueName);
 } else {
-  if ("parent".equals(element.getAttribute("type"))) {
-throw new AllocationConfigurationException("Both  and " +
-"type=\"parent\" found for queue " + queueName + " which is " +
-"unsupported");
+  if (isReservable) {
+throw new AllocationConfigurationException("The configuration settings"
++ " for " + queueName + " are invalid. A queue element that "
++ "contains child queue elements or that has the type='parent' "
++ "attribute cannot also include a reservation element.");
   }
   configuredQueues.get(FSQueueType.PARENT).add(queueName);
 }
+
 // Set default acls if not defined
 // The root queue defaults to all access
 for (QueueACL acl : QueueACL.values()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f85b74cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
--
diff --git 

[47/57] [abbrv] hadoop git commit: YARN-5641. Localizer leaves behind tarballs after container is complete. Contributed by Eric Badger

2017-01-26 Thread xgong
YARN-5641. Localizer leaves behind tarballs after container is complete. 
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e19f758
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e19f758
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e19f758

Branch: refs/heads/YARN-5734
Commit: 9e19f758c1950cbcfcd1969461a8a910efca0767
Parents: a46933e
Author: Jason Lowe 
Authored: Wed Jan 25 21:41:43 2017 +
Committer: Jason Lowe 
Committed: Wed Jan 25 21:41:43 2017 +

--
 .../main/java/org/apache/hadoop/util/Shell.java |  52 ++-
 .../java/org/apache/hadoop/util/TestShell.java  |   4 +-
 .../localizer/ContainerLocalizer.java   |  53 +++-
 .../localizer/TestContainerLocalizer.java   | 317 +++
 4 files changed, 348 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e19f758/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index 83877b7..ca59b0e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -27,7 +27,9 @@ import java.io.InterruptedIOException;
 import java.nio.charset.Charset;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
 import java.util.WeakHashMap;
@@ -50,8 +52,8 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public abstract class Shell {
-  private static final Map  CHILD_PROCESSES =
-  Collections.synchronizedMap(new WeakHashMap());
+  private static final Map CHILD_SHELLS =
+  Collections.synchronizedMap(new WeakHashMap());
   public static final Logger LOG = LoggerFactory.getLogger(Shell.class);
 
   /**
@@ -820,6 +822,7 @@ public abstract class Shell {
   private File dir;
   private Process process; // sub process used to execute the command
   private int exitCode;
+  private Thread waitingThread;
 
   /** Flag to indicate whether or not the script has finished executing. */
   private final AtomicBoolean completed = new AtomicBoolean(false);
@@ -920,7 +923,9 @@ public abstract class Shell {
 } else {
   process = builder.start();
 }
-CHILD_PROCESSES.put(process, null);
+
+waitingThread = Thread.currentThread();
+CHILD_SHELLS.put(this, null);
 
 if (timeOutInterval > 0) {
   timeOutTimer = new Timer("Shell command timeout");
@@ -1017,7 +1022,8 @@ public abstract class Shell {
 LOG.warn("Error while closing the error stream", ioe);
   }
   process.destroy();
-  CHILD_PROCESSES.remove(process);
+  waitingThread = null;
+  CHILD_SHELLS.remove(this);
   lastTime = Time.monotonicNow();
 }
   }
@@ -1065,6 +1071,15 @@ public abstract class Shell {
 return exitCode;
   }
 
+  /** get the thread that is waiting on this instance of Shell.
+   * @return the thread that ran runCommand() that spawned this shell
+   * or null if no thread is waiting for this shell to complete
+   */
+  public Thread getWaitingThread() {
+return waitingThread;
+  }
+
+
   /**
* This is an IOException with exit code added.
*/
@@ -1318,20 +1333,27 @@ public abstract class Shell {
   }
 
   /**
-   * Static method to destroy all running Shell processes
-   * Iterates through a list of all currently running Shell
-   * processes and destroys them one by one. This method is thread safe and
-   * is intended to be used in a shutdown hook.
+   * Static method to destroy all running Shell processes.
+   * Iterates through a map of all currently running Shell
+   * processes and destroys them one by one. This method is thread safe
*/
-  public static void destroyAllProcesses() {
-synchronized (CHILD_PROCESSES) {
-  for (Process key : CHILD_PROCESSES.keySet()) {
-Process process = key;
-if (key != null) {
-  process.destroy();
+  public static void destroyAllShellProcesses() {
+synchronized (CHILD_SHELLS) {
+  for (Shell shell : CHILD_SHELLS.keySet()) {
+if (shell.getProcess() != null) {
+  shell.getProcess().destroy();
 }
   }
-  CHILD_PROCESSES.clear();
+  CHILD_SHELLS.clear();
+}
+  }
+
+  /**
+   * Static method to return a Set of all Shell 

[51/57] [abbrv] hadoop git commit: Add CHANGES, RELEASENOTES, and jdiff for 3.0.0-alpha2 release.

2017-01-26 Thread xgong
Add CHANGES, RELEASENOTES, and jdiff for 3.0.0-alpha2 release.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff02bdfe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff02bdfe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff02bdfe

Branch: refs/heads/YARN-5734
Commit: ff02bdfe65b5ff894efd1a3aa0e35bac9f4b783c
Parents: a7463b6
Author: Andrew Wang 
Authored: Wed Jan 25 12:49:29 2017 -0800
Committer: Andrew Wang 
Committed: Wed Jan 25 15:39:40 2017 -0800

--
 .../3.0.0-alpha2/CHANGES.3.0.0-alpha2.md| 927 +++
 .../3.0.0-alpha2/RELEASENOTES.3.0.0-alpha2.md   | 618 +
 .../jdiff/Apache_Hadoop_HDFS_3.0.0-alpha2.xml   | 326 +++
 3 files changed, 1871 insertions(+)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[52/57] [abbrv] hadoop git commit: HADOOP-13989. Remove erroneous source jar option from hadoop-client shade configuration. Contributed by Joe Pallas.

2017-01-26 Thread xgong
HADOOP-13989. Remove erroneous source jar option from hadoop-client shade 
configuration. Contributed by Joe Pallas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd59b9cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd59b9cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd59b9cc

Branch: refs/heads/YARN-5734
Commit: cd59b9ccab51376310484a6e3d9179bb52fccae1
Parents: ff02bdf
Author: Andrew Wang 
Authored: Wed Jan 25 15:40:45 2017 -0800
Committer: Andrew Wang 
Committed: Wed Jan 25 15:40:45 2017 -0800

--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml | 1 -
 hadoop-client-modules/hadoop-client-runtime/pom.xml | 1 -
 2 files changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd59b9cc/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index d29ef8f..83d2748 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -575,7 +575,6 @@
   shade
 
 
-  true
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd59b9cc/hadoop-client-modules/hadoop-client-runtime/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 0f3140f..cff3329 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -138,7 +138,6 @@
   shade
 
 
-  true
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[56/57] [abbrv] hadoop git commit: HDFS-11364. Add a test to verify Audit log entries for setfacl/getfacl commands over FS shell. Contributed by Manoj Govindassamy.

2017-01-26 Thread xgong
HDFS-11364. Add a test to verify Audit log entries for setfacl/getfacl commands 
over FS shell. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44606aa8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44606aa8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44606aa8

Branch: refs/heads/YARN-5734
Commit: 44606aa8508a6e98219b8330e625c8d397bfb067
Parents: f85b74c
Author: Xiao Chen 
Authored: Thu Jan 26 10:44:29 2017 -0800
Committer: Xiao Chen 
Committed: Thu Jan 26 10:48:26 2017 -0800

--
 .../hdfs/server/namenode/TestAuditLogger.java   | 73 
 1 file changed, 73 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44606aa8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index d637abc..0e3cc8d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
@@ -58,6 +59,14 @@ import java.util.List;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_KEY;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_MAX_SIZE_KEY;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY;
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
+import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
+import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+import static org.apache.hadoop.fs.permission.FsAction.ALL;
+import static org.apache.hadoop.fs.permission.FsAction.EXECUTE;
+import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.NNTOP_ENABLED_KEY;
@@ -444,6 +453,70 @@ public class TestAuditLogger {
   }
 
   /**
+   * Verify Audit log entries for the successful ACL API calls and ACL commands
+   * over FS Shell.
+   */
+  @Test (timeout = 60000)
+  public void testAuditLogForAcls() throws Exception {
+final Configuration conf = new HdfsConfiguration();
+conf.setBoolean(DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
+DummyAuditLogger.class.getName());
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+try {
+  cluster.waitClusterUp();
+  assertTrue(DummyAuditLogger.initialized);
+
+  final FileSystem fs = cluster.getFileSystem();
+  final Path p = new Path("/debug.log");
+  DFSTestUtil.createFile(fs, p, 1024, (short)1, 0L);
+
+  DummyAuditLogger.resetLogCount();
+  fs.getAclStatus(p);
+  assertEquals(1, DummyAuditLogger.logCount);
+
+  // FS shell command '-getfacl' additionally calls getFileInfo() and then
+  // followed by getAclStatus() only if the ACL bit is set. Since the
+  // initial permission didn't have the ACL bit set, getAclStatus() is
+  // skipped.
+  DFSTestUtil.FsShellRun("-getfacl " + p.toUri().getPath(), 0, null, conf);
+  assertEquals(2, DummyAuditLogger.logCount);
+
+  final List<AclEntry> acls = Lists.newArrayList();
+  acls.add(AclTestHelpers.aclEntry(ACCESS, USER, ALL));
+  acls.add(AclTestHelpers.aclEntry(ACCESS, USER, "user1", ALL));
+  acls.add(AclTestHelpers.aclEntry(ACCESS, GROUP, READ_EXECUTE));
+  acls.add(AclTestHelpers.aclEntry(ACCESS, OTHER, EXECUTE));
+
+  fs.setAcl(p, acls);
+  assertEquals(3, DummyAuditLogger.logCount);
+
+  // Since the file has ACL bit set, FS shell command '-getfacl' should now
+  // call getAclStatus() additionally after 

<    1   2   3   4   5   6   7   8   9   10   >