hadoop git commit: HADOOP-13720. Add more info to the msgs printed in AbstractDelegationTokenSecretManager. Contributed by Yongjun Zhang.

2016-11-10 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 1a6a5af44 -> 51f5bab69


HADOOP-13720. Add more info to the msgs printed in 
AbstractDelegationTokenSecretManager. Contributed by Yongjun Zhang.

(cherry picked from commit fd2f22adec5c2f21f792045dbfde9385c21403ec)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51f5bab6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51f5bab6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51f5bab6

Branch: refs/heads/branch-2.8
Commit: 51f5bab695f693e05dd59feaec4afb8cd07745f6
Parents: 1a6a5af
Author: Yongjun Zhang 
Authored: Thu Nov 10 22:21:54 2016 -0800
Committer: Yongjun Zhang 
Committed: Thu Nov 10 23:41:48 2016 -0800

--
 .../AbstractDelegationTokenSecretManager.java   | 69 +---
 .../main/java/org/apache/hadoop/util/Time.java  | 18 +
 .../org/apache/hadoop/io/file/tfile/Timer.java  | 52 +++
 .../java/org/apache/hadoop/util/TestTime.java   | 50 ++
 4 files changed, 136 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f5bab6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index cc2efc9..0e311dd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -53,6 +53,10 @@ extends AbstractDelegationTokenIdentifier>
   private static final Log LOG = LogFactory
   .getLog(AbstractDelegationTokenSecretManager.class);
 
+  private String formatTokenId(TokenIdent id) {
+return "(" + id + ")";
+  }
+
   /** 
* Cache of currently valid tokens, mapping from DelegationTokenIdentifier 
* to DelegationTokenInformation. Protected by this object lock.
@@ -312,7 +316,8 @@ extends AbstractDelegationTokenIdentifier>
 int keyId = identifier.getMasterKeyId();
 DelegationKey dKey = allKeys.get(keyId);
 if (dKey == null) {
-  LOG.warn("No KEY found for persisted identifier " + 
identifier.toString());
+  LOG.warn("No KEY found for persisted identifier "
+  + formatTokenId(identifier));
   return;
 }
 byte[] password = createPassword(identifier.getBytes(), dKey.getKey());
@@ -323,7 +328,8 @@ extends AbstractDelegationTokenIdentifier>
   currentTokens.put(identifier, new DelegationTokenInformation(renewDate,
   password, getTrackingIdIfEnabled(identifier)));
 } else {
-  throw new IOException("Same delegation token being added twice.");
+  throw new IOException("Same delegation token being added twice: "
+  + formatTokenId(identifier));
 }
   }
 
@@ -393,7 +399,7 @@ extends AbstractDelegationTokenIdentifier>
 identifier.setMaxDate(now + tokenMaxLifetime);
 identifier.setMasterKeyId(currentKey.getKeyId());
 identifier.setSequenceNumber(sequenceNum);
-LOG.info("Creating password for identifier: " + identifier
+LOG.info("Creating password for identifier: " + formatTokenId(identifier)
 + ", currentKey: " + currentKey.getKeyId());
 byte[] password = createPassword(identifier.getBytes(), 
currentKey.getKey());
 DelegationTokenInformation tokenInfo = new DelegationTokenInformation(now
@@ -401,7 +407,8 @@ extends AbstractDelegationTokenIdentifier>
 try {
   storeToken(identifier, tokenInfo);
 } catch (IOException ioe) {
-  LOG.error("Could not store token !!", ioe);
+  LOG.error("Could not store token " + formatTokenId(identifier) + "!!",
+  ioe);
 }
 return password;
   }
@@ -418,11 +425,14 @@ extends AbstractDelegationTokenIdentifier>
 assert Thread.holdsLock(this);
 DelegationTokenInformation info = getTokenInfo(identifier);
 if (info == null) {
-  throw new InvalidToken("token (" + identifier.toString()
-  + ") can't be found in cache");
+  throw new InvalidToken("token " + formatTokenId(identifier)
+  + " can't be found in cache");
 }
-if (info.getRenewDate() < Time.now()) {
-  throw new InvalidToken("token (" + identifier.toString() + ") is 
expired");
+long now = Time.now();
+if (info.getRenewDate() < now) {
+  throw new InvalidToken("token " + 

hadoop git commit: HADOOP-13720. Add more info to the msgs printed in AbstractDelegationTokenSecretManager. Contributed by Yongjun Zhang.

2016-11-10 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 470bdaa27 -> fd2f22ade


HADOOP-13720. Add more info to the msgs printed in 
AbstractDelegationTokenSecretManager. Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd2f22ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd2f22ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd2f22ad

Branch: refs/heads/trunk
Commit: fd2f22adec5c2f21f792045dbfde9385c21403ec
Parents: 470bdaa
Author: Yongjun Zhang 
Authored: Thu Nov 10 22:21:54 2016 -0800
Committer: Yongjun Zhang 
Committed: Thu Nov 10 22:42:59 2016 -0800

--
 .../AbstractDelegationTokenSecretManager.java   | 69 +---
 .../main/java/org/apache/hadoop/util/Time.java  | 18 +
 .../org/apache/hadoop/io/file/tfile/Timer.java  | 52 +++
 .../java/org/apache/hadoop/util/TestTime.java   | 50 ++
 4 files changed, 136 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd2f22ad/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index cc2efc9..0e311dd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -53,6 +53,10 @@ extends AbstractDelegationTokenIdentifier>
   private static final Log LOG = LogFactory
   .getLog(AbstractDelegationTokenSecretManager.class);
 
+  private String formatTokenId(TokenIdent id) {
+return "(" + id + ")";
+  }
+
   /** 
* Cache of currently valid tokens, mapping from DelegationTokenIdentifier 
* to DelegationTokenInformation. Protected by this object lock.
@@ -312,7 +316,8 @@ extends AbstractDelegationTokenIdentifier>
 int keyId = identifier.getMasterKeyId();
 DelegationKey dKey = allKeys.get(keyId);
 if (dKey == null) {
-  LOG.warn("No KEY found for persisted identifier " + 
identifier.toString());
+  LOG.warn("No KEY found for persisted identifier "
+  + formatTokenId(identifier));
   return;
 }
 byte[] password = createPassword(identifier.getBytes(), dKey.getKey());
@@ -323,7 +328,8 @@ extends AbstractDelegationTokenIdentifier>
   currentTokens.put(identifier, new DelegationTokenInformation(renewDate,
   password, getTrackingIdIfEnabled(identifier)));
 } else {
-  throw new IOException("Same delegation token being added twice.");
+  throw new IOException("Same delegation token being added twice: "
+  + formatTokenId(identifier));
 }
   }
 
@@ -393,7 +399,7 @@ extends AbstractDelegationTokenIdentifier>
 identifier.setMaxDate(now + tokenMaxLifetime);
 identifier.setMasterKeyId(currentKey.getKeyId());
 identifier.setSequenceNumber(sequenceNum);
-LOG.info("Creating password for identifier: " + identifier
+LOG.info("Creating password for identifier: " + formatTokenId(identifier)
 + ", currentKey: " + currentKey.getKeyId());
 byte[] password = createPassword(identifier.getBytes(), 
currentKey.getKey());
 DelegationTokenInformation tokenInfo = new DelegationTokenInformation(now
@@ -401,7 +407,8 @@ extends AbstractDelegationTokenIdentifier>
 try {
   storeToken(identifier, tokenInfo);
 } catch (IOException ioe) {
-  LOG.error("Could not store token !!", ioe);
+  LOG.error("Could not store token " + formatTokenId(identifier) + "!!",
+  ioe);
 }
 return password;
   }
@@ -418,11 +425,14 @@ extends AbstractDelegationTokenIdentifier>
 assert Thread.holdsLock(this);
 DelegationTokenInformation info = getTokenInfo(identifier);
 if (info == null) {
-  throw new InvalidToken("token (" + identifier.toString()
-  + ") can't be found in cache");
+  throw new InvalidToken("token " + formatTokenId(identifier)
+  + " can't be found in cache");
 }
-if (info.getRenewDate() < Time.now()) {
-  throw new InvalidToken("token (" + identifier.toString() + ") is 
expired");
+long now = Time.now();
+if (info.getRenewDate() < now) {
+  throw new InvalidToken("token " + formatTokenId(identifier) + " is " +
+  "expired, current time: " + 

hadoop git commit: HADOOP-12718. Incorrect error message by fs -put local dir without permission. (John Zhuge via Yongjun Zhang)

2016-11-10 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8848a8a76 -> 470bdaa27


HADOOP-12718. Incorrect error message by fs -put local dir without permission. 
(John Zhuge via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/470bdaa2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/470bdaa2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/470bdaa2

Branch: refs/heads/trunk
Commit: 470bdaa27a771467fcd44dfa9c9c951501642ac6
Parents: 8848a8a
Author: Yongjun Zhang 
Authored: Thu Nov 10 22:38:38 2016 -0800
Committer: Yongjun Zhang 
Committed: Thu Nov 10 22:38:38 2016 -0800

--
 .../apache/hadoop/fs/FSExceptionMessages.java   |  2 +
 .../java/org/apache/hadoop/fs/FileUtil.java |  8 ++-
 .../org/apache/hadoop/fs/TestFsShellCopy.java   | 51 
 3 files changed, 60 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/470bdaa2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
index 95724ff..1511bb0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
@@ -46,4 +46,6 @@ public class FSExceptionMessages {
 
   public static final String TOO_MANY_BYTES_FOR_DEST_BUFFER
   = "Requested more bytes than destination buffer size";
+
+  public static final String PERMISSION_DENIED = "Permission denied";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470bdaa2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 84a8abb..ea6249e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -29,6 +29,7 @@ import java.io.OutputStream;
 import java.net.InetAddress;
 import java.net.URI;
 import java.net.UnknownHostException;
+import java.nio.file.AccessDeniedException;
 import java.util.ArrayList;
 import java.util.Enumeration;
 import java.util.List;
@@ -1139,9 +1140,14 @@ public class FileUtil {
* an IOException to be thrown.
* @param dir directory for which listing should be performed
* @return list of file names or empty string list
-   * @exception IOException for invalid directory or for a bad disk.
+   * @exception AccessDeniedException for unreadable directory
+   * @exception IOException for invalid directory or for bad disk
*/
   public static String[] list(File dir) throws IOException {
+if (!canRead(dir)) {
+  throw new AccessDeniedException(dir.toString(), null,
+  FSExceptionMessages.PERMISSION_DENIED);
+}
 String[] fileNames = dir.list();
 if(fileNames == null) {
   throw new IOException("Invalid directory or I/O error occurred for dir: "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470bdaa2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
index 6ca3905..1db72d2 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
@@ -26,12 +26,15 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 
+import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
+import java.io.PrintStream;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Before;
@@ -607,4 +610,52 @@ public class TestFsShellCopy {
 

hadoop git commit: HDFS-11116. Fix javac warnings caused by deprecation of APIs in TestViewFsDefaultValue. Contributed by Yiqun Lin.

2016-11-10 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 93eeb1316 -> 8848a8a76


HDFS-11116. Fix javac warnings caused by deprecation of APIs in 
TestViewFsDefaultValue. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8848a8a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8848a8a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8848a8a7

Branch: refs/heads/trunk
Commit: 8848a8a76c7eadebb15b347171057f906f6fc69b
Parents: 93eeb13
Author: Akira Ajisaka 
Authored: Fri Nov 11 13:44:10 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Nov 11 13:45:30 2016 +0900

--
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 10 +++---
 .../apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java | 12 +---
 2 files changed, 16 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8848a8a7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index acafc6c..e2ebe1b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -681,9 +681,13 @@ public class ViewFileSystem extends FileSystem {
 
   @Override
   public FsServerDefaults getServerDefaults(Path f) throws IOException {
-InodeTree.ResolveResult res =
-  fsState.resolve(getUriPath(f), true);
-return res.targetFileSystem.getServerDefaults(res.remainingPath);
+try {
+  InodeTree.ResolveResult res =
+  fsState.resolve(getUriPath(f), true);
+  return res.targetFileSystem.getServerDefaults(res.remainingPath);
+} catch (FileNotFoundException e) {
+  throw new NotInMountpointException(f, "getServerDefaults");
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8848a8a7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
index 8d7d2da..03e19f6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
@@ -63,6 +63,7 @@ public class TestViewFsDefaultValue {
   
   static final String testFileDir = "/tmp/test/";
   static final String testFileName = testFileDir + 
"testFileStatusSerialziation";
+  static final String NOT_IN_MOUNTPOINT_FILENAME = "/NotInMountpointFile";
   private static MiniDFSCluster cluster;
   private static final FileSystemTestHelper fileSystemTestHelper = new 
FileSystemTestHelper(); 
   private static final Configuration CONF = new Configuration();
@@ -70,6 +71,8 @@ public class TestViewFsDefaultValue {
   private static FileSystem vfs;
   private static Path testFilePath;
   private static Path testFileDirPath;
+  // Use NotInMountpoint path to trigger the exception
+  private static Path notInMountpointPath;
 
   @BeforeClass
   public static void clusterSetupAtBegining() throws IOException,
@@ -86,12 +89,14 @@ public class TestViewFsDefaultValue {
 cluster.waitClusterUp();
 fHdfs = cluster.getFileSystem();
 fileSystemTestHelper.createFile(fHdfs, testFileName);
+fileSystemTestHelper.createFile(fHdfs, NOT_IN_MOUNTPOINT_FILENAME);
 Configuration conf = ViewFileSystemTestSetup.createConfig();
 ConfigUtil.addLink(conf, "/tmp", new URI(fHdfs.getUri().toString() +
   "/tmp"));
 vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
 testFileDirPath = new Path (testFileDir);
 testFilePath = new Path (testFileName);
+notInMountpointPath = new Path(NOT_IN_MOUNTPOINT_FILENAME);
   }
 
 
@@ -105,7 +110,7 @@ public class TestViewFsDefaultValue {
 // but we are only looking at the defaultBlockSize, so this 
 // test should still pass
 try {
-  vfs.getDefaultBlockSize();
+  vfs.getDefaultBlockSize(notInMountpointPath);
   fail("getServerDefaults on viewFs did not throw excetion!");
 } catch (NotInMountpointException e) {
   assertEquals(vfs.getDefaultBlockSize(testFilePath), 
@@ -120,7 +125,7 @@ public 

[1/2] hadoop git commit: YARN-5821. Drop left-over preemption-related code and clean up method visibilities in the Schedulable hierarchy (Contributed by Karthik Kambatla via Daniel Templeton) [Forced

2016-11-10 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/YARN-4752 cda677655 -> 214067405 (forced update)


YARN-5821. Drop left-over preemption-related code and clean up method 
visibilities in the Schedulable hierarchy
(Contributed by Karthik Kambatla via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31518d06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31518d06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31518d06

Branch: refs/heads/YARN-4752
Commit: 31518d065bd2fc60f72907dda5b697d900666fc2
Parents: 07b9bf3
Author: Daniel Templeton 
Authored: Thu Nov 3 14:50:09 2016 -0700
Committer: Karthik Kambatla 
Committed: Thu Nov 10 16:15:54 2016 -0800

--
 .../scheduler/fair/FSAppAttempt.java|  84 ---
 .../scheduler/fair/FSLeafQueue.java | 102 +++
 .../scheduler/fair/FSParentQueue.java   |  46 ++---
 .../resourcemanager/scheduler/fair/FSQueue.java |  20 ++--
 .../scheduler/fair/Schedulable.java |  29 +++---
 .../scheduler/fair/FakeSchedulable.java |   5 -
 .../scheduler/fair/TestSchedulingPolicy.java|   5 -
 7 files changed, 60 insertions(+), 231 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31518d06/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index d9fdaba..06253d1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -18,12 +18,10 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
-import java.io.Serializable;
 import java.text.DecimalFormat;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
-import java.util.Comparator;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -81,7 +79,6 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
   private FairScheduler scheduler;
   private FSQueue fsQueue;
   private Resource fairShare = Resources.createResource(0, 0);
-  private RMContainerComparator comparator = new RMContainerComparator();
 
   // Preemption related variables
   private Resource fairshareStarvation = Resources.none();
@@ -121,7 +118,7 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 this.resourceWeights = new ResourceWeights();
   }
 
-  public ResourceWeights getResourceWeights() {
+  ResourceWeights getResourceWeights() {
 return resourceWeights;
   }
 
@@ -132,7 +129,7 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 return queue.getMetrics();
   }
 
-  public void containerCompleted(RMContainer rmContainer,
+  void containerCompleted(RMContainer rmContainer,
   ContainerStatus containerStatus, RMContainerEventType event) {
 try {
   writeLock.lock();
@@ -491,7 +488,7 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
* @param schedulerKey Scheduler Key
* @param level NodeType
*/
-  public void resetAllowedLocalityLevel(
+  void resetAllowedLocalityLevel(
   SchedulerRequestKey schedulerKey, NodeType level) {
 NodeType old;
 try {
@@ -513,45 +510,33 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   }
 
   // Preemption related methods
-  public Resource getStarvation() {
+  Resource getStarvation() {
 return Resources.add(fairshareStarvation, minshareStarvation);
   }
 
-  public void setMinshareStarvation(Resource starvation) {
+  void setMinshareStarvation(Resource starvation) {
 this.minshareStarvation = starvation;
   }
 
-  public void resetMinshareStarvation() {
+  void resetMinshareStarvation() {
 this.minshareStarvation = Resources.none();
   }
 
-  public void addPreemption(RMContainer container) {
+  void addPreemption(RMContainer container) {
 containersToPreempt.add(container);
 Resources.addTo(preemptedResources, 

[2/2] hadoop git commit: YARN-5783. Verify identification of starved applications. (kasha)

2016-11-10 Thread kasha
YARN-5783. Verify identification of starved applications. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21406740
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21406740
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21406740

Branch: refs/heads/YARN-4752
Commit: 2140674052c5639f541ebcb34dd0019b984cfae1
Parents: 31518d0
Author: Karthik Kambatla 
Authored: Tue Nov 8 18:09:23 2016 -0800
Committer: Karthik Kambatla 
Committed: Thu Nov 10 16:15:55 2016 -0800

--
 .../scheduler/SchedulerApplicationAttempt.java  |  16 ++
 .../scheduler/common/fica/FiCaSchedulerApp.java |  16 ++
 .../scheduler/fair/FSAppAttempt.java|  16 ++
 .../scheduler/fair/FSPreemptionThread.java  |   2 +-
 .../scheduler/fair/FSStarvedApps.java   |  56 +++--
 .../scheduler/fair/FairScheduler.java   |   7 +-
 .../fair/FairSchedulerWithMockPreemption.java   |  58 +
 .../scheduler/fair/TestFSAppStarvation.java | 245 +++
 8 files changed, 391 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21406740/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index bb1d461..8b3f60c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -1253,6 +1253,22 @@ public class SchedulerApplicationAttempt implements 
SchedulableEntity {
 unconfirmedAllocatedVcores.addAndGet(-res.getVirtualCores());
   }
 
+  @Override
+  public int hashCode() {
+return getApplicationAttemptId().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+if (! (o instanceof SchedulerApplicationAttempt)) {
+  return false;
+}
+
+SchedulerApplicationAttempt other = (SchedulerApplicationAttempt) o;
+return (this == other ||
+
this.getApplicationAttemptId().equals(other.getApplicationAttemptId()));
+  }
+
   /**
* Different state for Application Master, user can see this state from web 
UI
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21406740/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 6d9dda8..636d5b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -1100,4 +1100,20 @@ public class FiCaSchedulerApp extends 
SchedulerApplicationAttempt {
   }
 }
   }
+
+  /*
+   * Overriding to appease findbugs
+   */
+  @Override
+  public int hashCode() {
+return super.hashCode();
+  }
+
+  /*
+   * Overriding to appease findbugs
+   */
+  @Override
+  public boolean equals(Object o) {
+return super.equals(o);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21406740/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 

[29/50] [abbrv] hadoop git commit: YARN-5736. YARN container executor config does not handle white space (miklos.szeg...@cloudera.com via rkanter)

2016-11-10 Thread kasha
YARN-5736. YARN container executor config does not handle white space 
(miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09f43fa9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09f43fa9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09f43fa9

Branch: refs/heads/YARN-4752
Commit: 09f43fa9c089ebfc18401ce84755d3f2000ba033
Parents: 283fa33
Author: Robert Kanter 
Authored: Wed Nov 9 13:34:40 2016 +0100
Committer: Robert Kanter 
Committed: Wed Nov 9 13:34:40 2016 +0100

--
 .../container-executor/impl/configuration.c | 41 +--
 .../container-executor/impl/configuration.h |  9 +++
 .../main/native/container-executor/impl/main.c  |  5 +-
 .../test/test-container-executor.c  | 77 +++-
 4 files changed, 123 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09f43fa9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
index 69ceaf6..8da7d24 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
@@ -31,6 +31,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define MAX_SIZE 10
 
@@ -126,6 +127,37 @@ int check_configuration_permissions(const char* file_name) 
{
   return 0;
 }
 
+/**
+ * Trim whitespace from beginning and end.
+*/
+char* trim(char* input)
+{
+char *val_begin;
+char *val_end;
+char *ret;
+
+if (input == NULL) {
+  return NULL;
+}
+
+val_begin = input;
+val_end = input + strlen(input);
+
+while (val_begin < val_end && isspace(*val_begin))
+  val_begin++;
+while (val_end > val_begin && isspace(*(val_end - 1)))
+  val_end--;
+
+ret = (char *) malloc(
+sizeof(char) * (val_end - val_begin + 1));
+if (ret == NULL) {
+  fprintf(ERRORFILE, "Allocation error\n");
+  exit(OUT_OF_MEMORY);
+}
+
+strncpy(ret, val_begin, val_end - val_begin);
+return ret;
+}
 
 void read_config(const char* file_name, struct configuration *cfg) {
   FILE *conf_file;
@@ -202,9 +234,8 @@ void read_config(const char* file_name, struct 
configuration *cfg) {
 #endif
 
 memset(cfg->confdetails[cfg->size], 0, sizeof(struct confentry));
-cfg->confdetails[cfg->size]->key = (char *) malloc(
-sizeof(char) * (strlen(equaltok)+1));
-strcpy((char *)cfg->confdetails[cfg->size]->key, equaltok);
+cfg->confdetails[cfg->size]->key = trim(equaltok);
+
 equaltok = strtok_r(NULL, "=", _equaltok);
 if (equaltok == NULL) {
   fprintf(LOGFILE, "configuration tokenization failed \n");
@@ -222,9 +253,7 @@ void read_config(const char* file_name, struct 
configuration *cfg) {
   fprintf(LOGFILE, "read_config : Adding conf value : %s \n", equaltok);
 #endif
 
-cfg->confdetails[cfg->size]->value = (char *) malloc(
-sizeof(char) * (strlen(equaltok)+1));
-strcpy((char *)cfg->confdetails[cfg->size]->value, equaltok);
+cfg->confdetails[cfg->size]->value = trim(equaltok);
 if((cfg->size + 1) % MAX_SIZE  == 0) {
   cfg->confdetails = (struct confentry **) realloc(cfg->confdetails,
   sizeof(struct confentry **) * (MAX_SIZE + cfg->size));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09f43fa9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
index eced13b..2d14867 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
@@ -105,3 +105,12 @@ int 

[28/50] [abbrv] hadoop git commit: YARN-5823. Update NMTokens in case of requests with only opportunistic containers. (Konstantinos Karanasos via asuresh)

2016-11-10 Thread kasha
YARN-5823. Update NMTokens in case of requests with only opportunistic 
containers. (Konstantinos Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/283fa33f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/283fa33f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/283fa33f

Branch: refs/heads/YARN-4752
Commit: 283fa33febe043bd7b4fa87546be26c9c5a8f8b5
Parents: ed0beba
Author: Arun Suresh 
Authored: Wed Nov 9 00:11:25 2016 -0800
Committer: Arun Suresh 
Committed: Wed Nov 9 00:11:25 2016 -0800

--
 .../TestOpportunisticContainerAllocation.java   | 71 +++-
 .../OpportunisticContainerAllocator.java| 55 ---
 .../containermanager/ContainerManagerImpl.java  |  2 +-
 .../scheduler/DistributedScheduler.java | 19 --
 .../ApplicationMasterService.java   |  3 +-
 ...pportunisticContainerAllocatorAMService.java | 23 ++-
 6 files changed, 137 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/283fa33f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java
index b9b4b02..ace145d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java
@@ -229,6 +229,9 @@ public class TestOpportunisticContainerAllocation {
 
   amClient.registerApplicationMaster("Host", 1, "");
 
+  testOpportunisticAllocation(
+  (AMRMClientImpl) amClient);
+
   testAllocation((AMRMClientImpl)amClient);
 
   amClient
@@ -247,7 +250,6 @@ public class TestOpportunisticContainerAllocation {
   final AMRMClientImpl amClient)
   throws YarnException, IOException {
 // setup container request
-
 assertEquals(0, amClient.ask.size());
 assertEquals(0, amClient.release.size());
 
@@ -388,6 +390,73 @@ public class TestOpportunisticContainerAllocation {
 assertEquals(0, amClient.release.size());
   }
 
+  /**
+   * Tests allocation with requests comprising only opportunistic containers.
+   */
+  private void testOpportunisticAllocation(
+  final AMRMClientImpl amClient)
+  throws YarnException, IOException {
+// setup container request
+assertEquals(0, amClient.ask.size());
+assertEquals(0, amClient.release.size());
+
+amClient.addContainerRequest(
+new AMRMClient.ContainerRequest(capability, null, null, priority, 0,
+true, null,
+ExecutionTypeRequest.newInstance(
+ExecutionType.OPPORTUNISTIC, true)));
+amClient.addContainerRequest(
+new AMRMClient.ContainerRequest(capability, null, null, priority, 0,
+true, null,
+ExecutionTypeRequest.newInstance(
+ExecutionType.OPPORTUNISTIC, true)));
+
+int oppContainersRequestedAny =
+amClient.getTable(0).get(priority, ResourceRequest.ANY,
+ExecutionType.OPPORTUNISTIC, capability).remoteRequest
+.getNumContainers();
+
+assertEquals(2, oppContainersRequestedAny);
+
+assertEquals(1, amClient.ask.size());
+assertEquals(0, amClient.release.size());
+
+// RM should allocate container within 2 calls to allocate()
+int allocatedContainerCount = 0;
+int iterationsLeft = 10;
+Set releases = new TreeSet<>();
+
+amClient.getNMTokenCache().clearCache();
+Assert.assertEquals(0,
+amClient.getNMTokenCache().numberOfTokensInCache());
+HashMap receivedNMTokens = new HashMap<>();
+
+while (allocatedContainerCount < oppContainersRequestedAny
+&& iterationsLeft-- > 0) {
+  AllocateResponse allocResponse = amClient.allocate(0.1f);
+  assertEquals(0, amClient.ask.size());
+  assertEquals(0, amClient.release.size());
+
+  for (Container container : allocResponse.getAllocatedContainers()) {
+allocatedContainerCount++;
+ContainerId rejectContainerId = container.getId();
+releases.add(rejectContainerId);
+  }
+
+  for (NMToken token : allocResponse.getNMTokens()) {
+String nodeID = token.getNodeId().toString();
+

[08/50] [abbrv] hadoop git commit: HADOOP-13795. Skip testGlobStatusThrowsExceptionForUnreadableDir in TestFSMainOperationsSwift. Contributed by John Zhuge.

2016-11-10 Thread kasha
HADOOP-13795. Skip testGlobStatusThrowsExceptionForUnreadableDir in 
TestFSMainOperationsSwift. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/acd509dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/acd509dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/acd509dc

Branch: refs/heads/YARN-4752
Commit: acd509dc57d6b8b3791a8332fec9bdf53a8f9d36
Parents: f768955
Author: Xiao Chen 
Authored: Mon Nov 7 09:21:01 2016 -0800
Committer: Xiao Chen 
Committed: Mon Nov 7 09:21:01 2016 -0800

--
 .../org/apache/hadoop/fs/swift/TestFSMainOperationsSwift.java  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/acd509dc/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestFSMainOperationsSwift.java
--
diff --git 
a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestFSMainOperationsSwift.java
 
b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestFSMainOperationsSwift.java
index 74299df..b595f1c 100644
--- 
a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestFSMainOperationsSwift.java
+++ 
b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestFSMainOperationsSwift.java
@@ -76,6 +76,12 @@ public class TestFSMainOperationsSwift extends 
FSMainOperationsBaseTest {
 
  @Test(timeout = SWIFT_TEST_TIMEOUT)
  @Override
+  // NOTE(review): skipped as "unsupported" — presumably the Swift object
+  // store cannot model an unreadable directory, so the base-class test does
+  // not apply here; confirm against SwiftTestUtils/HADOOP-13795.
+  public void testGlobStatusThrowsExceptionForUnreadableDir() {
+SwiftTestUtils.skip("unsupported");
+  }
+
+  @Test(timeout = SWIFT_TEST_TIMEOUT)
+  @Override
+  // Override exists only to attach the Swift-specific timeout; behavior is
+  // entirely the base-class implementation.
  public void testFsStatus() throws Exception {
super.testFsStatus();
  }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[22/50] [abbrv] hadoop git commit: YARN-5833. Add validation to ensure default ports are unique in Configuration. (Konstantinos Karanasos via Subru).

2016-11-10 Thread kasha
YARN-5833. Add validation to ensure default ports are unique in Configuration. 
(Konstantinos Karanasos via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29e3b341
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29e3b341
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29e3b341

Branch: refs/heads/YARN-4752
Commit: 29e3b3417c16c83dc8e753f94d7ca9957dddbedd
Parents: 3f93ac0
Author: Subru Krishnan 
Authored: Tue Nov 8 14:38:18 2016 -0800
Committer: Subru Krishnan 
Committed: Tue Nov 8 14:38:18 2016 -0800

--
 .../conf/TestConfigurationFieldsBase.java   | 47 
 .../hadoop/yarn/conf/YarnConfiguration.java |  2 +-
 .../yarn/conf/TestYarnConfigurationFields.java  | 11 +
 3 files changed, 59 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29e3b341/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index eab0161..9007c20 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.conf;
 
+import org.apache.commons.lang.StringUtils;
 import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -147,6 +148,14 @@ public abstract class TestConfigurationFieldsBase {
   private Set xmlFieldsMissingInConfiguration = null;
 
   /**
+   * A set of strings used to check for collision of default values.
+   * For each of the set's strings, the default values containing that string
+   * in their name should not coincide.
+   */
+  @SuppressWarnings("checkstyle:visibilitymodifier")
+  protected Set filtersForDefaultValueCollisionCheck = new HashSet<>();
+
+  /**
* Member variable for debugging base class operation
*/
   protected boolean configDebug = false;
@@ -719,4 +728,42 @@ public abstract class TestConfigurationFieldsBase {
 System.out.println("=");
 System.out.println();
   }
+
+  /**
+   * For each specified string, get the default parameter values whose names
+   * contain the string. Then check whether any of these default values
+   * collide. This is, for example, useful to make sure there is no collision
+   * of default ports across different services.
+   */
+  @Test
+  public void testDefaultValueCollision() {
+    for (String filter : filtersForDefaultValueCollisionCheck) {
+      System.out.println("Checking if any of the default values whose name " +
+          "contains string \"" + filter + "\" collide.");
+
+      // Map from filtered default value to the name of the first parameter
+      // seen with that value; putIfAbsent returns the prior owner, if any.
+      Map<String, String> filteredValues = new HashMap<>();
+
+      int valuesChecked = 0;
+      for (Map.Entry<String, String> ent :
+          configurationDefaultVariables.entrySet()) {
+        // Apply the name filter to the default parameters.
+        if (ent.getKey().contains(filter)) {
+          // Check only for numerical values.
+          if (StringUtils.isNumeric(ent.getValue())) {
+            String crtValue =
+                filteredValues.putIfAbsent(ent.getValue(), ent.getKey());
+            // assertNull is the idiomatic form of assertTrue(x == null) and
+            // keeps the failure message tied to the offending parameter pair.
+            assertNull("Parameters " + ent.getKey() + " and " + crtValue +
+                " are using the same default value!", crtValue);
+          }
+          valuesChecked++;
+        }
+      }
+
+      System.out.println(
+          "Checked " + valuesChecked + " default values for collision.");
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29e3b341/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index c16e1ea..1fd25a7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1744,7 +1744,7 @@ public class YarnConfiguration extends Configuration {
 
   public static final String 

[33/50] [abbrv] hadoop git commit: HDFS-11056. Concurrent append and read operations lead to checksum error. Contributed by Wei-Chiu Chuang.

2016-11-10 Thread kasha
HDFS-11056. Concurrent append and read operations lead to checksum error. 
Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c619e9b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c619e9b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c619e9b4

Branch: refs/heads/YARN-4752
Commit: c619e9b43fd00ba0e59a98ae09685ff719bb722b
Parents: 367c3d4
Author: Wei-Chiu Chuang 
Authored: Wed Nov 9 09:15:51 2016 -0800
Committer: Wei-Chiu Chuang 
Committed: Wed Nov 9 09:16:50 2016 -0800

--
 .../datanode/fsdataset/impl/FsVolumeImpl.java   | 41 +++
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 71 
 .../fsdataset/impl/FsDatasetImplTestUtils.java  | 14 
 3 files changed, 126 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c619e9b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 1627865..5880b3e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -23,6 +23,7 @@ import java.io.FileOutputStream;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
+import java.io.RandomAccessFile;
 import java.net.URI;
 import java.nio.channels.ClosedChannelException;
 import java.nio.file.Files;
@@ -47,6 +48,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
@@ -59,6 +61,7 @@ import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplicaInPipeline;
@@ -1102,6 +1105,28 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
 
 
+  /**
+   * Loads the stored checksum of the last, partial chunk of a block from its
+   * meta file so that an appending writer can continue the running checksum.
+   *
+   * @param blockFile the block data file; only its on-disk length is used
+   * @param metaFile the checksum (meta) file of the block
+   * @return the checksum bytes of the last partial chunk, or null when the
+   *         block ends on a chunk boundary (that checksum is never rewritten)
+   * @throws IOException if the meta file header or checksum cannot be read
+   */
+  private byte[] loadLastPartialChunkChecksum(
+      File blockFile, File metaFile) throws IOException {
+    DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum();
+    final int checksumSize = dcs.getChecksumSize();
+    final long onDiskLen = blockFile.length();
+    final int bytesPerChecksum = dcs.getBytesPerChecksum();
+
+    if (onDiskLen % bytesPerChecksum == 0) {
+      // the last chunk is a complete one. No need to preserve its checksum
+      // because it will not be modified.
+      return null;
+    }
+
+    // Keep the offset as a long: the original (int) cast could overflow for
+    // very large blocks, and RandomAccessFile.seek takes a long anyway.
+    long offsetInChecksum = BlockMetadataHeader.getHeaderSize() +
+        (onDiskLen / bytesPerChecksum) * checksumSize;
+    byte[] lastChecksum = new byte[checksumSize];
+    // try-with-resources closes the file on every path (the original leaked
+    // it), and readFully guarantees the full checksum is read or an
+    // EOFException is thrown (raf.read may legally return short).
+    try (RandomAccessFile raf = new RandomAccessFile(metaFile, "r")) {
+      raf.seek(offsetInChecksum);
+      raf.readFully(lastChecksum, 0, checksumSize);
+    }
+    return lastChecksum;
+  }
+
   public ReplicaInPipeline append(String bpid, ReplicaInfo replicaInfo,
   long newGS, long estimateBlockLen) throws IOException {
 
@@ -1126,6 +1151,13 @@ public class FsVolumeImpl implements FsVolumeSpi {
 .setBytesToReserve(bytesReserved)
 .buildLocalReplicaInPipeline();
 
+// load last checksum and datalen
+LocalReplica localReplica = (LocalReplica)replicaInfo;
+byte[] lastChunkChecksum = loadLastPartialChunkChecksum(
+localReplica.getBlockFile(), localReplica.getMetaFile());
+newReplicaInfo.setLastChecksumAndDataLen(
+replicaInfo.getNumBytes(), lastChunkChecksum);
+
 // rename meta file to rbw directory
 // rename block file to rbw directory
 newReplicaInfo.moveReplicaFrom(replicaInfo, newBlkFile);
@@ -1170,6 +1202,12 @@ public class FsVolumeImpl implements FsVolumeSpi {
 .setBytesToReserve(0)
 .buildLocalReplicaInPipeline();
 rbw.setBytesAcked(visible);
+
+// load last checksum and datalen
+

[16/50] [abbrv] hadoop git commit: HADOOP-13804. MutableStat mean loses accuracy if add(long, long) is used. Contributed by Erik Krogen.

2016-11-10 Thread kasha
HADOOP-13804. MutableStat mean loses accuracy if add(long, long) is used. 
Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3dbad5d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3dbad5d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3dbad5d8

Branch: refs/heads/YARN-4752
Commit: 3dbad5d823b8bf61b643dd1057165044138b99e0
Parents: de3b4aa
Author: Zhe Zhang 
Authored: Mon Nov 7 16:08:10 2016 -0800
Committer: Zhe Zhang 
Committed: Mon Nov 7 16:08:10 2016 -0800

--
 .../apache/hadoop/metrics2/lib/MutableStat.java  |  4 
 .../apache/hadoop/metrics2/util/SampleStat.java  | 19 +++
 .../hadoop/metrics2/lib/TestMutableMetrics.java  | 17 +
 3 files changed, 36 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dbad5d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
index ae68874..92fe3d1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
@@ -102,6 +102,10 @@ public class MutableStat extends MutableMetric {
 
   /**
* Add a number of samples and their sum to the running stat
+   *
+   * Note that although use of this method will preserve accurate mean values,
+   * large values for numSamples may result in inaccurate variance values due
+   * to the use of a single step of the Welford method for variance calculation.
* @param numSamples  number of samples
* @param sum of the samples
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dbad5d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
index cd9aaa4..23abfc4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
@@ -27,29 +27,32 @@ import org.apache.hadoop.classification.InterfaceAudience;
 public class SampleStat {
   private final MinMax minmax = new MinMax();
   private long numSamples = 0;
-  private double a0, a1, s0, s1;
+  private double a0, a1, s0, s1, total;
 
   /**
* Construct a new running sample stat
*/
   public SampleStat() {
 a0 = s0 = 0.0;
+total = 0.0;
   }
 
   public void reset() {
 numSamples = 0;
 a0 = s0 = 0.0;
+total = 0.0;
 minmax.reset();
   }
 
   // We want to reuse the object, sometimes.
   void reset(long numSamples, double a0, double a1, double s0, double s1,
- MinMax minmax) {
+  double total, MinMax minmax) {
 this.numSamples = numSamples;
 this.a0 = a0;
 this.a1 = a1;
 this.s0 = s0;
 this.s1 = s1;
+this.total = total;
 this.minmax.reset(minmax);
   }
 
@@ -58,7 +61,7 @@ public class SampleStat {
* @param other the destination to hold our values
*/
   public void copyTo(SampleStat other) {
-other.reset(numSamples, a0, a1, s0, s1, minmax);
+other.reset(numSamples, a0, a1, s0, s1, total, minmax);
   }
 
   /**
@@ -80,6 +83,7 @@ public class SampleStat {
*/
   public SampleStat add(long nSamples, double x) {
 numSamples += nSamples;
+total += x;
 
 if (numSamples == 1) {
   a0 = a1 = x;
@@ -103,10 +107,17 @@ public class SampleStat {
   }
 
   /**
+   * @return the total of all samples added
+   */
+  public double total() {
+return total;
+  }
+
+  /**
* @return  the arithmetic mean of the samples
*/
   public double mean() {
-return numSamples > 0 ? a1 : 0.0;
+return numSamples > 0 ? (total / numSamples) : 0.0;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dbad5d8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
 

[40/50] [abbrv] hadoop git commit: HDFS-11120. TestEncryptionZones should waitActive. Contributed by John Zhuge.

2016-11-10 Thread kasha
HDFS-11120. TestEncryptionZones should waitActive. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71adf44c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71adf44c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71adf44c

Branch: refs/heads/YARN-4752
Commit: 71adf44c3fc5655700cdc904e61366d438c938eb
Parents: de3a5f8
Author: Xiao Chen 
Authored: Wed Nov 9 17:15:19 2016 -0800
Committer: Xiao Chen 
Committed: Wed Nov 9 17:16:07 2016 -0800

--
 .../apache/hadoop/hdfs/TestEncryptionZones.java | 61 +++-
 1 file changed, 34 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71adf44c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 3a0586e..8605b9e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -94,7 +94,9 @@ import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 
 import static org.junit.Assert.assertNotNull;
@@ -146,6 +148,9 @@ public class TestEncryptionZones {
   new Path(testRootDir.toString(), "test.jks").toUri();
   }
 
+  @Rule
+  public Timeout globalTimeout = new Timeout(120 * 1000);
+
   @Before
   public void setup() throws Exception {
 conf = new HdfsConfiguration();
@@ -160,6 +165,7 @@ public class TestEncryptionZones {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
 2);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+cluster.waitActive();
 Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
 fs = cluster.getFileSystem();
 fsWrapper = new FileSystemTestWrapper(fs);
@@ -231,7 +237,7 @@ public class TestEncryptionZones {
* with sticky bits.
* @throws Exception
*/
-  @Test(timeout = 6)
+  @Test
   public void testTrashStickyBit() throws Exception {
 // create an EZ /zones/zone1, make it world writable.
 final Path zoneParent = new Path("/zones");
@@ -294,7 +300,7 @@ public class TestEncryptionZones {
* with sticky bits.
* @throws Exception
*/
-  @Test(timeout = 6)
+  @Test
   public void testProvisionTrash() throws Exception {
 // create an EZ /zones/zone1
 final Path zoneParent = new Path("/zones");
@@ -326,7 +332,8 @@ public class TestEncryptionZones {
 assertTrue(trashFileStatus.getPermission().getStickyBit());
   }
 
-  @Test(timeout = 6)
+  // CHECKSTYLE:OFF:MethodLengthCheck
+  @Test
   public void testBasicOperations() throws Exception {
 
 int numZones = 0;
@@ -485,8 +492,9 @@ public class TestEncryptionZones {
 assertNumZones(numZones);
 assertZonePresent(null, nonpersistZone.toString());
   }
+  // CHECKSTYLE:ON:MethodLengthCheck
 
-  @Test(timeout = 6)
+  @Test
   public void testBasicOperationsRootDir() throws Exception {
 int numZones = 0;
 final Path rootDir = new Path("/");
@@ -510,7 +518,7 @@ public class TestEncryptionZones {
   /**
* Test listing encryption zones as a non super user.
*/
-  @Test(timeout = 6)
+  @Test
   public void testListEncryptionZonesAsNonSuperUser() throws Exception {
 
 final UserGroupInformation user = UserGroupInformation.
@@ -544,7 +552,7 @@ public class TestEncryptionZones {
   /**
* Test getEncryptionZoneForPath as a non super user.
*/
-  @Test(timeout = 6)
+  @Test
   public void testGetEZAsNonSuperUser() throws Exception {
 
 final UserGroupInformation user = UserGroupInformation.
@@ -688,12 +696,12 @@ public class TestEncryptionZones {
 }
   }
 
-  @Test(timeout = 6)
+  @Test
   public void testRenameFileSystem() throws Exception {
 doRenameEncryptionZone(fsWrapper);
   }
 
-  @Test(timeout = 6)
+  @Test
   public void testRenameFileContext() throws Exception {
 doRenameEncryptionZone(fcWrapper);
   }
@@ -703,7 +711,7 @@ public class TestEncryptionZones {
 return blocks.getFileEncryptionInfo();
   }
 
-  @Test(timeout = 12)
+  @Test
   public void testReadWrite() throws Exception {
 final HdfsAdmin dfsAdmin =
 new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
@@ -759,7 +767,7 @@ public class TestEncryptionZones {
  

[38/50] [abbrv] hadoop git commit: YARN-5611. Provide an API to update lifetime of an application. Contributed by Rohith Sharma K S

2016-11-10 Thread kasha
YARN-5611. Provide an API to update lifetime of an application. Contributed by 
Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcc15c62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcc15c62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcc15c62

Branch: refs/heads/YARN-4752
Commit: bcc15c6290b3912a054323695a6a931b0de163bd
Parents: edbee9e
Author: Jian He 
Authored: Wed Nov 9 14:33:58 2016 -0800
Committer: Jian He 
Committed: Wed Nov 9 16:08:05 2016 -0800

--
 .../hadoop/mapred/TestClientRedirect.java   |   9 +
 .../yarn/api/ApplicationClientProtocol.java |  23 ++
 .../UpdateApplicationTimeoutsRequest.java   |  81 +++
 .../UpdateApplicationTimeoutsResponse.java  |  46 
 .../records/ApplicationSubmissionContext.java   |   4 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   6 +-
 .../main/proto/applicationclient_protocol.proto |   1 +
 .../src/main/proto/yarn_protos.proto|   5 +
 .../src/main/proto/yarn_service_protos.proto|   9 +
 .../ApplicationClientProtocolPBClientImpl.java  |  21 +-
 .../ApplicationClientProtocolPBServiceImpl.java |  22 ++
 .../UpdateApplicationTimeoutsRequestPBImpl.java | 220 +++
 ...UpdateApplicationTimeoutsResponsePBImpl.java |  73 ++
 .../yarn/util/AbstractLivelinessMonitor.java|  17 +-
 .../java/org/apache/hadoop/yarn/util/Times.java |  33 +++
 .../src/main/resources/yarn-default.xml |   4 +-
 .../amrmproxy/MockResourceManagerFacade.java|   9 +
 .../server/resourcemanager/ClientRMService.java | 137 +---
 .../server/resourcemanager/RMAppManager.java|  37 
 .../server/resourcemanager/RMAuditLogger.java   |   4 +-
 .../server/resourcemanager/RMServerUtils.java   |  48 +++-
 .../resourcemanager/recovery/RMStateStore.java  |  28 ++-
 .../recovery/RMStateUpdateAppEvent.java |  15 +-
 .../recovery/records/ApplicationStateData.java  |  27 +++
 .../impl/pb/ApplicationStateDataPBImpl.java |  86 
 .../server/resourcemanager/rmapp/RMApp.java |   3 +
 .../server/resourcemanager/rmapp/RMAppImpl.java |  64 +-
 .../rmapp/monitor/RMAppLifetimeMonitor.java |  72 +++---
 .../scheduler/capacity/CapacityScheduler.java   |   3 +-
 .../yarn_server_resourcemanager_recovery.proto  |   1 +
 .../applicationsmanager/MockAsm.java|   6 +
 .../server/resourcemanager/rmapp/MockRMApp.java |   6 +
 .../rmapp/TestApplicationLifetimeMonitor.java   | 150 -
 33 files changed, 1149 insertions(+), 121 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc15c62/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index 255f998..65eac65 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ -124,6 +124,8 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
 import 
org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityRequest;
 import 
org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest;
+import 
org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -485,6 +487,13 @@ public class TestClientRedirect {
 SignalContainerRequest request) throws IOException {
   return null;
 }
+
+@Override
+// NOTE(review): mock RPC stub, matching the sibling signalToContainer stub
+// above; returns null because this test presumably never dispatches the
+// call — verify against TestClientRedirect's assertions.
+public UpdateApplicationTimeoutsResponse updateApplicationTimeouts(
+UpdateApplicationTimeoutsRequest request)
+throws YarnException, IOException {
+  return null;
+}
   }
 
   class HistoryService extends AMService implements HSClientProtocol {


[37/50] [abbrv] hadoop git commit: YARN-5611. Provide an API to update lifetime of an application. Contributed by Rohith Sharma K S

2016-11-10 Thread kasha
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc15c62/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java
index e550c97..d194204 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java
@@ -18,9 +18,8 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor;
 
-import java.util.EnumSet;
-import java.util.HashMap;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 
 import org.apache.commons.logging.Log;
@@ -33,7 +32,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.util.AbstractLivelinessMonitor;
 import org.apache.hadoop.yarn.util.SystemClock;
 
@@ -47,12 +45,6 @@ public class RMAppLifetimeMonitor
   private static final Log LOG = LogFactory.getLog(RMAppLifetimeMonitor.class);
 
   private RMContext rmContext;
-  private Map monitoredApps =
-  new HashMap();
-
-  private static final EnumSet COMPLETED_APP_STATES =
-  EnumSet.of(RMAppState.FINISHED, RMAppState.FINISHING, RMAppState.FAILED,
-  RMAppState.KILLED, RMAppState.FINAL_SAVING, RMAppState.KILLING);
 
   public RMAppLifetimeMonitor(RMContext rmContext) {
 super(RMAppLifetimeMonitor.class.getName(), SystemClock.getInstance());
@@ -61,14 +53,16 @@ public class RMAppLifetimeMonitor
 
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
-long monitorInterval = conf.getLong(
-YarnConfiguration.RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS,
-YarnConfiguration.DEFAULT_RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS);
+long monitorInterval =
+conf.getLong(YarnConfiguration.RM_APPLICATION_MONITOR_INTERVAL_MS,
+YarnConfiguration.DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS);
 if (monitorInterval <= 0) {
   monitorInterval =
-  
YarnConfiguration.DEFAULT_RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS;
+  YarnConfiguration.DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS;
 }
 setMonitorInterval(monitorInterval);
+setExpireInterval(0); // No need of expire interval for App.
+setResetTimeOnStart(false); // do not reset expire time on restart
LOG.info("Application lifetime monitor interval set to " + monitorInterval
+ " ms.");
 super.serviceInit(conf);
@@ -77,54 +71,42 @@ public class RMAppLifetimeMonitor
   @SuppressWarnings("unchecked")
   @Override
   protected synchronized void expire(RMAppToMonitor monitoredAppKey) {
-Long remove = monitoredApps.remove(monitoredAppKey);
 ApplicationId appId = monitoredAppKey.getApplicationId();
 RMApp app = rmContext.getRMApps().get(appId);
 if (app == null) {
   return;
 }
-// Don't trigger a KILL event if application is in completed states
-if (!COMPLETED_APP_STATES.contains(app.getState())) {
-  String diagnostics =
-  "Application killed due to exceeding its lifetime period " + remove
-  + " milliseconds";
-  rmContext.getDispatcher().getEventHandler()
-  .handle(new RMAppEvent(appId, RMAppEventType.KILL, diagnostics));
-} else {
-  LOG.info("Application " + appId
-  + " is about to complete. So not killing the application.");
-}
+String diagnostics =
+"Application killed due to exceeding its lifetime period";
+rmContext.getDispatcher().getEventHandler()
+.handle(new RMAppEvent(appId, RMAppEventType.KILL, diagnostics));
   }
 
-  public synchronized void registerApp(ApplicationId appId,
-  ApplicationTimeoutType timeoutType, long monitorStartTime, long timeout) 
{
+  public void registerApp(ApplicationId appId,
+  ApplicationTimeoutType timeoutType, long expireTime) {
 RMAppToMonitor appToMonitor = new RMAppToMonitor(appId, 

[02/50] [abbrv] hadoop git commit: YARN-5497. [YARN-3368] Use different color for Undefined and Succeeded for Final State in applications page. (Akhil P B Tan via Sunil G)

2016-11-10 Thread kasha
YARN-5497. [YARN-3368] Use different color for Undefined and Succeeded for 
Final State in applications page. (Akhil P B Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12ddbbc6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12ddbbc6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12ddbbc6

Branch: refs/heads/YARN-4752
Commit: 12ddbbc61d0c90ac4c0057fd7b151eaa778123c8
Parents: bc273c4
Author: sunilg 
Authored: Thu Oct 27 14:45:23 2016 +0530
Committer: Wangda Tan 
Committed: Sun Nov 6 13:13:31 2016 -0800

--
 .../hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12ddbbc6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
index 0a5df87..8b5474f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
@@ -86,14 +86,17 @@ export default DS.Model.extend({
   }.property("progress"),
 
   finalStatusStyle: function() {
-var style = "default";
 var finalStatus = this.get("finalStatus");
+var style = "";
+
 if (finalStatus == "KILLED") {
   style = "warning";
 } else if (finalStatus == "FAILED") {
   style = "danger";
-} else {
+} else if (finalStatus == "SUCCEEDED") {
   style = "success";
+} else {
+  style = "default";
 }
 
 return "label label-" + style;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[27/50] [abbrv] hadoop git commit: HDFS-9482. Replace DatanodeInfo constructors with a builder pattern. Contributed by Brahma Reddy Battula.

2016-11-10 Thread kasha
HDFS-9482. Replace DatanodeInfo constructors with a builder pattern. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed0bebab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed0bebab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed0bebab

Branch: refs/heads/YARN-4752
Commit: ed0bebabaaf27cd730f7f8eb002d92c9c7db327d
Parents: 62d8c17
Author: Brahma Reddy Battula 
Authored: Tue Nov 8 18:17:07 2016 -0800
Committer: Arpit Agarwal 
Committed: Tue Nov 8 18:17:07 2016 -0800

--
 .../hadoop/hdfs/DFSStripedOutputStream.java |   4 +-
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 217 +++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  22 +-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  42 ++--
 .../NamenodeProtocolServerSideTranslatorPB.java |   6 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   4 +-
 .../server/datanode/ReportBadBlockAction.java   |   4 +-
 .../erasurecode/StripedBlockWriter.java |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   7 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  37 ++--
 .../hadoop/hdfs/TestDFSClientSocketSize.java|   6 +-
 .../apache/hadoop/hdfs/TestFileCorruption.java  |   7 +-
 .../client/impl/TestBlockReaderFactory.java |   6 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|   4 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   6 +-
 .../impl/TestInterDatanodeProtocol.java |   4 +-
 .../shortcircuit/TestShortCircuitCache.java |  11 +-
 17 files changed, 277 insertions(+), 114 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index d5d0dfb..52fc5eb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -766,7 +767,8 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
 newNodes[i] = nodes[0];
 newStorageIDs[i] = storageIDs[0];
   } else {
-newNodes[i] = new DatanodeInfo(DatanodeID.EMPTY_DATANODE_ID);
+newNodes[i] = new DatanodeInfoBuilder()
+.setNodeID(DatanodeID.EMPTY_DATANODE_ID).build();
 newStorageIDs[i] = "";
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index e9ee8b9..8f9f3d5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -86,7 +86,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   protected AdminStates adminState;
   private long maintenanceExpireTimeInMS;
 
-  public DatanodeInfo(DatanodeInfo from) {
+  protected DatanodeInfo(DatanodeInfo from) {
 super(from);
 this.capacity = from.getCapacity();
 this.dfsUsed = from.getDfsUsed();
@@ -103,7 +103,7 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.upgradeDomain = from.getUpgradeDomain();
   }
 
-  public DatanodeInfo(DatanodeID nodeID) {
+  protected DatanodeInfo(DatanodeID nodeID) {
 super(nodeID);
 this.capacity = 0L;
 this.dfsUsed = 0L;
@@ -118,57 +118,13 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.adminState = null;
   }
 
-  public DatanodeInfo(DatanodeID nodeID, String location) {
+  

[31/50] [abbrv] hadoop git commit: YARN-5833. Addendum patch to include missing changes to yarn-default.xml

2016-11-10 Thread kasha
YARN-5833. Addendum patch to include missing changes to yarn-default.xml


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/280357c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/280357c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/280357c2

Branch: refs/heads/YARN-4752
Commit: 280357c29f867e3ef6386ea5bd0f7b7ca6fe04eb
Parents: c074880
Author: Arun Suresh 
Authored: Wed Nov 9 07:15:11 2016 -0800
Committer: Arun Suresh 
Committed: Wed Nov 9 07:15:11 2016 -0800

--
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/280357c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index e890b40..834ead7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2724,7 +2724,7 @@
 The address of the AMRMProxyService listener.
 
 yarn.nodemanager.amrmproxy.address
-0.0.0.0:8048
+0.0.0.0:8049
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[21/50] [abbrv] hadoop git commit: YARN-5356. NodeManager should communicate physical resource capability to ResourceManager. Contributed by Inigo Goiri

2016-11-10 Thread kasha
YARN-5356. NodeManager should communicate physical resource capability to 
ResourceManager. Contributed by Inigo Goiri


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f93ac07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f93ac07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f93ac07

Branch: refs/heads/YARN-4752
Commit: 3f93ac0733058238a2c8f23960c986c71dca0e02
Parents: dbb133c
Author: Jason Lowe 
Authored: Tue Nov 8 22:01:26 2016 +
Committer: Jason Lowe 
Committed: Tue Nov 8 22:01:26 2016 +

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |  5 +++
 .../yarn/sls/scheduler/RMNodeWrapper.java   |  5 +++
 .../yarn/util/ResourceCalculatorPlugin.java | 39 
 .../RegisterNodeManagerRequest.java | 24 
 .../pb/RegisterNodeManagerRequestPBImpl.java| 30 ++-
 .../yarn_server_common_service_protos.proto |  1 +
 .../hadoop/yarn/TestYarnServerApiClasses.java   |  3 ++
 .../nodemanager/NodeResourceMonitorImpl.java|  6 +--
 .../nodemanager/NodeStatusUpdaterImpl.java  | 17 -
 .../monitor/ContainersMonitorImpl.java  |  9 +
 .../resourcemanager/ResourceTrackerService.java |  3 +-
 .../server/resourcemanager/rmnode/RMNode.java   |  6 +++
 .../resourcemanager/rmnode/RMNodeImpl.java  | 23 +++-
 .../yarn/server/resourcemanager/MockNodes.java  | 17 ++---
 14 files changed, 166 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f93ac07/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index b18fb4a..8962aba 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -218,6 +218,11 @@ public class NodeInfo {
 public Integer getDecommissioningTimeout() {
   return null;
 }
+
+@Override
+public Resource getPhysicalResource() {
+  return null;
+}
   }
 
   public static RMNode newNodeInfo(String rackName, String hostName,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f93ac07/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 4edc216..d7b159c 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -207,4 +207,9 @@ public class RMNodeWrapper implements RMNode {
   public Integer getDecommissioningTimeout() {
 return null;
   }
+
+  @Override
+  public Resource getPhysicalResource() {
+return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f93ac07/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
index 7b2ea56..e7e4c8a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.SysInfo;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
 /**
  * Plugin to calculate resource information on the system.
@@ -195,4 +196,42 @@ public class ResourceCalculatorPlugin extends Configured {
 return null;
   }
 
+  /**
+   * Create the ResourceCalculatorPlugin for the containers monitor in the Node
+   * Manager and configure it. If the plugin is not configured, this method
+   * will try and return a memory calculator plugin available for this 

[03/50] [abbrv] hadoop git commit: YARN-5779. [YARN-3368] Addendum patch to document limits/notes of the new YARN UI . (Sunil G via Sreenath Somarajapuram)

2016-11-10 Thread kasha
YARN-5779. [YARN-3368] Addendum patch to document limits/notes of the new YARN 
UI . (Sunil G via Sreenath Somarajapuram)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fad392a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fad392a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fad392a2

Branch: refs/heads/YARN-4752
Commit: fad392a22f1007e6b6e7f6af55051ebd912a6e4a
Parents: 64c7cda
Author: Sreenath Somarajapuram 
Authored: Fri Oct 28 18:20:55 2016 +0530
Committer: Wangda Tan 
Committed: Sun Nov 6 13:13:31 2016 -0800

--
 .../hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fad392a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
index 9f82031..e7853bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
@@ -38,6 +38,9 @@ origin (CORS) support.
 | `yarn.resourcemanager.webapp.cross-origin.enabled` | true | Enable CORS 
support for Resource Manager  |
 | `yarn.nodemanager.webapp.cross-origin.enabled` | true | Enable CORS support 
for Node Manager  |
 
+Also please ensure that CORS related configurations are enabled in 
`core-site.xml`.
+Kindly refer 
[here](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/HttpAuthentication.html)
+
 Use it
 -
 Open your browser, go to `rm-address:8088/ui2` and try it!


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[50/50] [abbrv] hadoop git commit: YARN-5783. Verify identification of starved applications. (kasha)

2016-11-10 Thread kasha
YARN-5783. Verify identification of starved applications. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cda67765
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cda67765
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cda67765

Branch: refs/heads/YARN-4752
Commit: cda677655b80ce8e58cdb75aa0e106d033d6d361
Parents: e3b5c48
Author: Karthik Kambatla 
Authored: Tue Nov 8 18:09:23 2016 -0800
Committer: Karthik Kambatla 
Committed: Thu Nov 10 14:48:15 2016 -0800

--
 .../scheduler/SchedulerApplicationAttempt.java  |  16 ++
 .../scheduler/common/fica/FiCaSchedulerApp.java |  16 ++
 .../scheduler/fair/FSAppAttempt.java|  16 ++
 .../scheduler/fair/FSPreemptionThread.java  |   2 +-
 .../scheduler/fair/FSStarvedApps.java   |  56 +++--
 .../scheduler/fair/FairScheduler.java   |   7 +-
 .../fair/FairSchedulerWithMockPreemption.java   |  58 +
 .../scheduler/fair/TestFSAppStarvation.java | 245 +++
 8 files changed, 391 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cda67765/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index bb1d461..8b3f60c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -1253,6 +1253,22 @@ public class SchedulerApplicationAttempt implements 
SchedulableEntity {
 unconfirmedAllocatedVcores.addAndGet(-res.getVirtualCores());
   }
 
+  @Override
+  public int hashCode() {
+return getApplicationAttemptId().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+if (! (o instanceof SchedulerApplicationAttempt)) {
+  return false;
+}
+
+SchedulerApplicationAttempt other = (SchedulerApplicationAttempt) o;
+return (this == other ||
+
this.getApplicationAttemptId().equals(other.getApplicationAttemptId()));
+  }
+
   /**
* Different state for Application Master, user can see this state from web 
UI
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cda67765/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 6d9dda8..636d5b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -1100,4 +1100,20 @@ public class FiCaSchedulerApp extends 
SchedulerApplicationAttempt {
   }
 }
   }
+
+  /*
+   * Overriding to appease findbugs
+   */
+  @Override
+  public int hashCode() {
+return super.hashCode();
+  }
+
+  /*
+   * Overriding to appease findbugs
+   */
+  @Override
+  public boolean equals(Object o) {
+return super.equals(o);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cda67765/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 

[06/50] [abbrv] hadoop git commit: HADOOP-13798. TestHadoopArchives times out.

2016-11-10 Thread kasha
HADOOP-13798. TestHadoopArchives times out.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b970446b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b970446b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b970446b

Branch: refs/heads/YARN-4752
Commit: b970446b2c59f8897bb2c3a562fa192ed3452db5
Parents: ca33bdd
Author: Akira Ajisaka 
Authored: Mon Nov 7 19:53:43 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Nov 7 19:53:43 2016 +0900

--
 .../src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b970446b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
--
diff --git 
a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
 
b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
index 165c515..e9ecf04 100644
--- 
a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
+++ 
b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
@@ -444,7 +444,7 @@ public class TestHadoopArchives {
   int read; 
   while (true) {
 read = fsdis.read(buffer, readIntoBuffer, buffer.length - 
readIntoBuffer);
-if (read < 0) {
+if (read <= 0) {
   // end of stream:
   if (readIntoBuffer > 0) {
 baos.write(buffer, 0, readIntoBuffer);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[45/50] [abbrv] hadoop git commit: HADOOP-13687. Provide a unified dependency artifact that transitively includes the cloud storage modules shipped with Hadoop. Contributed by Chris Nauroth

2016-11-10 Thread kasha
HADOOP-13687. Provide a unified dependency artifact that transitively includes 
the cloud storage modules shipped with Hadoop. Contributed by Chris Nauroth


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89354f04
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89354f04
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89354f04

Branch: refs/heads/YARN-4752
Commit: 89354f0475efa8e393697b1ddc227c94a76b5923
Parents: ca68f9c
Author: Mingliang Liu 
Authored: Thu Nov 10 08:58:37 2016 -0800
Committer: Mingliang Liu 
Committed: Thu Nov 10 08:58:37 2016 -0800

--
 .../hadoop-cloud-storage/pom.xml| 127 +++
 hadoop-cloud-storage-project/pom.xml|  54 
 hadoop-project/pom.xml  |   5 +
 hadoop-tools/hadoop-openstack/pom.xml   |   2 +-
 4 files changed, 187 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89354f04/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
--
diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml 
b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
new file mode 100644
index 000..7993b83
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
@@ -0,0 +1,127 @@
+
+
+http://maven.apache.org/POM/4.0.0; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  4.0.0
+  
+org.apache.hadoop
+hadoop-project
+3.0.0-alpha2-SNAPSHOT
+../../hadoop-project
+  
+  org.apache.hadoop
+  hadoop-cloud-storage
+  3.0.0-alpha2-SNAPSHOT
+  jar
+
+  Apache Hadoop Cloud Storage
+  Apache Hadoop Cloud Storage
+
+  
+cloud-storage
+  
+
+  
+
+  org.apache.hadoop
+  hadoop-annotations
+  compile
+  
+
+  jdk.tools
+  jdk.tools
+
+  
+
+
+  org.apache.hadoop
+  hadoop-common
+  compile
+  
+
+  javax.servlet
+  servlet-api
+
+
+  commons-logging
+  commons-logging-api
+
+
+  jetty
+  org.mortbay.jetty
+
+
+  org.mortbay.jetty
+  jetty
+
+
+  org.mortbay.jetty
+  servlet-api-2.5
+
+
+  com.sun.jersey
+  jersey-core
+
+
+  com.sun.jersey
+  jersey-json
+
+
+  com.sun.jersey
+  jersey-server
+
+
+  org.eclipse.jdt
+  core
+
+
+  org.apache.avro
+  avro-ipc
+
+
+  net.sf.kosmosfs
+  kfs
+
+
+  net.java.dev.jets3t
+  jets3t
+
+
+  com.jcraft
+  jsch
+
+
+  org.apache.zookeeper
+  zookeeper
+
+  
+
+
+  org.apache.hadoop
+  hadoop-aws
+  compile
+
+
+  org.apache.hadoop
+  hadoop-azure
+  compile
+
+
+  org.apache.hadoop
+  hadoop-openstack
+  compile
+
+  
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89354f04/hadoop-cloud-storage-project/pom.xml
--
diff --git a/hadoop-cloud-storage-project/pom.xml 
b/hadoop-cloud-storage-project/pom.xml
new file mode 100644
index 000..94d4c02
--- /dev/null
+++ b/hadoop-cloud-storage-project/pom.xml
@@ -0,0 +1,54 @@
+
+
+http://maven.apache.org/POM/4.0.0;
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+  http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  4.0.0
+  
+org.apache.hadoop
+hadoop-project
+3.0.0-alpha2-SNAPSHOT
+../hadoop-project
+  
+  org.apache.hadoop
+  hadoop-cloud-storage-project
+  3.0.0-alpha2-SNAPSHOT
+  Apache Hadoop Cloud Storage Project
+  Apache Hadoop Cloud Storage Project
+  pom
+
+  
+hadoop-cloud-storage
+  
+
+  
+
+  
+maven-deploy-plugin
+
+  true
+
+  
+  
+org.apache.rat
+apache-rat-plugin
+
+
+  
+
+  
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89354f04/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index ca567c5..5750a3f 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -983,6 +983,11 @@
 ${project.version}
   
   
+

[48/50] [abbrv] hadoop git commit: [YARN-4752] YARN-5605. Preempt containers (all on one node) to meet the requirement of starved applications (Contributed by Karthik Kambatla via Daniel Templeton)

2016-11-10 Thread kasha
[YARN-4752] YARN-5605. Preempt containers (all on one node) to meet the 
requirement of starved applications (Contributed by Karthik Kambatla via Daniel 
Templeton)

Change-Id: Iee0962377d019dd64dc69a020725d2eaf360858c


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07b9bf30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07b9bf30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07b9bf30

Branch: refs/heads/YARN-4752
Commit: 07b9bf30550a3b83a54a12c0bad7b3267c36e33e
Parents: 3a98419
Author: Daniel Templeton 
Authored: Thu Sep 22 14:08:15 2016 -0700
Committer: Karthik Kambatla 
Committed: Thu Nov 10 14:34:54 2016 -0800

--
 .../hadoop/yarn/util/resource/Resources.java|4 +
 .../scheduler/AppSchedulingInfo.java|   17 +
 .../scheduler/fair/FSAppAttempt.java|  111 +-
 .../scheduler/fair/FSContext.java   |   54 +
 .../scheduler/fair/FSLeafQueue.java |  188 ++-
 .../scheduler/fair/FSParentQueue.java   |6 +-
 .../scheduler/fair/FSPreemptionThread.java  |  172 ++
 .../resourcemanager/scheduler/fair/FSQueue.java |   12 +-
 .../scheduler/fair/FSStarvedApps.java   |   75 +
 .../scheduler/fair/FairScheduler.java   |  271 +---
 .../scheduler/fair/FairSchedulerTestBase.java   |2 +-
 .../scheduler/fair/TestFSLeafQueue.java |   17 +-
 .../fair/TestFairSchedulerPreemption.java   | 1483 --
 13 files changed, 599 insertions(+), 1813 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07b9bf30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 760b0ea..462e02a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -143,6 +143,10 @@ public class Resources {
   public static Resource none() {
 return NONE;
   }
+
+  public static boolean isNone(Resource other) {
+return NONE.equals(other);
+  }
   
   public static Resource unbounded() {
 return UNBOUNDED;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/07b9bf30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index ffb1885..51cd146 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -624,6 +624,23 @@ public class AppSchedulingInfo {
   }
 
   /**
+   * Method to return the next resource request to be serviced.
+   *
+   * In the initial implementation, we just pick any {@link ResourceRequest}
+   * corresponding to the highest priority.
+   *
+   * @return next {@link ResourceRequest} to allocate resources for.
+   */
+  @Unstable
+  public synchronized ResourceRequest getNextResourceRequest() {
+for (ResourceRequest rr:
+resourceRequestMap.get(schedulerKeys.firstKey()).values()) {
+  return rr;
+}
+return null;
+  }
+
+  /**
* Returns if the place (node/rack today) is either blacklisted by the
* application (user) or the system
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/07b9bf30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 

[11/50] [abbrv] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-10 Thread kasha
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
new file mode 100644
index 000..9854a15
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class TestCapacitySchedulerAsyncScheduling {
+  private final int GB = 1024;
+
+  private YarnConfiguration conf;
+
+  RMNodeLabelsManager mgr;
+
+  @Before
+  public void setUp() throws Exception {
+conf = new YarnConfiguration();
+conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+ResourceScheduler.class);
+conf.setBoolean(
+CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
+mgr = new NullRMNodeLabelsManager();
+mgr.init(conf);
+  }
+
+  @Test(timeout = 30)
+  public void testSingleThreadAsyncContainerAllocation() throws Exception {
+testAsyncContainerAllocation(1);
+  }
+
+  @Test(timeout = 30)
+  public void testTwoThreadsAsyncContainerAllocation() throws Exception {
+testAsyncContainerAllocation(2);
+  }
+
+  @Test(timeout = 30)
+  public void testThreeThreadsAsyncContainerAllocation() throws Exception {
+testAsyncContainerAllocation(3);
+  }
+
+  public void testAsyncContainerAllocation(int numThreads) throws Exception {
+conf.setInt(
+CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_THREAD,
+numThreads);
+conf.setInt(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
++ ".scheduling-interval-ms", 100);
+
+final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
+mgr.init(conf);
+
+// inject node label manager
+MockRM rm = new MockRM(TestUtils.getConfigurationWithMultipleQueues(conf)) 
{
+  @Override
+  public RMNodeLabelsManager createNodeLabelManager() {
+return mgr;
+  }
+};
+
+rm.getRMContext().setNodeLabelManager(mgr);
+rm.start();
+
+List nms = new ArrayList<>();
+// Add 10 nodes to the cluster, in the cluster we have 200 GB resource
+for (int i = 0; i < 10; i++) {
+  nms.add(rm.registerNode("h-" + i + ":1234", 20 * GB));
+}
+
+List ams = new ArrayList<>();
+// Add 3 applications to the cluster, one app in one queue
+// the i-th app ask (20 * i) containers. So in total we will have
+// 123G container allocated
+int totalAsked = 3 * GB; // 3 AMs
+
+for (int i = 0; i < 3; i++) {
+  RMApp rmApp = rm.submitApp(1024, "app", "user", null, false,
+  Character.toString((char) (i % 34 + 97)), 1, null, null, false);
+  

[17/50] [abbrv] hadoop git commit: HDFS-11114. Support for running async disk checks in DataNode.

2016-11-10 Thread kasha
HDFS-11114. Support for running async disk checks in DataNode.

This closes #153.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fff1585
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fff1585
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fff1585

Branch: refs/heads/YARN-4752
Commit: 3fff1585875ad322ce6e8acb485275e6a4360823
Parents: 3dbad5d
Author: Arpit Agarwal 
Authored: Mon Nov 7 18:45:53 2016 -0800
Committer: Arpit Agarwal 
Committed: Mon Nov 7 18:45:53 2016 -0800

--
 .../server/datanode/checker/AsyncChecker.java   |  63 +
 .../hdfs/server/datanode/checker/Checkable.java |  49 
 .../datanode/checker/ThrottledAsyncChecker.java | 224 +++
 .../server/datanode/checker/package-info.java   |  26 ++
 .../checker/TestThrottledAsyncChecker.java  | 276 +++
 5 files changed, 638 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fff1585/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
new file mode 100644
index 000..1d534a3
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode.checker;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A class that can be used to schedule an asynchronous check on a given
+ * {@link Checkable}. If the check is successfully scheduled then a
+ * {@link ListenableFuture} is returned.
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface AsyncChecker {
+
+  /**
+   * Schedule an asynchronous check for the given object.
+   *
+   * @param target object to be checked.
+   *
+   * @param context the interpretation of the context depends on the
+   *target.
+   *
+   * @return returns a {@link ListenableFuture} that can be used to
+   * retrieve the result of the asynchronous check.
+   */
+  ListenableFuture schedule(Checkable target, K context);
+
+  /**
+   * Cancel all executing checks and wait for them to complete.
+   * First attempts a graceful cancellation, then cancels forcefully.
+   * Waits for the supplied timeout after both attempts.
+   *
+   * See {@link ExecutorService#awaitTermination} for a description of
+   * the parameters.
+   *
+   * @throws InterruptedException
+   */
+  void shutdownAndWait(long timeout, TimeUnit timeUnit)
+  throws InterruptedException;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fff1585/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/Checkable.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/Checkable.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/Checkable.java
new file mode 100644
index 000..833ebda
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/Checkable.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright 

[10/50] [abbrv] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-10 Thread kasha
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 51b567b..8694efb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -41,6 +41,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CyclicBarrier;
 
+import com.google.common.collect.ImmutableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -49,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
@@ -78,6 +80,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestK
 
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue.User;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
@@ -196,6 +199,7 @@ public class TestLeafQueue {
 
 cs.setRMContext(spyRMContext);
 cs.init(csConf);
+cs.setResourceCalculator(rC);
 cs.start();
 
 when(spyRMContext.getScheduler()).thenReturn(cs);
@@ -268,6 +272,12 @@ public class TestLeafQueue {
 any(Resource.class), any(FiCaSchedulerApp.class), 
any(FiCaSchedulerNode.class), 
 any(RMContainer.class), any(ContainerStatus.class), 
 any(RMContainerEventType.class), any(CSQueue.class), anyBoolean());
+
+// Stub out parent queue's accept and apply.
+doReturn(true).when(parent).accept(any(Resource.class),
+any(ResourceCommitRequest.class));
+doNothing().when(parent).apply(any(Resource.class),
+any(ResourceCommitRequest.class));
 
 return queue;
   }
@@ -339,6 +349,12 @@ public class TestLeafQueue {
 FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0,
 8*GB);
 
+Map apps = ImmutableMap.of(
+app_0.getApplicationAttemptId(), app_0, 
app_1.getApplicationAttemptId(),
+app_1);
+Map nodes = ImmutableMap.of(node_0.getNodeID(),
+node_0);
+
 final int numNodes = 1;
 Resource clusterResource = 
 Resources.createResource(numNodes * (8*GB), numNodes * 16);
@@ -353,8 +369,10 @@ public class TestLeafQueue {
 // Start testing...
 
 // Only 1 container
-a.assignContainers(clusterResource, node_0, new ResourceLimits(
-clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+applyCSAssignment(clusterResource,
+a.assignContainers(clusterResource, node_0,
+new ResourceLimits(clusterResource),
+SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
 assertEquals(
 (int)(node_0.getTotalResource().getMemorySize() * a.getCapacity()) - 
(1*GB),
 a.getMetrics().getAvailableMB());
@@ -526,6 +544,12 @@ public class TestLeafQueue {
 FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0,
 8*GB);
 
+Map apps = ImmutableMap.of(
+app_0.getApplicationAttemptId(), app_0, 
app_1.getApplicationAttemptId(),
+app_1);
+Map nodes = ImmutableMap.of(node_0.getNodeID(),
+node_0);
+
 final int numNodes = 

[44/50] [abbrv] hadoop git commit: HDFS-9337. Validate required params for WebHDFS requests (Contributed by Jagadesh Kiran N)

2016-11-10 Thread kasha
HDFS-9337. Validate required params for WebHDFS requests (Contributed by 
Jagadesh Kiran N)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca68f9cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca68f9cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca68f9cb

Branch: refs/heads/YARN-4752
Commit: ca68f9cb5bc78e996c0daf8024cf0e7a4faef12a
Parents: 86ac1ad
Author: Vinayakumar B 
Authored: Thu Nov 10 16:51:33 2016 +0530
Committer: Vinayakumar B 
Committed: Thu Nov 10 16:51:33 2016 +0530

--
 .../web/resources/NamenodeWebHdfsMethods.java   | 31 +---
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md|  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   |  3 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 21 -
 4 files changed, 51 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca68f9cb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 15195e0..5d9b12a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -424,6 +424,18 @@ public class NamenodeWebHdfsMethods {
 excludeDatanodes, createFlagParam, noredirect);
   }
 
+  /** Validate all required params. */
+  @SuppressWarnings("rawtypes")
+  private void validateOpParams(HttpOpParam op, Param... params) {
+for (Param param : params) {
+  if (param.getValue() == null || param.getValueString() == null || param
+  .getValueString().isEmpty()) {
+throw new IllegalArgumentException("Required param " + param.getName()
++ " for op: " + op.getValueString() + " is null or empty");
+  }
+}
+  }
+
   /** Handle HTTP PUT request. */
   @PUT
   @Path("{" + UriFsPathParam.NAME + ":.*}")
@@ -576,6 +588,7 @@ public class NamenodeWebHdfsMethods {
 }
 case CREATESYMLINK:
 {
+  validateOpParams(op, destination);
   np.createSymlink(destination.getValue(), fullpath,
   PermissionParam.getDefaultSymLinkFsPermission(),
   createParent.getValue());
@@ -583,6 +596,7 @@ public class NamenodeWebHdfsMethods {
 }
 case RENAME:
 {
+  validateOpParams(op, destination);
   final EnumSet s = renameOptions.getValue();
   if (s.isEmpty()) {
 final boolean b = np.rename(fullpath, destination.getValue());
@@ -621,6 +635,7 @@ public class NamenodeWebHdfsMethods {
 }
 case RENEWDELEGATIONTOKEN:
 {
+  validateOpParams(op, delegationTokenArgument);
   final Token token = new 
Token();
   token.decodeFromUrlString(delegationTokenArgument.getValue());
   final long expiryTime = np.renewDelegationToken(token);
@@ -629,16 +644,19 @@ public class NamenodeWebHdfsMethods {
 }
 case CANCELDELEGATIONTOKEN:
 {
+  validateOpParams(op, delegationTokenArgument);
   final Token token = new 
Token();
   token.decodeFromUrlString(delegationTokenArgument.getValue());
   np.cancelDelegationToken(token);
   return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
 }
 case MODIFYACLENTRIES: {
+  validateOpParams(op, aclPermission);
   np.modifyAclEntries(fullpath, aclPermission.getAclPermission(true));
   return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
 }
 case REMOVEACLENTRIES: {
+  validateOpParams(op, aclPermission);
   np.removeAclEntries(fullpath, aclPermission.getAclPermission(false));
   return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
 }
@@ -651,10 +669,12 @@ public class NamenodeWebHdfsMethods {
   return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
 }
 case SETACL: {
+  validateOpParams(op, aclPermission);
   np.setAcl(fullpath, aclPermission.getAclPermission(true));
   return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
 }
 case SETXATTR: {
+  validateOpParams(op, xattrName, xattrSetFlag);
   np.setXAttr(
   fullpath,
   XAttrHelper.buildXAttr(xattrName.getXAttrName(),
@@ -662,6 +682,7 @@ public class NamenodeWebHdfsMethods {
   return 

[46/50] [abbrv] hadoop git commit: YARN-5834. TestNodeStatusUpdater.testNMRMConnectionConf compares nodemanager wait time to the incorrect value. (Chang Li via kasha)

2016-11-10 Thread kasha
YARN-5834. TestNodeStatusUpdater.testNMRMConnectionConf compares nodemanager 
wait time to the incorrect value. (Chang Li via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a984195
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a984195
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a984195

Branch: refs/heads/YARN-4752
Commit: 3a98419532687e4362ffc26abbc1264232820db7
Parents: 89354f0
Author: Karthik Kambatla 
Authored: Thu Nov 10 14:08:51 2016 -0800
Committer: Karthik Kambatla 
Committed: Thu Nov 10 14:08:51 2016 -0800

--
 .../hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a984195/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index 977cb76..59a4563 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -1524,13 +1524,13 @@ public class TestNodeStatusUpdater {
   long t = System.currentTimeMillis();
   long duration = t - waitStartTime;
   boolean waitTimeValid = (duration >= nmRmConnectionWaitMs) &&
-  (duration < (connectionWaitMs + delta));
+  (duration < (nmRmConnectionWaitMs + delta));
 
   if(!waitTimeValid) {
 // throw exception if NM doesn't retry long enough
 throw new Exception("NM should have tried re-connecting to RM during " 
+
-  "period of at least " + connectionWaitMs + " ms, but " +
-  "stopped retrying within " + (connectionWaitMs + delta) +
+  "period of at least " + nmRmConnectionWaitMs + " ms, but " +
+  "stopped retrying within " + (nmRmConnectionWaitMs + delta) +
   " ms: " + e, e);
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[39/50] [abbrv] hadoop git commit: YARN-5856. Unnecessary duplicate start container request sent to NM State store. Contributed by Varun Saxena.

2016-11-10 Thread kasha
YARN-5856. Unnecessary duplicate start container request sent to NM State 
store. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de3a5f8d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de3a5f8d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de3a5f8d

Branch: refs/heads/YARN-4752
Commit: de3a5f8d08f64d0c2021a84b40e63e716da2321c
Parents: bcc15c6
Author: Naganarasimha 
Authored: Thu Nov 10 05:39:20 2016 +0530
Committer: Naganarasimha 
Committed: Thu Nov 10 05:42:30 2016 +0530

--
 .../server/nodemanager/containermanager/ContainerManagerImpl.java  | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3a5f8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index ab5827e..c7810f9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -1034,8 +1034,6 @@ public class ContainerManagerImpl extends 
CompositeService implements
 containerTokenIdentifier.getVersion(), request);
 dispatcher.getEventHandler().handle(
   new ApplicationContainerInitEvent(container));
-this.context.getNMStateStore().storeContainer(containerId,
-containerTokenIdentifier.getVersion(), request);
 
 this.context.getContainerTokenSecretManager().startContainerSuccessful(
   containerTokenIdentifier);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[35/50] [abbrv] hadoop git commit: YARN-4329. [YARN-5437] Allow fetching exact reason as to why a submitted app is in ACCEPTED state in Fair Scheduler (Contributed by Yufei Gu)

2016-11-10 Thread kasha
YARN-4329. [YARN-5437] Allow fetching exact reason as to why a submitted app
is in ACCEPTED state in Fair Scheduler (Contributed by Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59ee8b7a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59ee8b7a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59ee8b7a

Branch: refs/heads/YARN-4752
Commit: 59ee8b7a88603e94b5661a8d5d088f7aa99fe049
Parents: 822ae88
Author: Daniel Templeton 
Authored: Wed Nov 9 13:11:37 2016 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 9 13:11:37 2016 -0800

--
 .../scheduler/fair/FSAppAttempt.java| 71 +++-
 .../scheduler/fair/FairScheduler.java   |  4 +-
 .../scheduler/fair/MaxRunningAppsEnforcer.java  | 50 --
 .../fair/TestMaxRunningAppsEnforcer.java|  2 +-
 4 files changed, 103 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59ee8b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 11922d9..df20117 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -766,8 +766,18 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 // The desired container won't fit here, so reserve
 if (isReservable(capability) &&
 reserve(request, node, reservedContainer, type, schedulerKey)) {
+  if (isWaitingForAMContainer()) {
+updateAMDiagnosticMsg(capability,
+" exceed the available resources of the node and the request is"
++ " reserved");
+  }
   return FairScheduler.CONTAINER_RESERVED;
 } else {
+  if (isWaitingForAMContainer()) {
+updateAMDiagnosticMsg(capability,
+" exceed the available resources of the node and the request 
cannot"
++ " be reserved");
+  }
   if (LOG.isDebugEnabled()) {
 LOG.debug("Couldn't creating reservation for " +
 getName() + ",at priority " +  request.getPriority());
@@ -920,23 +930,31 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 ResourceRequest rackRequest = getResourceRequest(key, node.getRackName());
 ResourceRequest nodeRequest = getResourceRequest(key, node.getNodeName());
 
-return
-// There must be outstanding requests at the given priority:
+boolean ret = true;
+if (!(// There must be outstanding requests at the given priority:
 anyRequest != null && anyRequest.getNumContainers() > 0 &&
-// If locality relaxation is turned off at *-level, there must be a
-// non-zero request for the node's rack:
-(anyRequest.getRelaxLocality() ||
-(rackRequest != null && rackRequest.getNumContainers() > 0)) &&
-// If locality relaxation is turned off at rack-level, there must 
be a
-// non-zero request at the node:
-(rackRequest == null || rackRequest.getRelaxLocality() ||
-(nodeRequest != null && nodeRequest.getNumContainers() > 0)) &&
-// The requested container must be able to fit on the node:
-Resources.lessThanOrEqual(RESOURCE_CALCULATOR, null,
-anyRequest.getCapability(),
-node.getRMNode().getTotalCapability()) &&
-// The requested container must fit in queue maximum share:
-getQueue().fitsInMaxShare(anyRequest.getCapability());
+// If locality relaxation is turned off at *-level, there must be a
+// non-zero request for the node's rack:
+(anyRequest.getRelaxLocality() ||
+(rackRequest != null && rackRequest.getNumContainers() > 0)) &&
+// If locality relaxation is turned off at rack-level, there must be a
+// non-zero request at the node:
+(rackRequest == null || rackRequest.getRelaxLocality() ||
+(nodeRequest != null 

[36/50] [abbrv] hadoop git commit: YARN-4498. Application level node labels stats to be available in REST (addendum patch). Contributed by Bibin A Chundatt.

2016-11-10 Thread kasha
YARN-4498. Application level node labels stats to be available in REST 
(addendum patch). Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edbee9e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edbee9e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edbee9e6

Branch: refs/heads/YARN-4752
Commit: edbee9e609e7f31d188660717ff9d3fb9f606abb
Parents: 59ee8b7
Author: Naganarasimha 
Authored: Thu Nov 10 05:00:05 2016 +0530
Committer: Naganarasimha 
Committed: Thu Nov 10 05:00:05 2016 +0530

--
 .../server/resourcemanager/webapp/dao/AppInfo.java|  4 ++--
 .../webapp/TestRMWebServiceAppsNodelabel.java | 14 +-
 2 files changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/edbee9e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index 3bd6cff..19cbe43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -111,7 +111,7 @@ public class AppInfo {
   protected String appNodeLabelExpression;
   protected String amNodeLabelExpression;
 
-  protected ResourcesInfo resourceInfo;
+  protected ResourcesInfo resourceInfo = null;
 
   public AppInfo() {
   } // JAXB needs this
@@ -232,7 +232,7 @@ public class AppInfo {
   .getApplicationAttempt(attempt.getAppAttemptId());
   resourceInfo = null != ficaAppAttempt
   ? new ResourcesInfo(ficaAppAttempt.getSchedulingResourceUsage())
-  : new ResourcesInfo();
+  : null;
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/edbee9e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java
index a931b0b..25a712c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java
@@ -19,10 +19,11 @@
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.junit.Assert.assertTrue;
 
 import java.util.ArrayList;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.Set;
 
 import javax.ws.rs.core.MediaType;
@@ -153,10 +154,13 @@ public class TestRMWebServiceAppsNodelabel extends 
JerseyTestBase {
 JSONObject json = response.getEntity(JSONObject.class);
 JSONObject apps = json.getJSONObject("apps");
 assertEquals("incorrect number of elements", 1, apps.length());
-JSONObject jsonObject =
-
apps.getJSONArray("app").getJSONObject(0).getJSONObject("resourceInfo");
-Iterator keys = jsonObject.keys();
-assertEquals("For finshed app no values expected", false, keys.hasNext());
+try {
+  apps.getJSONArray("app").getJSONObject(0).getJSONObject("resourceInfo");
+  fail("resourceInfo object shouldnt be available for finished apps");
+} catch (Exception e) {
+  assertTrue("resourceInfo shouldn't be available for finished apps",
+  true);
+}
 rm.stop();
   }
 



[42/50] [abbrv] hadoop git commit: YARN-5843. Incorrect documentation for timeline service entityType/events REST end points (Bibin A Chundatt via Varun Saxena)

2016-11-10 Thread kasha
YARN-5843. Incorrect documentation for timeline service entityType/events REST 
end points (Bibin A Chundatt via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8bc7a84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8bc7a84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8bc7a84

Branch: refs/heads/YARN-4752
Commit: c8bc7a84758f360849b96c2e19c8c41b7e9dbb65
Parents: c202a10
Author: Varun Saxena 
Authored: Thu Nov 10 12:10:03 2016 +0530
Committer: Varun Saxena 
Committed: Thu Nov 10 12:10:03 2016 +0530

--
 .../src/site/markdown/TimelineServer.md | 21 +---
 1 file changed, 9 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8bc7a84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index f09909b..ae9faae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -632,7 +632,7 @@ Use the following URI to obtain all the entity objects of a 
given
 
 ### HTTP Operations Supported:
 
-GET  http://localhost:8188/ws/v1/timeline/DS_APP_ATTEMPT
+GET
 
 
 ### Query Parameters Supported:
@@ -687,7 +687,7 @@ will be returned as a collection of container objects. See 
also
 
 HTTP Request:
 
-GET http:///ws/v1/timeline/{entity-type}
+GET http://localhost:8188/ws/v1/timeline/DS_APP_ATTEMPT
 
 Response Header:
 
@@ -795,7 +795,7 @@ String.
 
 HTTP Request:
 
-GET http:///ws/v1/timeline/{entity-type}/{entity-id}
+GET 
http://localhost:8188/ws/v1/timeline/DS_APP_ATTEMPT/appattempt_1430424020775_0003_01
 
 Response Header:
 
@@ -805,8 +805,6 @@ Response Header:
 
 Response Body:
 
-
http://localhost:8188/ws/v1/timeline/DS_APP_ATTEMPT/appattempt_1430424020775_0003_01
-
 {
   "events":[
 {
@@ -847,15 +845,17 @@ Use the following URI to obtain the event objects of the 
given `entityType`.
 
 ### Query Parameters Supported:
 
-1. `entityIds` - The entity IDs to retrieve events for.
+1. `entityId` - The entity IDs to retrieve events for. If null, no events will 
be returned.
+  Multiple entityIds can be given as comma separated values.
 1. `limit` - A limit on the number of events to return for each entity. If 
null,
   defaults to 100 events per entity.
 1. `windowStart` - If not null, retrieves only events later than the given time
   (exclusive)
 1. `windowEnd` - If not null, retrieves only events earlier than the given time
   (inclusive)
-1. `eventTypes` - Restricts the events returned to the given types. If null,
-  events of all types will be returned.
+1. `eventType` - Restricts the events returned to the given types. If null,
+  events of all types will be returned. Multiple eventTypes can be given as
+  comma separated values.
 
 ### Elements of the `events` (Timeline Entity List) Object
 
@@ -882,7 +882,7 @@ Below is the elements of a single event object.  Note that 
`value` of
 
 HTTP Request:
 
-GET http:///ws/v1/timeline/entity%20type%200/events
+GET 
http://localhost:8188/ws/v1/timeline/DS_APP_ATTEMPT/events?entityId=appattempt_1430424020775_0003_01
 
 Response Header:
 
@@ -893,9 +893,6 @@ Response Header:
 Response Body:
 
 
-GET 
http://localhost:8188/ws/v1/timeline/DS_APP_ATTEMPT/events?entityId=appattempt_1430424020775_0003_01
-
-
 {
 "events": [
   {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[30/50] [abbrv] hadoop git commit: HADOOP-13800. Remove unused HADOOP_AUDIT_LOGGER from hadoop-env.sh. Contributed by Yiqun Lin.

2016-11-10 Thread kasha
HADOOP-13800. Remove unused HADOOP_AUDIT_LOGGER from hadoop-env.sh. Contributed 
by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0748800
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0748800
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0748800

Branch: refs/heads/YARN-4752
Commit: c074880096bd41470a3358f6002f30b57a725375
Parents: 09f43fa
Author: Akira Ajisaka 
Authored: Wed Nov 9 22:02:40 2016 +0900
Committer: Akira Ajisaka 
Committed: Wed Nov 9 22:02:40 2016 +0900

--
 .../hadoop-common/src/main/conf/hadoop-env.sh  | 6 --
 1 file changed, 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0748800/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh 
b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index a78f3f6..4fb9be9 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -223,12 +223,6 @@ esac
 # Java property: hadoop.security.logger
 # export HADOOP_SECURITY_LOGGER=INFO,NullAppender
 
-# Default log level for file system audit messages.
-# Generally, this is specifically set in the namenode-specific
-# options line.
-# Java property: hdfs.audit.logger
-# export HADOOP_AUDIT_LOGGER=INFO,NullAppender
-
 # Default process priority level
 # Note that sub-processes will also run at this level!
 # export HADOOP_NICENESS=0


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] [abbrv] hadoop git commit: MAPREDUCE-6790. Update jackson from 1.9.13 to 2.x in hadoop-mapreduce.

2016-11-10 Thread kasha
MAPREDUCE-6790. Update jackson from 1.9.13 to 2.x in hadoop-mapreduce.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca33bdd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca33bdd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca33bdd5

Branch: refs/heads/YARN-4752
Commit: ca33bdd5c6afd45110edb6961d1c492bcb009472
Parents: 049e7d2
Author: Akira Ajisaka 
Authored: Mon Nov 7 11:19:21 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Nov 7 11:19:21 2016 +0900

--
 .../hadoop-mapreduce-client-app/pom.xml|  4 
 .../jobhistory/JobHistoryEventHandler.java |  4 ++--
 .../jobhistory/TestJobHistoryEventHandler.java |  5 +++--
 .../hadoop-mapreduce-client-core/pom.xml   |  4 
 .../org/apache/hadoop/mapred/QueueManager.java |  8 
 .../org/apache/hadoop/mapreduce/JobSubmitter.java  | 17 ++---
 .../mapreduce/util/JobHistoryEventUtils.java   |  9 +
 .../apache/hadoop/mapreduce/v2/TestRMNMInfo.java   |  4 ++--
 8 files changed, 30 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca33bdd5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
index ef0ed6e..8634903 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
@@ -85,6 +85,10 @@
   test-jar
   test

+
+  com.fasterxml.jackson.core
+  jackson-databind
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca33bdd5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index e61bbaa..0403356 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -76,9 +76,9 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.node.JsonNodeFactory;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.JsonNodeFactory;
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.ClientHandlerException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca33bdd5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index bfe764d..54a2fad 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -73,8 +73,6 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.timeline.TimelineStore;
-import org.codehaus.jackson.JsonNode;
-import org.codehaus.jackson.map.ObjectMapper;
 import org.junit.After;
 

[20/50] [abbrv] hadoop git commit: HADOOP-13707. If kerberos is enabled while HTTP SPNEGO is not configured, some links cannot be accessed. Contributed by Yuanbo Liu.

2016-11-10 Thread kasha
HADOOP-13707. If kerberos is enabled while HTTP SPNEGO is not configured, some 
links cannot be accessed. Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dbb133cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dbb133cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dbb133cc

Branch: refs/heads/YARN-4752
Commit: dbb133ccfc00e20622a5dbf7a6e1126fb63d7487
Parents: 026b39a
Author: Brahma Reddy Battula 
Authored: Tue Nov 8 20:52:36 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Nov 8 20:55:10 2016 +0530

--
 .../org/apache/hadoop/conf/ConfServlet.java |  8 -
 .../hadoop/http/AdminAuthorizedServlet.java | 11 +--
 .../org/apache/hadoop/http/HttpServer2.java | 32 ++--
 .../org/apache/hadoop/jmx/JMXJsonServlet.java   |  8 -
 .../java/org/apache/hadoop/log/LogLevel.java| 11 +--
 .../org/apache/hadoop/http/TestHttpServer.java  | 17 ++-
 6 files changed, 75 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbb133cc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
index cdc9581..cfd7b97 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.conf;
 import java.io.IOException;
 import java.io.Writer;
 
+import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
@@ -58,7 +59,12 @@ public class ConfServlet extends HttpServlet {
   public void doGet(HttpServletRequest request, HttpServletResponse response)
   throws ServletException, IOException {
 
-if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
+// If user is a static user and auth Type is null, that means
+// there is a non-security environment and no need authorization,
+// otherwise, do the authorization.
+final ServletContext servletContext = getServletContext();
+if (!HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) &&
+!HttpServer2.isInstrumentationAccessAllowed(servletContext,
request, response)) {
   return;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbb133cc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
index a4b05a1..456e89f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.http;
 
 import java.io.IOException;
 
+import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
@@ -35,9 +36,13 @@ public class AdminAuthorizedServlet extends DefaultServlet {
 
   @Override
   protected void doGet(HttpServletRequest request, HttpServletResponse 
response)
- throws ServletException, IOException {
-// Do the authorization
-if (HttpServer2.hasAdministratorAccess(getServletContext(), request,
+  throws ServletException, IOException {
+// If user is a static user and auth Type is null, that means
+// there is a non-security environment and no need authorization,
+// otherwise, do the authorization.
+final ServletContext servletContext = getServletContext();
+if (HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) ||
+HttpServer2.hasAdministratorAccess(servletContext, request,
 response)) {
   // Authorization is done. Just call super.
   super.doGet(request, response);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbb133cc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 

[26/50] [abbrv] hadoop git commit: HDFS-11083. Add unit test for DFSAdmin -report command. Contributed by Xiaobing Zhou

2016-11-10 Thread kasha
HDFS-11083. Add unit test for DFSAdmin -report command. Contributed by Xiaobing 
Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62d8c17d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62d8c17d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62d8c17d

Branch: refs/heads/YARN-4752
Commit: 62d8c17dfda75a6a6de06aedad2f22699a1cbad6
Parents: e1c6ef2
Author: Mingliang Liu 
Authored: Tue Nov 8 17:04:25 2016 -0800
Committer: Mingliang Liu 
Committed: Tue Nov 8 17:04:30 2016 -0800

--
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 140 +++
 1 file changed, 140 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d8c17d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index c7ba9d2..9486bd1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -26,13 +26,22 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHEC
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 
+import org.apache.commons.lang.text.StrBuilder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationUtil;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -452,4 +461,135 @@ public class TestDFSAdmin {
 assertThat(outs.get(offset + 2),
 is(allOf(containsString("To:"), containsString("6";
   }
+
+  private static String scanIntoString(final ByteArrayOutputStream baos) {
+final StrBuilder sb = new StrBuilder();
+final Scanner scanner = new Scanner(baos.toString());
+while (scanner.hasNextLine()) {
+  sb.appendln(scanner.nextLine());
+}
+scanner.close();
+return sb.toString();
+  }
+
+  @Test(timeout = 3)
+  public void testReportCommand() throws Exception {
+redirectStream();
+
+/* init conf */
+final Configuration dfsConf = new HdfsConfiguration();
+dfsConf.setInt(
+DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+500); // 0.5s
+dfsConf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
+final Path baseDir = new Path(
+PathUtils.getTestDir(getClass()).getAbsolutePath(),
+GenericTestUtils.getMethodName());
+dfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.toString());
+
+final int numDn = 3;
+
+/* init cluster */
+try(MiniDFSCluster miniCluster = new MiniDFSCluster
+.Builder(dfsConf)
+.numDataNodes(numDn).build()) {
+
+  miniCluster.waitActive();
+  assertEquals(numDn, miniCluster.getDataNodes().size());
+
+  /* local vars */
+  final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+  final DFSClient client = miniCluster.getFileSystem().getClient();
+
+  /* run and verify report command */
+  resetStream();
+  assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
+  verifyNodesAndCorruptBlocks(numDn, numDn, 0, client);
+
+  /* shut down one DN */
+  final List datanodes = miniCluster.getDataNodes();
+  final DataNode last = datanodes.get(datanodes.size() - 1);
+  last.shutdown();
+  miniCluster.setDataNodeDead(last.getDatanodeId());
+
+  /* run and verify report command */
+  assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
+  verifyNodesAndCorruptBlocks(numDn, numDn - 1, 0, client);
+
+  /* corrupt one block */
+  final short replFactor = 1;
+  final long fileLength = 512L;
+  final FileSystem fs = miniCluster.getFileSystem();
+  final Path file = new 

[09/50] [abbrv] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-10 Thread kasha
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
index 42a8872..d875969 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
@@ -49,8 +49,12 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsMana
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -123,6 +127,27 @@ public class TestParentQueue {
 return application;
   }
 
+  private void applyAllocationToQueue(Resource clusterResource,
+  int allocatedMem,
+  CSQueue queue) {
+// Call accept & apply for queue
+ResourceCommitRequest request = mock(ResourceCommitRequest.class);
+when(request.anythingAllocatedOrReserved()).thenReturn(true);
+ContainerAllocationProposal allocation = mock(
+ContainerAllocationProposal.class);
+when(request.getTotalReleasedResource()).thenReturn(Resources.none());
+
when(request.getFirstAllocatedOrReservedContainer()).thenReturn(allocation);
+SchedulerContainer scontainer = mock(SchedulerContainer.class);
+when(allocation.getAllocatedOrReservedContainer()).thenReturn(scontainer);
+when(allocation.getAllocatedOrReservedResource()).thenReturn(
+Resources.createResource(allocatedMem));
+when(scontainer.getNodePartition()).thenReturn("");
+
+if (queue.accept(clusterResource, request)) {
+  queue.apply(clusterResource, request);
+}
+  }
+
   private void stubQueueAllocation(final CSQueue queue, 
   final Resource clusterResource, final FiCaSchedulerNode node, 
   final int allocation) {
@@ -157,7 +182,7 @@ public class TestParentQueue {
 // Next call - nothing
 if (allocation > 0) {
   doReturn(new CSAssignment(Resources.none(), type)).when(queue)
-  .assignContainers(eq(clusterResource), eq(node),
+  .assignContainers(eq(clusterResource), any(PlacementSet.class),
   any(ResourceLimits.class), any(SchedulingMode.class));
 
   // Mock the node's resource availability
@@ -168,7 +193,7 @@ public class TestParentQueue {
 
 return new CSAssignment(allocatedResource, type);
   }
-}).when(queue).assignContainers(eq(clusterResource), eq(node),
+}).when(queue).assignContainers(eq(clusterResource), 
any(PlacementSet.class),
 any(ResourceLimits.class), any(SchedulingMode.class));
   }
   
@@ -205,8 +230,8 @@ public class TestParentQueue {
 setupSingleLevelQueues(csConf);
 
 Map queues = new HashMap();
-CSQueue root = 
-CapacityScheduler.parseQueue(csContext, csConf, null, 
+CSQueue root =
+CapacityScheduler.parseQueue(csContext, csConf, null,
 CapacitySchedulerConfiguration.ROOT, queues, queues, 
 TestUtils.spyHook);
 
@@ -245,13 +270,18 @@ public class TestParentQueue {
 // Now, A should get the scheduling opportunity since A=0G/6G, B=1G/14G
 stubQueueAllocation(a, clusterResource, node_1, 2*GB);
 stubQueueAllocation(b, clusterResource, node_1, 1*GB);
-root.assignContainers(clusterResource, 

[41/50] [abbrv] hadoop git commit: YARN-5862. TestDiskFailures.testLocalDirsFailures failed (Yufei Gu via Varun Saxena)

2016-11-10 Thread kasha
YARN-5862. TestDiskFailures.testLocalDirsFailures failed (Yufei Gu via Varun 
Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c202a109
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c202a109
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c202a109

Branch: refs/heads/YARN-4752
Commit: c202a10923a46a6e7f7f518e6e3dbb6545dbb971
Parents: 71adf44
Author: Varun Saxena 
Authored: Thu Nov 10 11:41:34 2016 +0530
Committer: Varun Saxena 
Committed: Thu Nov 10 11:41:34 2016 +0530

--
 .../java/org/apache/hadoop/yarn/server/TestDiskFailures.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c202a109/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
index c7e34d8..bf82ec5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
@@ -244,7 +244,9 @@ public class TestDiskFailures {
 for (int i = 0; i < 10; i++) {
   Iterator iter = yarnCluster.getResourceManager().getRMContext()
   .getRMNodes().values().iterator();
-  if ((iter.next().getState() != NodeState.UNHEALTHY) == isHealthy) {
+  // RMNode # might be zero because of timing related issue.
+  if (iter.hasNext() &&
+  (iter.next().getState() != NodeState.UNHEALTHY) == isHealthy) {
 break;
   }
   // wait for the node health info to go to RM


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/50] [abbrv] hadoop git commit: HADOOP-13797 Remove hardcoded absolute path for ls. Contributed by Christine Koppelt

2016-11-10 Thread kasha
HADOOP-13797 Remove hardcoded absolute path for ls. Contributed by Christine 
Koppelt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7689557
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7689557
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7689557

Branch: refs/heads/YARN-4752
Commit: f76895573d0166b4b582ff69c3f9c159ab14661f
Parents: b970446
Author: Steve Loughran 
Authored: Mon Nov 7 12:36:10 2016 +
Committer: Steve Loughran 
Committed: Mon Nov 7 12:36:10 2016 +

--
 .../hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7689557/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index 0745057..5fc9869 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -237,7 +237,7 @@ public abstract class Shell {
   /** Return a command to get permission information. */
   public static String[] getGetPermissionCommand() {
 return (WINDOWS) ? new String[] { getWinUtilsPath(), "ls", "-F" }
- : new String[] { "/bin/ls", "-ld" };
+ : new String[] { "ls", "-ld" };
   }
 
   /** Return a command to set permission. */


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[49/50] [abbrv] hadoop git commit: YARN-5821. Drop left-over preemption-related code and clean up method visibilities in the Schedulable hierarchy (Contributed by Karthik Kambatla via Daniel Templeton

2016-11-10 Thread kasha
YARN-5821. Drop left-over preemption-related code and clean up method 
visibilities in the Schedulable hierarchy
(Contributed by Karthik Kambatla via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3b5c489
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3b5c489
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3b5c489

Branch: refs/heads/YARN-4752
Commit: e3b5c489bd96e51928f15604e2f99b21cf590997
Parents: 07b9bf3
Author: Daniel Templeton 
Authored: Thu Nov 3 14:50:09 2016 -0700
Committer: Karthik Kambatla 
Committed: Thu Nov 10 14:48:15 2016 -0800

--
 .../scheduler/fair/FSAppAttempt.java|  77 --
 .../scheduler/fair/FSLeafQueue.java | 102 +++
 .../scheduler/fair/FSParentQueue.java   |  46 ++---
 .../resourcemanager/scheduler/fair/FSQueue.java |  20 ++--
 .../scheduler/fair/Schedulable.java |  29 +++---
 .../scheduler/fair/FakeSchedulable.java |   5 -
 .../scheduler/fair/TestSchedulingPolicy.java|   5 -
 7 files changed, 63 insertions(+), 221 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3b5c489/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index d9fdaba..8b4627e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -18,18 +18,17 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
-import java.io.Serializable;
 import java.text.DecimalFormat;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
-import java.util.Comparator;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import static com.sun.xml.internal.xsom.impl.UName.comparator;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -81,7 +80,6 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
   private FairScheduler scheduler;
   private FSQueue fsQueue;
   private Resource fairShare = Resources.createResource(0, 0);
-  private RMContainerComparator comparator = new RMContainerComparator();
 
   // Preemption related variables
   private Resource fairshareStarvation = Resources.none();
@@ -121,7 +119,7 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 this.resourceWeights = new ResourceWeights();
   }
 
-  public ResourceWeights getResourceWeights() {
+  ResourceWeights getResourceWeights() {
 return resourceWeights;
   }
 
@@ -132,7 +130,7 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 return queue.getMetrics();
   }
 
-  public void containerCompleted(RMContainer rmContainer,
+  void containerCompleted(RMContainer rmContainer,
   ContainerStatus containerStatus, RMContainerEventType event) {
 try {
   writeLock.lock();
@@ -491,7 +489,7 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
* @param schedulerKey Scheduler Key
* @param level NodeType
*/
-  public void resetAllowedLocalityLevel(
+  void resetAllowedLocalityLevel(
   SchedulerRequestKey schedulerKey, NodeType level) {
 NodeType old;
 try {
@@ -513,45 +511,45 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   }
 
   // Preemption related methods
-  public Resource getStarvation() {
+  Resource getStarvation() {
 return Resources.add(fairshareStarvation, minshareStarvation);
   }
 
-  public void setMinshareStarvation(Resource starvation) {
+  void setMinshareStarvation(Resource starvation) {
 this.minshareStarvation = starvation;
   }
 
-  public void resetMinshareStarvation() {
+  void resetMinshareStarvation() {
 this.minshareStarvation = Resources.none();
   }
 
-  public void 

[01/50] [abbrv] hadoop git commit: YARN-5772. [YARN-3368] Replace old Hadoop logo with new one (Akhil P B Tan via Sunil G) [Forced Update!]

2016-11-10 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/YARN-4752 9568f41e5 -> cda677655 (forced update)


YARN-5772. [YARN-3368] Replace old Hadoop logo with new one (Akhil P B via
Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e93f900b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e93f900b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e93f900b

Branch: refs/heads/YARN-4752
Commit: e93f900b5544b4c7fc1b5279baff81ded6be56f5
Parents: 825de90
Author: sunilg 
Authored: Thu Oct 27 10:45:47 2016 +0530
Committer: Wangda Tan 
Committed: Sun Nov 6 13:13:31 2016 -0800

--
 .../webapp/public/assets/images/hadoop_logo.png | Bin 26495 -> 31716 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e93f900b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
index 275d39e..d481395 100644
Binary files 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
 and 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
 differ


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[32/50] [abbrv] hadoop git commit: HADOOP-13590. Retry until TGT expires even if the UGI renewal thread encountered exception.

2016-11-10 Thread kasha
HADOOP-13590. Retry until TGT expires even if the UGI renewal thread 
encountered exception.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/367c3d41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/367c3d41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/367c3d41

Branch: refs/heads/YARN-4752
Commit: 367c3d41217728c2e61252c5a5235e5bc1f9822f
Parents: 280357c
Author: Xiao Chen 
Authored: Wed Nov 9 09:07:12 2016 -0800
Committer: Xiao Chen 
Committed: Wed Nov 9 09:07:12 2016 -0800

--
 .../hadoop/security/UserGroupInformation.java   |  70 -
 .../hadoop/security/TestUGIWithMiniKdc.java | 144 +++
 .../security/TestUserGroupInformation.java  |  93 
 3 files changed, 303 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/367c3d41/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 111c3f8..82603a4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -43,6 +43,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 import javax.security.auth.Subject;
 import javax.security.auth.callback.CallbackHandler;
@@ -54,14 +55,18 @@ import javax.security.auth.login.LoginContext;
 import javax.security.auth.login.LoginException;
 import javax.security.auth.spi.LoginModule;
 
+import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
 import org.apache.hadoop.metrics2.lib.MutableQuantiles;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
@@ -85,7 +90,8 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "HBase", "Hive", 
"Oozie"})
 @InterfaceStability.Evolving
 public class UserGroupInformation {
-  private static final Logger LOG = LoggerFactory.getLogger(
+  @VisibleForTesting
+  static final Logger LOG = LoggerFactory.getLogger(
   UserGroupInformation.class);
 
   /**
@@ -121,6 +127,10 @@ public class UserGroupInformation {
 MutableRate loginFailure;
 @Metric("GetGroups") MutableRate getGroups;
 MutableQuantiles[] getGroupsQuantiles;
+@Metric("Renewal failures since startup")
+private MutableGaugeLong renewalFailuresTotal;
+@Metric("Renewal failures since last successful login")
+private MutableGaugeInt renewalFailures;
 
 static UgiMetrics create() {
   return DefaultMetricsSystem.instance().register(new UgiMetrics());
@@ -138,6 +148,10 @@ public class UserGroupInformation {
 }
   }
 }
+
+MutableGaugeInt getRenewalFailures() {
+  return renewalFailures;
+}
   }
   
   /**
@@ -963,6 +977,7 @@ public class UserGroupInformation {
   return;
 }
 long nextRefresh = getRefreshTime(tgt);
+RetryPolicy rp = null;
 while (true) {
   try {
 long now = Time.now();
@@ -986,13 +1001,40 @@ public class UserGroupInformation {
 }
 nextRefresh = Math.max(getRefreshTime(tgt),
   now + kerberosMinSecondsBeforeRelogin);
+metrics.renewalFailures.set(0);
+rp = null;
   } catch (InterruptedException ie) {
 LOG.warn("Terminating renewal thread");
 return;
   } catch (IOException ie) {
-LOG.warn("Exception encountered while running the" +
-" renewal command. Aborting renew thread. " + ie);
-return;
+metrics.renewalFailuresTotal.incr();
+final long tgtEndTime = tgt.getEndTime().getTime();
+LOG.warn("Exception encountered while running the 

[25/50] [abbrv] hadoop git commit: HADOOP-13789. Hadoop Common includes generated test protos in both jar and test-jar. Contributed by Sean Busbey.

2016-11-10 Thread kasha
HADOOP-13789. Hadoop Common includes generated test protos in both jar and 
test-jar. Contributed by Sean Busbey.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1c6ef2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1c6ef2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1c6ef2e

Branch: refs/heads/YARN-4752
Commit: e1c6ef2efa9d87fdfc7474ca63998a13a3929874
Parents: 77c13c3
Author: Andrew Wang 
Authored: Tue Nov 8 16:33:55 2016 -0800
Committer: Andrew Wang 
Committed: Tue Nov 8 16:33:55 2016 -0800

--
 hadoop-common-project/hadoop-common/pom.xml |   6 +-
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   2 -
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   2 -
 .../hadoop-mapreduce-client-common/pom.xml  |   2 -
 .../hadoop-mapreduce-client-shuffle/pom.xml |   2 -
 .../hadoop/maven/plugin/protoc/ProtocMojo.java  | 228 +--
 .../maven/plugin/protoc/ProtocRunner.java   | 283 +++
 .../maven/plugin/protoc/ProtocTestMojo.java |  61 
 .../maven/plugin/protoc/package-info.java   |  98 +++
 .../hadoop-yarn/hadoop-yarn-api/pom.xml |   2 -
 .../hadoop-yarn/hadoop-yarn-client/pom.xml  |   2 -
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |   2 -
 .../pom.xml |   2 -
 .../hadoop-yarn-server-common/pom.xml   |   2 -
 .../hadoop-yarn-server-nodemanager/pom.xml  |   2 -
 .../hadoop-yarn-server-resourcemanager/pom.xml  |   8 +-
 .../hadoop-yarn-server-tests/pom.xml|   6 +-
 17 files changed, 454 insertions(+), 256 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1c6ef2e/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 645d495..37f0b70 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -369,7 +369,6 @@
   
   
 compile-protoc
-generate-sources
 
   protoc
 
@@ -397,14 +396,12 @@
   GenericRefreshProtocol.proto
 
   
-  
${project.build.directory}/generated-sources/java
 
   
   
 compile-test-protoc
-generate-test-sources
 
-  protoc
+  test-protoc
 
 
   ${protobuf.version}
@@ -419,7 +416,6 @@
   test_rpc_service.proto
 
   
-  
${project.build.directory}/generated-test-sources/java
 
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1c6ef2e/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 193f423..8ce5f3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -138,7 +138,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
   
 compile-protoc
-generate-sources
 
   protoc
 
@@ -164,7 +163,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   ReconfigurationProtocol.proto
 
   
-  
${project.build.directory}/generated-sources/java
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1c6ef2e/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index b3ad50a..5427db8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -325,7 +325,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
   
 compile-protoc
-generate-sources
 
   protoc
 
@@ -352,7 +351,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   fsimage.proto
 
   
-  
${project.build.directory}/generated-sources/java
 
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1c6ef2e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
--
diff --git 

[18/50] [abbrv] hadoop git commit: YARN-5377. Fix TestQueuingContainerManager.testKillMultipleOpportunisticContainers. (Konstantinos Karanasos via asuresh)

2016-11-10 Thread kasha
YARN-5377. Fix 
TestQueuingContainerManager.testKillMultipleOpportunisticContainers. 
(Konstantinos Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f38a6d03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f38a6d03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f38a6d03

Branch: refs/heads/YARN-4752
Commit: f38a6d03a11ca6de93a225563ddf55ec99d5063c
Parents: 3fff158
Author: Arun Suresh 
Authored: Mon Nov 7 22:10:03 2016 -0800
Committer: Arun Suresh 
Committed: Mon Nov 7 22:10:03 2016 -0800

--
 .../BaseContainerManagerTest.java   | 38 +---
 .../queuing/TestQueuingContainerManager.java|  6 ++--
 2 files changed, 29 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f38a6d03/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index d359c3d..6dd1ac7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -284,15 +284,17 @@ public abstract class BaseContainerManagerTest {
 .build());
   }
 
-  public static void waitForContainerState(ContainerManagementProtocol 
containerManager,
-  ContainerId containerID, ContainerState finalState)
+  public static void waitForContainerState(
+  ContainerManagementProtocol containerManager, ContainerId containerID,
+  ContainerState finalState)
   throws InterruptedException, YarnException, IOException {
 waitForContainerState(containerManager, containerID, finalState, 20);
   }
 
-  public static void waitForContainerState(ContainerManagementProtocol 
containerManager,
-  ContainerId containerID, ContainerState finalState, int timeOutMax)
-  throws InterruptedException, YarnException, IOException {
+  public static void waitForContainerState(
+  ContainerManagementProtocol containerManager, ContainerId containerID,
+  ContainerState finalState, int timeOutMax)
+  throws InterruptedException, YarnException, IOException {
 List list = new ArrayList();
 list.add(containerID);
 GetContainerStatusesRequest request =
@@ -314,8 +316,9 @@ public abstract class BaseContainerManagerTest {
   finalState, containerStatus.getState());
   }
 
-  static void waitForApplicationState(ContainerManagerImpl containerManager,
-  ApplicationId appID, ApplicationState finalState)
+  public static void waitForApplicationState(
+  ContainerManagerImpl containerManager, ApplicationId appID,
+  ApplicationState finalState)
   throws InterruptedException {
 // Wait for app-finish
 Application app =
@@ -344,7 +347,16 @@ public abstract class BaseContainerManagerTest {
   public static void waitForNMContainerState(ContainerManagerImpl
   containerManager, ContainerId containerID,
   org.apache.hadoop.yarn.server.nodemanager.containermanager
-  .container.ContainerState finalState, int timeOutMax)
+  .container.ContainerState finalState, int timeOutMax)
+  throws InterruptedException, YarnException, IOException {
+waitForNMContainerState(containerManager, containerID,
+Arrays.asList(finalState), timeOutMax);
+  }
+
+  public static void waitForNMContainerState(ContainerManagerImpl
+  containerManager, ContainerId containerID,
+  List finalStates, int timeOutMax)
   throws InterruptedException, YarnException, IOException {
 Container container = null;
 org.apache.hadoop.yarn.server.nodemanager
@@ -358,15 +370,15 @@ public abstract class BaseContainerManagerTest {
 currentState = container.getContainerState();
   }
   if (currentState != null) {
-LOG.info("Waiting for NM container to get into state " + finalState
-+ ". Current state is " + currentState);
+LOG.info("Waiting for NM container to get into one of 

[24/50] [abbrv] hadoop git commit: HADOOP-13782. Make MutableRates metrics thread-local write, aggregate-on-read. Contributed by Erik Krogen.

2016-11-10 Thread kasha
HADOOP-13782. Make MutableRates metrics thread-local write, aggregate-on-read. 
Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77c13c38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77c13c38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77c13c38

Branch: refs/heads/YARN-4752
Commit: 77c13c385774c51766fe505397fa916754ac08d4
Parents: 2a65eb1
Author: Zhe Zhang 
Authored: Tue Nov 8 16:07:36 2016 -0800
Committer: Zhe Zhang 
Committed: Tue Nov 8 16:07:36 2016 -0800

--
 .../hadoop/ipc/metrics/RpcDetailedMetrics.java  |   4 +-
 .../metrics2/lib/MutableMetricsFactory.java |   5 +
 .../hadoop/metrics2/lib/MutableRates.java   |   6 +
 .../lib/MutableRatesWithAggregation.java| 148 +++
 .../hadoop/metrics2/lib/TestMutableMetrics.java | 148 ++-
 5 files changed, 306 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77c13c38/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java
index 7414364..ad36742 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableRates;
+import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 
 /**
  * This class is for maintaining RPC method related statistics
@@ -34,7 +34,7 @@ import org.apache.hadoop.metrics2.lib.MutableRates;
 @Metrics(about="Per method RPC metrics", context="rpcdetailed")
 public class RpcDetailedMetrics {
 
-  @Metric MutableRates rates;
+  @Metric MutableRatesWithAggregation rates;
 
   static final Log LOG = LogFactory.getLog(RpcDetailedMetrics.class);
   final MetricsRegistry registry;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77c13c38/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
index ae2cb4f..b926c4b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
@@ -67,6 +67,11 @@ public class MutableMetricsFactory {
 if (cls == MutableRates.class) {
   return new MutableRates(registry);
 }
+if (cls == MutableRatesWithAggregation.class) {
+  MutableRatesWithAggregation rates = new MutableRatesWithAggregation();
+  registry.add(info.name(), rates);
+  return rates;
+}
 if (cls == MutableStat.class) {
   return registry.newStat(info.name(), info.description(),
   annotation.sampleName(), annotation.valueName(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77c13c38/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
index 121c292..1074e87 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
@@ -33,6 +33,12 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 
 /**
  * Helper class to manage a group of mutable rate metrics
+ *
+ * This class synchronizes all accesses to the metrics it
+ * contains, so it should not be used in situations where
+ * there is high contention on the metrics.
+ * {@link MutableRatesWithAggregation} is preferable in that
+ * situation.
  */
 

[47/50] [abbrv] hadoop git commit: [YARN-4752] YARN-5605. Preempt containers (all on one node) to meet the requirement of starved applications (Contributed by Karthik Kambatla via Daniel Templeton)

2016-11-10 Thread kasha
http://git-wip-us.apache.org/repos/asf/hadoop/blob/07b9bf30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
deleted file mode 100644
index 2cbe507..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
+++ /dev/null
@@ -1,1483 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.event.AsyncDispatcher;
-import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
-import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
-
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
-.TestUtils;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerPreemptEvent;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
-import org.apache.hadoop.yarn.util.ControlledClock;
-import org.apache.hadoop.yarn.util.resource.Resources;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
-  private final static String ALLOC_FILE = new File(TEST_DIR,
-  TestFairSchedulerPreemption.class.getName() + ".xml").getAbsolutePath();
-
-  private ControlledClock clock;
-
-  private static class StubbedFairScheduler extends FairScheduler {
-public long lastPreemptMemory = -1;
-
-@Override
-protected void preemptResources(Resource toPreempt) {
-  lastPreemptMemory = toPreempt.getMemorySize();
-}
-
-public void resetLastPreemptResources() {
-  lastPreemptMemory = -1;
-}
-  }
-
-  public Configuration createConfiguration() {
-Configuration conf = super.createConfiguration();
-conf.setClass(YarnConfiguration.RM_SCHEDULER, StubbedFairScheduler.class,
-ResourceScheduler.class);
-conf.setBoolean(FairSchedulerConfiguration.PREEMPTION, true);
-conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, 

[13/50] [abbrv] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-10 Thread kasha
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index a69af6e..fd0c68b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -47,8 +47,13 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesLogger;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSetUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 import java.io.IOException;
@@ -71,12 +76,10 @@ public class ParentQueue extends AbstractCSQueue {
 
   protected final Set childQueues;  
   private final boolean rootQueue;
-  final Comparator nonPartitionedQueueComparator;
-  final PartitionedQueueComparator partitionQueueComparator;
-  volatile int numApplications;
+  private final Comparator nonPartitionedQueueComparator;
+  private final PartitionedQueueComparator partitionQueueComparator;
+  private volatile int numApplications;
   private final CapacitySchedulerContext scheduler;
-  private boolean needToResortQueuesAtNextAllocation = false;
-  private int offswitchPerHeartbeatLimit;
 
   private final RecordFactory recordFactory = 
 RecordFactoryProvider.getRecordFactory(null);
@@ -86,7 +89,7 @@ public class ParentQueue extends AbstractCSQueue {
 super(cs, queueName, parent, old);
 this.scheduler = cs;
 this.nonPartitionedQueueComparator = cs.getNonPartitionedQueueComparator();
-this.partitionQueueComparator = cs.getPartitionedQueueComparator();
+this.partitionQueueComparator = new PartitionedQueueComparator();
 
 this.rootQueue = (parent == null);
 
@@ -126,16 +129,12 @@ public class ParentQueue extends AbstractCSQueue {
 }
   }
 
-  offswitchPerHeartbeatLimit =
-csContext.getConfiguration().getOffSwitchPerHeartbeatLimit();
-
   LOG.info(queueName + ", capacity=" + this.queueCapacities.getCapacity()
   + ", absoluteCapacity=" + this.queueCapacities.getAbsoluteCapacity()
   + ", maxCapacity=" + this.queueCapacities.getMaximumCapacity()
   + ", absoluteMaxCapacity=" + this.queueCapacities
   .getAbsoluteMaximumCapacity() + ", state=" + state + ", acls="
   + aclsString + ", labels=" + labelStrBuilder.toString() + "\n"
-  + ", offswitchPerHeartbeatLimit = " + getOffSwitchPerHeartbeatLimit()
   + ", reservationsContinueLooking=" + reservationsContinueLooking);
 } finally {
   writeLock.unlock();
@@ -215,11 +214,6 @@ public class ParentQueue extends AbstractCSQueue {
 
   }
 
-  @Private
-  public int getOffSwitchPerHeartbeatLimit() {
-return offswitchPerHeartbeatLimit;
-  }
-
   private QueueUserACLInfo getUserAclInfo(
   UserGroupInformation user) {
 try {
@@ -435,156 +429,145 @@ public class ParentQueue extends AbstractCSQueue {
 
   @Override
   public CSAssignment assignContainers(Resource clusterResource,
-  FiCaSchedulerNode node, ResourceLimits resourceLimits,
-  SchedulingMode schedulingMode) {
-int offswitchCount = 0;
-try {
-  writeLock.lock();
-  // if our queue cannot access this node, just return
-  if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY
-  && 

[23/50] [abbrv] hadoop git commit: HADOOP-13802. Make generic options help more consistent, and aligned. Contributed by Grant Sohn

2016-11-10 Thread kasha
HADOOP-13802. Make generic options help more consistent, and aligned. 
Contributed by Grant Sohn


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a65eb12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a65eb12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a65eb12

Branch: refs/heads/YARN-4752
Commit: 2a65eb121e23243fcb642d28b3f74241536485d8
Parents: 29e3b34
Author: Mingliang Liu 
Authored: Tue Nov 8 14:41:58 2016 -0800
Committer: Mingliang Liu 
Committed: Tue Nov 8 15:40:22 2016 -0800

--
 .../hadoop/util/GenericOptionsParser.java   | 37 
 1 file changed, 22 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a65eb12/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index 170812b..d98de56 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -567,21 +567,28 @@ public class GenericOptionsParser {
* @param out stream to print the usage message to.
*/
   public static void printGenericCommandUsage(PrintStream out) {
-
-out.println("Generic options supported are");
-out.println("-conf  specify an application 
configuration file");
-out.println("-D 

[43/50] [abbrv] hadoop git commit: YARN-5453. FairScheduler#update may skip update demand resource of child queue/app if current demand reached maxResource. (sandflee via kasha)

2016-11-10 Thread kasha
YARN-5453. FairScheduler#update may skip update demand resource of child 
queue/app if current demand reached maxResource. (sandflee via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86ac1ad9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86ac1ad9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86ac1ad9

Branch: refs/heads/YARN-4752
Commit: 86ac1ad9fd65c7dd12278372b369de38dc4616db
Parents: c8bc7a8
Author: Karthik Kambatla 
Authored: Wed Nov 9 23:44:02 2016 -0800
Committer: Karthik Kambatla 
Committed: Wed Nov 9 23:44:02 2016 -0800

--
 .../scheduler/fair/FSLeafQueue.java | 15 +++
 .../scheduler/fair/FSParentQueue.java   |  6 +--
 .../scheduler/fair/TestFairScheduler.java   | 41 
 3 files changed, 48 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86ac1ad9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 9d5bbe5..c393759 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -268,20 +268,16 @@ public class FSLeafQueue extends FSQueue {
 readLock.lock();
 try {
   for (FSAppAttempt sched : runnableApps) {
-if (Resources.equals(demand, maxShare)) {
-  break;
-}
-updateDemandForApp(sched, maxShare);
+updateDemandForApp(sched);
   }
   for (FSAppAttempt sched : nonRunnableApps) {
-if (Resources.equals(demand, maxShare)) {
-  break;
-}
-updateDemandForApp(sched, maxShare);
+updateDemandForApp(sched);
   }
 } finally {
   readLock.unlock();
 }
+// Cap demand to maxShare to limit allocation to maxShare
+demand = Resources.componentwiseMin(demand, maxShare);
 if (LOG.isDebugEnabled()) {
   LOG.debug("The updated demand for " + getName() + " is " + demand
   + "; the max is " + maxShare);
@@ -290,7 +286,7 @@ public class FSLeafQueue extends FSQueue {
 }
   }
   
-  private void updateDemandForApp(FSAppAttempt sched, Resource maxRes) {
+  private void updateDemandForApp(FSAppAttempt sched) {
 sched.updateDemand();
 Resource toAdd = sched.getDemand();
 if (LOG.isDebugEnabled()) {
@@ -299,7 +295,6 @@ public class FSLeafQueue extends FSQueue {
   + demand);
 }
 demand = Resources.add(demand, toAdd);
-demand = Resources.componentwiseMin(demand, maxRes);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86ac1ad9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index d05390b..53ac8c9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -159,16 +159,14 @@ public class FSParentQueue extends FSQueue {
 childQueue.updateDemand();
 Resource toAdd = childQueue.getDemand();
 demand = Resources.add(demand, toAdd);
-demand = Resources.componentwiseMin(demand, maxShare);
 if (LOG.isDebugEnabled()) {
   LOG.debug("Counting resource from " + 

[12/50] [abbrv] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-10 Thread kasha
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/SchedulerContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/SchedulerContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/SchedulerContainer.java
new file mode 100644
index 000..8b4907b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/SchedulerContainer.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common;
+
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;
+
+/**
+ * Contexts for a container inside scheduler
+ */
+public class SchedulerContainer {
+  private RMContainer rmContainer;
+  private String nodePartition;
+  private A schedulerApplicationAttempt;
+  private N schedulerNode;
+  private boolean allocated; // Allocated (True) or reserved (False)
+
+  public SchedulerContainer(A app, N node, RMContainer rmContainer,
+  String nodePartition, boolean allocated) {
+this.schedulerApplicationAttempt = app;
+this.schedulerNode = node;
+this.rmContainer = rmContainer;
+this.nodePartition = nodePartition;
+this.allocated = allocated;
+  }
+
+  public String getNodePartition() {
+return nodePartition;
+  }
+
+  public RMContainer getRmContainer() {
+return rmContainer;
+  }
+
+  public A getSchedulerApplicationAttempt() {
+return schedulerApplicationAttempt;
+  }
+
+  public N getSchedulerNode() {
+return schedulerNode;
+  }
+
+  public boolean isAllocated() {
+return allocated;
+  }
+
+  public SchedulerRequestKey getSchedulerRequestKey() {
+if (rmContainer.getState() == RMContainerState.RESERVED) {
+  return rmContainer.getReservedSchedulerKey();
+}
+return rmContainer.getAllocatedSchedulerKey();
+  }
+
+  @Override
+  public String toString() {
+return "(Application=" + schedulerApplicationAttempt
+.getApplicationAttemptId() + "; Node=" + schedulerNode.getNodeID()
++ "; Resource=" + rmContainer.getAllocatedOrReservedResource() + ")";
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index ebe70d4..6d9dda8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -18,14 +18,7 @@
 
 package 

[04/50] [abbrv] hadoop git commit: HDFS-10970. Update jackson from 1.9.13 to 2.x in hadoop-hdfs.

2016-11-10 Thread kasha
HDFS-10970. Update jackson from 1.9.13 to 2.x in hadoop-hdfs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/049e7d27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/049e7d27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/049e7d27

Branch: refs/heads/YARN-4752
Commit: 049e7d27bea13d4254baccf49401daae820b71df
Parents: 59bc84a
Author: Akira Ajisaka 
Authored: Mon Nov 7 11:16:31 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Nov 7 11:16:31 2016 +0900

--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml|  4 
 .../hdfs/server/datanode/DiskBalancerWorkItem.java|  4 ++--
 .../hdfs/server/datanode/DiskBalancerWorkStatus.java  | 13 ++---
 .../hadoop/hdfs/util/CombinedHostsFileReader.java |  8 
 .../hadoop/hdfs/util/CombinedHostsFileWriter.java |  2 +-
 .../org/apache/hadoop/hdfs/web/JsonUtilClient.java|  4 ++--
 .../org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java |  4 ++--
 .../ConfRefreshTokenBasedAccessTokenProvider.java |  4 ++--
 .../oauth2/CredentialBasedAccessTokenProvider.java|  4 ++--
 .../org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java |  2 +-
 .../TestClientCredentialTimeBasedTokenRefresher.java  |  2 +-
 .../TestRefreshTokenTimeBasedTokenRefresher.java  |  2 +-
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml   | 10 --
 hadoop-hdfs-project/hadoop-hdfs/pom.xml   | 14 --
 .../server/datanode/fsdataset/impl/FsVolumeImpl.java  |  8 
 .../hdfs/server/diskbalancer/command/Command.java |  4 ++--
 .../diskbalancer/connectors/JsonNodeConnector.java|  4 ++--
 .../diskbalancer/datamodel/DiskBalancerCluster.java   |  8 
 .../diskbalancer/datamodel/DiskBalancerVolume.java|  8 
 .../diskbalancer/datamodel/DiskBalancerVolumeSet.java |  6 +++---
 .../hdfs/server/diskbalancer/planner/NodePlan.java|  6 +++---
 .../hdfs/server/namenode/StartupProgressServlet.java  |  6 +++---
 .../java/org/apache/hadoop/hdfs/web/JsonUtil.java |  2 +-
 .../hdfs/server/datanode/TestDataNodeMXBean.java  |  4 ++--
 .../hdfs/server/diskbalancer/TestDiskBalancerRPC.java |  2 +-
 .../hdfs/server/namenode/TestNameNodeMXBean.java  |  2 +-
 .../java/org/apache/hadoop/hdfs/web/TestJsonUtil.java |  4 ++--
 27 files changed, 64 insertions(+), 77 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/049e7d27/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 1e38019..193f423 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -111,6 +111,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   com.fasterxml.jackson.core
   jackson-annotations
   
+
+  com.fasterxml.jackson.core
+  jackson-databind
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/049e7d27/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
index 909bbd5..0cdd107 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
@@ -20,11 +20,11 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectReader;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.ObjectReader;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/049e7d27/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
 

[15/50] [abbrv] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-10 Thread kasha
YARN-5716. Add global scheduler interface definition and update 
CapacityScheduler to use it. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de3b4aac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de3b4aac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de3b4aac

Branch: refs/heads/YARN-4752
Commit: de3b4aac561258ad242a3c5ed1c919428893fd4c
Parents: acd509d
Author: Jian He 
Authored: Mon Nov 7 10:14:39 2016 -0800
Committer: Jian He 
Committed: Mon Nov 7 10:14:39 2016 -0800

--
 .../dev-support/findbugs-exclude.xml|   9 +
 .../rmcontainer/RMContainer.java|  13 +
 .../rmcontainer/RMContainerImpl.java|  78 +-
 .../scheduler/AppSchedulingInfo.java| 168 +++-
 .../scheduler/SchedulerApplicationAttempt.java  |  73 +-
 .../scheduler/activities/ActivitiesLogger.java  |  17 +-
 .../scheduler/activities/ActivitiesManager.java |   7 +-
 .../scheduler/capacity/AbstractCSQueue.java |  71 ++
 .../scheduler/capacity/CSAssignment.java|  33 +
 .../scheduler/capacity/CSQueue.java |  19 +-
 .../scheduler/capacity/CapacityScheduler.java   | 773 ++-
 .../CapacitySchedulerConfiguration.java |   4 +
 .../scheduler/capacity/LeafQueue.java   | 451 ++-
 .../scheduler/capacity/ParentQueue.java | 428 +-
 .../allocator/AbstractContainerAllocator.java   |  39 +-
 .../capacity/allocator/ContainerAllocation.java |  12 +-
 .../capacity/allocator/ContainerAllocator.java  |  15 +-
 .../allocator/IncreaseContainerAllocator.java   |  89 +--
 .../allocator/RegularContainerAllocator.java| 215 +++---
 .../scheduler/common/AssignmentInformation.java |  44 +-
 .../common/ContainerAllocationProposal.java | 111 +++
 .../common/ResourceAllocationCommitter.java |  29 +
 .../scheduler/common/ResourceCommitRequest.java | 164 
 .../scheduler/common/SchedulerContainer.java|  80 ++
 .../scheduler/common/fica/FiCaSchedulerApp.java | 624 ---
 .../scheduler/fifo/FifoAppAttempt.java  | 110 +++
 .../scheduler/fifo/FifoScheduler.java   |  55 +-
 .../scheduler/placement/PlacementSet.java   |  65 ++
 .../scheduler/placement/PlacementSetUtils.java  |  36 +
 .../placement/ResourceRequestUpdateResult.java  |  43 ++
 .../placement/SchedulingPlacementSet.java   |  90 +++
 .../scheduler/placement/SimplePlacementSet.java |  70 ++
 .../AbstractComparatorOrderingPolicy.java   |   4 +-
 .../scheduler/policy/FairOrderingPolicy.java|   3 +-
 .../scheduler/policy/FifoOrderingPolicy.java|   4 +-
 .../FifoOrderingPolicyForPendingApps.java   |   3 +-
 .../yarn/server/resourcemanager/MockRM.java |  47 +-
 .../resourcemanager/TestClientRMService.java|   2 +-
 .../scheduler/TestSchedulerHealth.java  |   6 +-
 .../capacity/TestCapacityScheduler.java |  56 +-
 .../TestCapacitySchedulerAsyncScheduling.java   | 143 
 .../scheduler/capacity/TestChildQueueOrder.java |  21 +-
 .../capacity/TestContainerAllocation.java   |  45 +-
 .../capacity/TestContainerResizing.java |  10 +-
 .../scheduler/capacity/TestLeafQueue.java   | 647 +++-
 .../scheduler/capacity/TestParentQueue.java | 209 +++--
 .../scheduler/capacity/TestReservations.java| 277 +--
 .../scheduler/capacity/TestUtils.java   |  26 +
 .../TestRMWebServicesSchedulerActivities.java   |   8 +-
 49 files changed, 4212 insertions(+), 1334 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 01b1da7..ab36a4e 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -574,4 +574,13 @@
     
 
   
+
+
+  
+  
+
+
+
+  
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
 

[14/50] [abbrv] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-10 Thread kasha
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index d759d47..7e98f10 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -32,7 +32,9 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.lang.StringUtils;
@@ -112,7 +114,11 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Alloca
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceAllocationCommitter;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
@@ -128,6 +134,9 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourc
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSetUtils;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimplePlacementSet;
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
@@ -142,11 +151,12 @@ import com.google.common.base.Preconditions;
 @SuppressWarnings("unchecked")
 public class CapacityScheduler extends
 AbstractYarnScheduler implements
-PreemptableResourceScheduler, CapacitySchedulerContext, Configurable {
+PreemptableResourceScheduler, CapacitySchedulerContext, Configurable,
+ResourceAllocationCommitter {
 
   private static final Log LOG = LogFactory.getLog(CapacityScheduler.class);
   private YarnAuthorizationProvider authorizer;
- 
+
   private CSQueue root;
   // timeout to join when we stop this service
   protected final long THREAD_JOIN_TIMEOUT_MS = 1000;
@@ -155,6 +165,8 @@ public class CapacityScheduler extends
 
   private volatile boolean isLazyPreemptionEnabled = false;
 
+  private int offswitchPerHeartbeatLimit;
+
   static final Comparator nonPartitionedQueueComparator =
   new Comparator() {
 @Override
@@ -176,7 +188,7 @@ public class CapacityScheduler extends
   public void setConf(Configuration conf) {
   yarnConf = conf;
   }
-  
+
   private void validateConf(Configuration conf) {
 // validate scheduler memory allocation setting
 int minMem = conf.getInt(
@@ -229,7 +241,8 @@ public class CapacityScheduler extends
   private boolean usePortForNodeName;
 
   private boolean scheduleAsynchronously;
-  private AsyncScheduleThread asyncSchedulerThread;
+  private List asyncSchedulerThreads;
+  private 

[19/50] [abbrv] hadoop git commit: MAPREDUCE-6782. JHS task page search based on each individual column not working (Ajith S via Varun Saxena)

2016-11-10 Thread kasha
MAPREDUCE-6782. JHS task page search based on each individual column not 
working (Ajith S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/026b39ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/026b39ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/026b39ad

Branch: refs/heads/YARN-4752
Commit: 026b39ad9d9336b1efdd8e2ffb9a508cb0db6259
Parents: f38a6d0
Author: Varun Saxena 
Authored: Tue Nov 8 14:57:56 2016 +0530
Committer: Varun Saxena 
Committed: Tue Nov 8 14:57:56 2016 +0530

--
 .../org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/026b39ad/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
index 42507d0..3c3386e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
@@ -96,7 +96,8 @@ public class HsTasksPage extends HsView {
   private String jobsPostTableInit() {
 return "var asInitVals = new Array();\n" +
"$('tfoot input').keyup( function () \n{"+
-   "  tasksDataTable.fnFilter( this.value, $('tfoot 
input').index(this) );\n"+
+   "  $('.dt-tasks').dataTable().fnFilter("+
+   " this.value, $('tfoot input').index(this) );\n"+
"} );\n"+
"$('tfoot input').each( function (i) {\n"+
"  asInitVals[i] = this.value;\n"+


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[34/50] [abbrv] hadoop git commit: HADOOP-13346. DelegationTokenAuthenticationHandler writes via closed writer. Contributed by Gregory Chanan and Hrishikesh Gadre.

2016-11-10 Thread kasha
HADOOP-13346. DelegationTokenAuthenticationHandler writes via closed writer. 
Contributed by Gregory Chanan and Hrishikesh Gadre.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/822ae88f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/822ae88f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/822ae88f

Branch: refs/heads/YARN-4752
Commit: 822ae88f7da638e15a25747f6965caee8198aca6
Parents: c619e9b
Author: Xiao Chen 
Authored: Wed Nov 9 09:32:15 2016 -0800
Committer: Xiao Chen 
Committed: Wed Nov 9 09:33:00 2016 -0800

--
 .../DelegationTokenAuthenticationHandler.java   | 32 -
 ...tionTokenAuthenticationHandlerWithMocks.java | 50 
 2 files changed, 81 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/822ae88f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
index c23a94f..315c9d6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
@@ -48,6 +48,8 @@ import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdenti
 import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.JsonGenerator;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -89,6 +91,8 @@ public abstract class DelegationTokenAuthenticationHandler
   public static final String DELEGATION_TOKEN_UGI_ATTRIBUTE =
   "hadoop.security.delegation-token.ugi";
 
+  public static final String JSON_MAPPER_PREFIX = PREFIX + "json-mapper.";
+
   static {
 DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
 DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
@@ -101,6 +105,7 @@ public abstract class DelegationTokenAuthenticationHandler
   private AuthenticationHandler authHandler;
   private DelegationTokenManager tokenManager;
   private String authType;
+  private JsonFactory jsonFactory;
 
   public DelegationTokenAuthenticationHandler(AuthenticationHandler handler) {
 authHandler = handler;
@@ -120,6 +125,7 @@ public abstract class DelegationTokenAuthenticationHandler
   public void init(Properties config) throws ServletException {
 authHandler.init(config);
 initTokenManager(config);
+initJsonFactory(config);
   }
 
   /**
@@ -153,6 +159,30 @@ public abstract class DelegationTokenAuthenticationHandler
 tokenManager.init();
   }
 
+  @VisibleForTesting
+  public void initJsonFactory(Properties config) {
+boolean hasFeature = false;
+JsonFactory tmpJsonFactory = new JsonFactory();
+
+for (Map.Entry entry : config.entrySet()) {
+  String key = (String)entry.getKey();
+  if (key.startsWith(JSON_MAPPER_PREFIX)) {
+JsonGenerator.Feature feature =
+JsonGenerator.Feature.valueOf(key.substring(JSON_MAPPER_PREFIX
+.length()));
+if (feature != null) {
+  hasFeature = true;
+  boolean enabled = Boolean.parseBoolean((String)entry.getValue());
+  tmpJsonFactory.configure(feature, enabled);
+}
+  }
+}
+
+if (hasFeature) {
+  jsonFactory = tmpJsonFactory;
+}
+  }
+
   @Override
   public void destroy() {
 tokenManager.destroy();
@@ -298,7 +328,7 @@ public abstract class DelegationTokenAuthenticationHandler
 if (map != null) {
   response.setContentType(MediaType.APPLICATION_JSON);
   Writer writer = response.getWriter();
-  ObjectMapper jsonMapper = new ObjectMapper();
+  ObjectMapper jsonMapper = new ObjectMapper(jsonFactory);
   jsonMapper.writeValue(writer, map);
   writer.write(ENTER);
   writer.flush();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/822ae88f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java

hadoop git commit: YARN-4218. Metric for resource*time that was preempted. Contributed by Chang Li.

2016-11-10 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 d81706cd9 -> 1a6a5af44


YARN-4218. Metric for resource*time that was preempted. Contributed by Chang Li.

(cherry picked from commit dd5b9dabf90dbd68afdc87421101a5f4bad357d9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a6a5af4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a6a5af4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a6a5af4

Branch: refs/heads/branch-2.8
Commit: 1a6a5af44a7a820cfacffe16521bbe71c968077e
Parents: d81706c
Author: Eric Payne 
Authored: Thu Nov 10 23:02:42 2016 +
Committer: Eric Payne 
Committed: Thu Nov 10 23:32:17 2016 +

--
 .../records/ApplicationResourceUsageReport.java | 43 +++-
 .../src/main/proto/yarn_protos.proto|  2 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  7 
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  4 +-
 .../ApplicationResourceUsageReportPBImpl.java   | 28 +
 ...pplicationHistoryManagerOnTimelineStore.java |  8 +++-
 ...pplicationHistoryManagerOnTimelineStore.java | 11 -
 .../metrics/ApplicationMetricsConstants.java|  6 +++
 .../hadoop/yarn/server/utils/BuilderUtils.java  |  5 ++-
 .../server/resourcemanager/RMAppManager.java|  2 +
 .../server/resourcemanager/RMServerUtils.java   |  2 +-
 .../metrics/SystemMetricsPublisher.java |  6 ++-
 .../resourcemanager/recovery/RMStateStore.java  | 10 +++--
 .../records/ApplicationAttemptStateData.java| 39 --
 .../pb/ApplicationAttemptStateDataPBImpl.java   | 24 +++
 .../server/resourcemanager/rmapp/RMAppImpl.java | 13 +-
 .../resourcemanager/rmapp/RMAppMetrics.java | 16 +++-
 .../rmapp/attempt/RMAppAttemptImpl.java | 11 -
 .../rmapp/attempt/RMAppAttemptMetrics.java  | 16 
 .../rmcontainer/RMContainerImpl.java| 15 ---
 .../scheduler/SchedulerApplicationAttempt.java  |  4 +-
 .../resourcemanager/webapp/RMAppBlock.java  |  8 +++-
 .../resourcemanager/webapp/dao/AppInfo.java | 12 ++
 .../yarn_server_resourcemanager_recovery.proto  |  2 +
 .../server/resourcemanager/TestAppManager.java  |  3 +-
 .../applicationsmanager/MockAsm.java|  4 +-
 .../metrics/TestSystemMetricsPublisher.java | 13 +-
 .../recovery/RMStateStoreTestBase.java  |  4 +-
 .../recovery/TestZKRMStateStore.java|  2 +-
 .../resourcemanager/webapp/TestAppPage.java |  3 +-
 .../webapp/TestRMWebAppFairScheduler.java   |  3 +-
 .../webapp/TestRMWebServicesApps.java   |  2 +-
 32 files changed, 292 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a6a5af4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
index 34efee8..3cf8f3d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
@@ -36,7 +36,8 @@ public abstract class ApplicationResourceUsageReport {
   public static ApplicationResourceUsageReport newInstance(
   int numUsedContainers, int numReservedContainers, Resource usedResources,
   Resource reservedResources, Resource neededResources, long memorySeconds,
-  long vcoreSeconds, float queueUsagePerc, float clusterUsagePerc) {
+  long vcoreSeconds, float queueUsagePerc, float clusterUsagePerc,
+  long preemptedMemorySeconds, long preemptedVcoresSeconds) {
 ApplicationResourceUsageReport report =
 Records.newRecord(ApplicationResourceUsageReport.class);
 report.setNumUsedContainers(numUsedContainers);
@@ -48,6 +49,8 @@ public abstract class ApplicationResourceUsageReport {
 report.setVcoreSeconds(vcoreSeconds);
 report.setQueueUsagePercentage(queueUsagePerc);
 report.setClusterUsagePercentage(clusterUsagePerc);
+report.setPreemptedMemorySeconds(preemptedMemorySeconds);
+report.setPreemptedVcoreSeconds(preemptedVcoresSeconds);
 return report;
   }
 
@@ -188,4 +191,42 @@ public abstract class ApplicationResourceUsageReport {
   @Private
   @Unstable
   public abstract void setClusterUsagePercentage(float clusterUsagePerc);
+
+  /**
+   * Set the 

hadoop git commit: YARN-4218. Metric for resource*time that was preempted. Contributed by Chang Li.

2016-11-10 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f4c8dc478 -> dd5b9dabf


YARN-4218. Metric for resource*time that was preempted. Contributed by Chang Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd5b9dab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd5b9dab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd5b9dab

Branch: refs/heads/branch-2
Commit: dd5b9dabf90dbd68afdc87421101a5f4bad357d9
Parents: f4c8dc4
Author: Eric Payne 
Authored: Thu Nov 10 23:02:42 2016 +
Committer: Eric Payne 
Committed: Thu Nov 10 23:02:42 2016 +

--
 .../records/ApplicationResourceUsageReport.java | 43 +++-
 .../src/main/proto/yarn_protos.proto|  2 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  7 
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  4 +-
 .../ApplicationResourceUsageReportPBImpl.java   | 28 +
 ...pplicationHistoryManagerOnTimelineStore.java |  8 +++-
 ...pplicationHistoryManagerOnTimelineStore.java | 11 -
 .../metrics/ApplicationMetricsConstants.java|  6 +++
 .../hadoop/yarn/server/utils/BuilderUtils.java  |  5 ++-
 .../server/resourcemanager/RMAppManager.java|  2 +
 .../server/resourcemanager/RMServerUtils.java   |  2 +-
 .../metrics/SystemMetricsPublisher.java |  6 ++-
 .../resourcemanager/recovery/RMStateStore.java  | 10 +++--
 .../records/ApplicationAttemptStateData.java| 39 --
 .../pb/ApplicationAttemptStateDataPBImpl.java   | 24 +++
 .../server/resourcemanager/rmapp/RMAppImpl.java | 13 +-
 .../resourcemanager/rmapp/RMAppMetrics.java | 16 +++-
 .../rmapp/attempt/RMAppAttemptImpl.java | 11 -
 .../rmapp/attempt/RMAppAttemptMetrics.java  | 16 
 .../rmcontainer/RMContainerImpl.java| 15 ---
 .../scheduler/SchedulerApplicationAttempt.java  |  2 +-
 .../resourcemanager/webapp/RMAppBlock.java  |  8 +++-
 .../resourcemanager/webapp/dao/AppInfo.java | 11 +
 .../yarn_server_resourcemanager_recovery.proto  |  2 +
 .../server/resourcemanager/TestAppManager.java  |  3 +-
 .../applicationsmanager/MockAsm.java|  4 +-
 .../metrics/TestSystemMetricsPublisher.java | 13 +-
 .../recovery/RMStateStoreTestBase.java  |  4 +-
 .../recovery/TestZKRMStateStore.java|  2 +-
 .../resourcemanager/webapp/TestAppPage.java |  3 +-
 .../webapp/TestRMWebAppFairScheduler.java   |  3 +-
 .../webapp/TestRMWebServicesApps.java   |  2 +-
 32 files changed, 290 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5b9dab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
index 34efee8..3cf8f3d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
@@ -36,7 +36,8 @@ public abstract class ApplicationResourceUsageReport {
   public static ApplicationResourceUsageReport newInstance(
   int numUsedContainers, int numReservedContainers, Resource usedResources,
   Resource reservedResources, Resource neededResources, long memorySeconds,
-  long vcoreSeconds, float queueUsagePerc, float clusterUsagePerc) {
+  long vcoreSeconds, float queueUsagePerc, float clusterUsagePerc,
+  long preemptedMemorySeconds, long preemptedVcoresSeconds) {
 ApplicationResourceUsageReport report =
 Records.newRecord(ApplicationResourceUsageReport.class);
 report.setNumUsedContainers(numUsedContainers);
@@ -48,6 +49,8 @@ public abstract class ApplicationResourceUsageReport {
 report.setVcoreSeconds(vcoreSeconds);
 report.setQueueUsagePercentage(queueUsagePerc);
 report.setClusterUsagePercentage(clusterUsagePerc);
+report.setPreemptedMemorySeconds(preemptedMemorySeconds);
+report.setPreemptedVcoreSeconds(preemptedVcoresSeconds);
 return report;
   }
 
@@ -188,4 +191,42 @@ public abstract class ApplicationResourceUsageReport {
   @Private
   @Unstable
   public abstract void setClusterUsagePercentage(float clusterUsagePerc);
+
+  /**
+   * Set the aggregated amount of memory preempted (in megabytes)
+   * the application has 

hadoop git commit: HDFS-8307. Spurious DNS Queries from hdfs shell. Contributed by Andres Perez

2016-11-10 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 d76273036 -> 95edf265b


HDFS-8307. Spurious DNS Queries from hdfs shell. Contributed by  Andres Perez


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95edf265
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95edf265
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95edf265

Branch: refs/heads/branch-2.7
Commit: 95edf265b3878784a6f239a5d46403a425b534a4
Parents: d762730
Author: Anu Engineer 
Authored: Thu Nov 10 14:38:53 2016 -0800
Committer: Anu Engineer 
Committed: Thu Nov 10 14:38:53 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 ++
 .../java/org/apache/hadoop/hdfs/NameNodeProxies.java  | 14 --
 2 files changed, 14 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95edf265/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f33e27d..5363aa8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -67,6 +67,8 @@ Release 2.7.4 - UNRELEASED
 (Erik Krogen via Zhe Zhang)
 
   BUG FIXES
+   
+HDFS-8307. Spurious DNS Queries from hdfs shell. (Andres Perez via 
aengineer)
 
 HDFS-9696. Garbage snapshot records linger forever. (kihwal)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95edf265/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index c7e2cf2..18ec688 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -192,8 +192,18 @@ public class NameNodeProxies {
 dtService = SecurityUtil.buildTokenService(
 NameNode.getAddress(nameNodeUri));
   }
-  return new ProxyAndInfo(proxy, dtService,
-  NameNode.getAddress(nameNodeUri));
+
+  InetSocketAddress nnAddress;
+
+  //We dont need to resolve the address if is it a Nameservice ID
+  if(HAUtil.isLogicalUri(conf, nameNodeUri)) {
+nnAddress = InetSocketAddress.createUnresolved(
+nameNodeUri.getHost(), NameNode.DEFAULT_PORT);
+  } else {
+nnAddress = NameNode.getAddress(nameNodeUri);
+  }
+
+  return new ProxyAndInfo(proxy, dtService, nnAddress);
 }
   }
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-4218. Metric for resource*time that was preempted. Contributed by Chang Li.

2016-11-10 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3a9841953 -> 93eeb1316


YARN-4218. Metric for resource*time that was preempted. Contributed by Chang Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93eeb131
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93eeb131
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93eeb131

Branch: refs/heads/trunk
Commit: 93eeb13164707d0e3556c2bf737bd2ee09a335c6
Parents: 3a98419
Author: Eric Payne 
Authored: Thu Nov 10 22:35:12 2016 +
Committer: Eric Payne 
Committed: Thu Nov 10 22:35:12 2016 +

--
 .../records/ApplicationResourceUsageReport.java | 43 +++-
 .../src/main/proto/yarn_protos.proto|  2 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  7 
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  4 +-
 .../ApplicationResourceUsageReportPBImpl.java   | 28 +
 ...pplicationHistoryManagerOnTimelineStore.java |  8 +++-
 ...pplicationHistoryManagerOnTimelineStore.java | 11 -
 .../metrics/ApplicationMetricsConstants.java|  6 +++
 .../hadoop/yarn/server/utils/BuilderUtils.java  |  5 ++-
 .../server/resourcemanager/RMAppManager.java|  2 +
 .../server/resourcemanager/RMServerUtils.java   |  2 +-
 .../metrics/TimelineServiceV1Publisher.java |  4 ++
 .../metrics/TimelineServiceV2Publisher.java |  6 +++
 .../resourcemanager/recovery/RMStateStore.java  | 10 +++--
 .../records/ApplicationAttemptStateData.java| 39 --
 .../pb/ApplicationAttemptStateDataPBImpl.java   | 24 +++
 .../server/resourcemanager/rmapp/RMAppImpl.java | 13 +-
 .../resourcemanager/rmapp/RMAppMetrics.java | 16 +++-
 .../rmapp/attempt/RMAppAttemptImpl.java | 11 -
 .../rmapp/attempt/RMAppAttemptMetrics.java  | 16 
 .../rmcontainer/RMContainerImpl.java| 16 
 .../scheduler/SchedulerApplicationAttempt.java  |  2 +-
 .../resourcemanager/webapp/RMAppBlock.java  |  8 +++-
 .../resourcemanager/webapp/dao/AppInfo.java | 11 +
 .../yarn_server_resourcemanager_recovery.proto  |  2 +
 .../server/resourcemanager/TestAppManager.java  |  3 +-
 .../applicationsmanager/MockAsm.java|  4 +-
 .../metrics/TestSystemMetricsPublisher.java | 13 +-
 .../TestSystemMetricsPublisherForV2.java|  4 +-
 .../recovery/RMStateStoreTestBase.java  |  4 +-
 .../recovery/TestZKRMStateStore.java|  2 +-
 .../resourcemanager/webapp/TestAppPage.java |  3 +-
 .../webapp/TestRMWebAppFairScheduler.java   |  3 +-
 .../webapp/TestRMWebServicesApps.java   |  2 +-
 34 files changed, 297 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93eeb131/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
index 34efee8..3cf8f3d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
@@ -36,7 +36,8 @@ public abstract class ApplicationResourceUsageReport {
   public static ApplicationResourceUsageReport newInstance(
   int numUsedContainers, int numReservedContainers, Resource usedResources,
   Resource reservedResources, Resource neededResources, long memorySeconds,
-  long vcoreSeconds, float queueUsagePerc, float clusterUsagePerc) {
+  long vcoreSeconds, float queueUsagePerc, float clusterUsagePerc,
+  long preemptedMemorySeconds, long preemptedVcoresSeconds) {
 ApplicationResourceUsageReport report =
 Records.newRecord(ApplicationResourceUsageReport.class);
 report.setNumUsedContainers(numUsedContainers);
@@ -48,6 +49,8 @@ public abstract class ApplicationResourceUsageReport {
 report.setVcoreSeconds(vcoreSeconds);
 report.setQueueUsagePercentage(queueUsagePerc);
 report.setClusterUsagePercentage(clusterUsagePerc);
+report.setPreemptedMemorySeconds(preemptedMemorySeconds);
+report.setPreemptedVcoreSeconds(preemptedVcoresSeconds);
 return report;
   }
 
@@ -188,4 +191,42 @@ public abstract class ApplicationResourceUsageReport {
   @Private
   @Unstable
   public abstract void setClusterUsagePercentage(float 

hadoop git commit: YARN-5834. TestNodeStatusUpdater.testNMRMConnectionConf compares nodemanager wait time to the incorrect value. (Chang Li via kasha)

2016-11-10 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d064d5dbb -> f4c8dc478


YARN-5834. TestNodeStatusUpdater.testNMRMConnectionConf compares nodemanager 
wait time to the incorrect value. (Chang Li via kasha)

(cherry picked from commit 3a98419532687e4362ffc26abbc1264232820db7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4c8dc47
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4c8dc47
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4c8dc47

Branch: refs/heads/branch-2
Commit: f4c8dc4783e95cf299efdf79c00499b62e3a5670
Parents: d064d5d
Author: Karthik Kambatla 
Authored: Thu Nov 10 14:08:51 2016 -0800
Committer: Karthik Kambatla 
Committed: Thu Nov 10 14:09:33 2016 -0800

--
 .../hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4c8dc47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index bc205b9..d76aa35 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -1523,13 +1523,13 @@ public class TestNodeStatusUpdater {
   long t = System.currentTimeMillis();
   long duration = t - waitStartTime;
   boolean waitTimeValid = (duration >= nmRmConnectionWaitMs) &&
-  (duration < (connectionWaitMs + delta));
+  (duration < (nmRmConnectionWaitMs + delta));
 
   if(!waitTimeValid) {
 // throw exception if NM doesn't retry long enough
 throw new Exception("NM should have tried re-connecting to RM during " 
+
-  "period of at least " + connectionWaitMs + " ms, but " +
-  "stopped retrying within " + (connectionWaitMs + delta) +
+  "period of at least " + nmRmConnectionWaitMs + " ms, but " +
+  "stopped retrying within " + (nmRmConnectionWaitMs + delta) +
   " ms: " + e, e);
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-5834. TestNodeStatusUpdater.testNMRMConnectionConf compares nodemanager wait time to the incorrect value. (Chang Li via kasha)

2016-11-10 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 89354f047 -> 3a9841953


YARN-5834. TestNodeStatusUpdater.testNMRMConnectionConf compares nodemanager 
wait time to the incorrect value. (Chang Li via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a984195
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a984195
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a984195

Branch: refs/heads/trunk
Commit: 3a98419532687e4362ffc26abbc1264232820db7
Parents: 89354f0
Author: Karthik Kambatla 
Authored: Thu Nov 10 14:08:51 2016 -0800
Committer: Karthik Kambatla 
Committed: Thu Nov 10 14:08:51 2016 -0800

--
 .../hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a984195/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index 977cb76..59a4563 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -1524,13 +1524,13 @@ public class TestNodeStatusUpdater {
   long t = System.currentTimeMillis();
   long duration = t - waitStartTime;
   boolean waitTimeValid = (duration >= nmRmConnectionWaitMs) &&
-  (duration < (connectionWaitMs + delta));
+  (duration < (nmRmConnectionWaitMs + delta));
 
   if(!waitTimeValid) {
 // throw exception if NM doesn't retry long enough
 throw new Exception("NM should have tried re-connecting to RM during " 
+
-  "period of at least " + connectionWaitMs + " ms, but " +
-  "stopped retrying within " + (connectionWaitMs + delta) +
+  "period of at least " + nmRmConnectionWaitMs + " ms, but " +
+  "stopped retrying within " + (nmRmConnectionWaitMs + delta) +
   " ms: " + e, e);
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-5808. Add gc log options to the yarn daemon script when starting services-api. Contributed by Billie Rinaldi

2016-11-10 Thread gourksaha
Repository: hadoop
Updated Branches:
  refs/heads/yarn-native-services b8fecf558 -> 82a0ec080


YARN-5808. Add gc log options to the yarn daemon script when starting 
services-api. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82a0ec08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82a0ec08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82a0ec08

Branch: refs/heads/yarn-native-services
Commit: 82a0ec08029e5154483930a4bb47940751895211
Parents: b8fecf5
Author: Gour Saha 
Authored: Thu Nov 10 11:35:02 2016 -0800
Committer: Gour Saha 
Committed: Thu Nov 10 11:35:02 2016 -0800

--
 hadoop-yarn-project/hadoop-yarn/bin/yarn | 16 
 hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh | 12 
 2 files changed, 20 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82a0ec08/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 26d54b8..2396a7a 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -47,7 +47,7 @@ function hadoop_usage
   hadoop_add_subcommand "resourcemanager" "run the ResourceManager"
   hadoop_add_subcommand "rmadmin" "admin tools"
   hadoop_add_subcommand "scmadmin" "SharedCacheManager admin tools"
-  hadoop_add_subcommand "services-api" "run slider services api"
+  hadoop_add_subcommand "servicesapi" "run slider services api"
   hadoop_add_subcommand "sharedcachemanager" "run the SharedCacheManager 
daemon"
   hadoop_add_subcommand "slider" "run a slider app"
   hadoop_add_subcommand "timelinereader" "run the timeline reader server"
@@ -144,20 +144,20 @@ function yarncmd_case
 scmadmin)
   HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.SCMAdmin'
 ;;
-services-api)
+servicesapi)
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
   hadoop_add_classpath 
"${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}/slider"'/*'
   hadoop_add_classpath 
"${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}/services-api"'/*'
   
HADOOP_CLASSNAME='org.apache.hadoop.yarn.services.webapp.ApplicationApiWebApp'
-  hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS} \
--Dslider.libdir=${HADOOP_YARN_HOME}/${YARN_DIR},\
+  local sld="${HADOOP_YARN_HOME}/${YARN_DIR},\
 ${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR},\
 ${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}/slider,\
 ${HADOOP_HDFS_HOME}/${HDFS_DIR},\
 ${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR},\
 ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR},\
 ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
+  hadoop_translate_cygwin_path sld
+  hadoop_add_param HADOOP_OPTS slider.libdir "-Dslider.libdir=${sld}"
 ;;
 sharedcachemanager)
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
@@ -166,15 +166,15 @@ ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
 slider)
   hadoop_add_classpath 
"${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}/slider"'/*'
   HADOOP_CLASSNAME='org.apache.slider.Slider'
-  hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS} \
--Dslider.libdir=${HADOOP_YARN_HOME}/${YARN_DIR},\
+  local sld="${HADOOP_YARN_HOME}/${YARN_DIR},\
 ${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR},\
 ${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}/slider,\
 ${HADOOP_HDFS_HOME}/${HDFS_DIR},\
 ${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR},\
 ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR},\
 ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
+  hadoop_translate_cygwin_path sld
+  hadoop_add_param HADOOP_OPTS slider.libdir "-Dslider.libdir=${sld}"
 ;;
 timelinereader)
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82a0ec08/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
--
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh 
b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
index d003adb..3828897 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
+++ b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
@@ -136,3 +136,15 @@
 # See ResourceManager for some examples
 #
 #export YARN_SHAREDCACHEMANAGER_OPTS=
+
+###
+# Services API specific parameters
+###
+# Specify the JVM options to be used when starting the services API.
+#
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# See ResourceManager for some examples
+#
+#export 

hadoop git commit: HADOOP-13687. Provide a unified dependency artifact that transitively includes the cloud storage modules shipped with Hadoop. Contributed by Chris Nauroth

2016-11-10 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f7b25420a -> d064d5dbb


HADOOP-13687. Provide a unified dependency artifact that transitively includes 
the cloud storage modules shipped with Hadoop. Contributed by Chris Nauroth

(cherry picked from commit 89354f0475efa8e393697b1ddc227c94a76b5923)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d064d5db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d064d5db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d064d5db

Branch: refs/heads/branch-2
Commit: d064d5dbb2adb48e1d72d6aafb9f3e3f56950480
Parents: f7b2542
Author: Mingliang Liu 
Authored: Thu Nov 10 08:58:37 2016 -0800
Committer: Mingliang Liu 
Committed: Thu Nov 10 08:59:07 2016 -0800

--
 .../hadoop-cloud-storage/pom.xml| 127 +++
 hadoop-cloud-storage-project/pom.xml|  54 
 hadoop-project/pom.xml  |   5 +
 hadoop-tools/hadoop-openstack/pom.xml   |   2 +-
 4 files changed, 187 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d064d5db/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
--
diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml 
b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
new file mode 100644
index 000..7993b83
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
@@ -0,0 +1,127 @@
+
+
+http://maven.apache.org/POM/4.0.0; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  4.0.0
+  
+org.apache.hadoop
+hadoop-project
+3.0.0-alpha2-SNAPSHOT
+../../hadoop-project
+  
+  org.apache.hadoop
+  hadoop-cloud-storage
+  3.0.0-alpha2-SNAPSHOT
+  jar
+
+  Apache Hadoop Cloud Storage
+  Apache Hadoop Cloud Storage
+
+  
+cloud-storage
+  
+
+  
+
+  org.apache.hadoop
+  hadoop-annotations
+  compile
+  
+
+  jdk.tools
+  jdk.tools
+
+  
+
+
+  org.apache.hadoop
+  hadoop-common
+  compile
+  
+
+  javax.servlet
+  servlet-api
+
+
+  commons-logging
+  commons-logging-api
+
+
+  jetty
+  org.mortbay.jetty
+
+
+  org.mortbay.jetty
+  jetty
+
+
+  org.mortbay.jetty
+  servlet-api-2.5
+
+
+  com.sun.jersey
+  jersey-core
+
+
+  com.sun.jersey
+  jersey-json
+
+
+  com.sun.jersey
+  jersey-server
+
+
+  org.eclipse.jdt
+  core
+
+
+  org.apache.avro
+  avro-ipc
+
+
+  net.sf.kosmosfs
+  kfs
+
+
+  net.java.dev.jets3t
+  jets3t
+
+
+  com.jcraft
+  jsch
+
+
+  org.apache.zookeeper
+  zookeeper
+
+  
+
+
+  org.apache.hadoop
+  hadoop-aws
+  compile
+
+
+  org.apache.hadoop
+  hadoop-azure
+  compile
+
+
+  org.apache.hadoop
+  hadoop-openstack
+  compile
+
+  
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d064d5db/hadoop-cloud-storage-project/pom.xml
--
diff --git a/hadoop-cloud-storage-project/pom.xml 
b/hadoop-cloud-storage-project/pom.xml
new file mode 100644
index 000..94d4c02
--- /dev/null
+++ b/hadoop-cloud-storage-project/pom.xml
@@ -0,0 +1,54 @@
+
+
+http://maven.apache.org/POM/4.0.0;
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+  http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  4.0.0
+  
+org.apache.hadoop
+hadoop-project
+3.0.0-alpha2-SNAPSHOT
+../hadoop-project
+  
+  org.apache.hadoop
+  hadoop-cloud-storage-project
+  3.0.0-alpha2-SNAPSHOT
+  Apache Hadoop Cloud Storage Project
+  Apache Hadoop Cloud Storage Project
+  pom
+
+  
+hadoop-cloud-storage
+  
+
+  
+
+  
+maven-deploy-plugin
+
+  true
+
+  
+  
+org.apache.rat
+apache-rat-plugin
+
+
+  
+
+  
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d064d5db/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 

hadoop git commit: HADOOP-13687. Provide a unified dependency artifact that transitively includes the cloud storage modules shipped with Hadoop. Contributed by Chris Nauroth

2016-11-10 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/trunk ca68f9cb5 -> 89354f047


HADOOP-13687. Provide a unified dependency artifact that transitively includes 
the cloud storage modules shipped with Hadoop. Contributed by Chris Nauroth


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89354f04
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89354f04
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89354f04

Branch: refs/heads/trunk
Commit: 89354f0475efa8e393697b1ddc227c94a76b5923
Parents: ca68f9c
Author: Mingliang Liu 
Authored: Thu Nov 10 08:58:37 2016 -0800
Committer: Mingliang Liu 
Committed: Thu Nov 10 08:58:37 2016 -0800

--
 .../hadoop-cloud-storage/pom.xml| 127 +++
 hadoop-cloud-storage-project/pom.xml|  54 
 hadoop-project/pom.xml  |   5 +
 hadoop-tools/hadoop-openstack/pom.xml   |   2 +-
 4 files changed, 187 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89354f04/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
--
diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml 
b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
new file mode 100644
index 000..7993b83
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
@@ -0,0 +1,127 @@
+
+
+http://maven.apache.org/POM/4.0.0; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  4.0.0
+  
+org.apache.hadoop
+hadoop-project
+3.0.0-alpha2-SNAPSHOT
+../../hadoop-project
+  
+  org.apache.hadoop
+  hadoop-cloud-storage
+  3.0.0-alpha2-SNAPSHOT
+  jar
+
+  Apache Hadoop Cloud Storage
+  Apache Hadoop Cloud Storage
+
+  
+cloud-storage
+  
+
+  
+
+  org.apache.hadoop
+  hadoop-annotations
+  compile
+  
+
+  jdk.tools
+  jdk.tools
+
+  
+
+
+  org.apache.hadoop
+  hadoop-common
+  compile
+  
+
+  javax.servlet
+  servlet-api
+
+
+  commons-logging
+  commons-logging-api
+
+
+  jetty
+  org.mortbay.jetty
+
+
+  org.mortbay.jetty
+  jetty
+
+
+  org.mortbay.jetty
+  servlet-api-2.5
+
+
+  com.sun.jersey
+  jersey-core
+
+
+  com.sun.jersey
+  jersey-json
+
+
+  com.sun.jersey
+  jersey-server
+
+
+  org.eclipse.jdt
+  core
+
+
+  org.apache.avro
+  avro-ipc
+
+
+  net.sf.kosmosfs
+  kfs
+
+
+  net.java.dev.jets3t
+  jets3t
+
+
+  com.jcraft
+  jsch
+
+
+  org.apache.zookeeper
+  zookeeper
+
+  
+
+
+  org.apache.hadoop
+  hadoop-aws
+  compile
+
+
+  org.apache.hadoop
+  hadoop-azure
+  compile
+
+
+  org.apache.hadoop
+  hadoop-openstack
+  compile
+
+  
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89354f04/hadoop-cloud-storage-project/pom.xml
--
diff --git a/hadoop-cloud-storage-project/pom.xml 
b/hadoop-cloud-storage-project/pom.xml
new file mode 100644
index 000..94d4c02
--- /dev/null
+++ b/hadoop-cloud-storage-project/pom.xml
@@ -0,0 +1,54 @@
+
+
+http://maven.apache.org/POM/4.0.0;
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+  http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  4.0.0
+  
+org.apache.hadoop
+hadoop-project
+3.0.0-alpha2-SNAPSHOT
+../hadoop-project
+  
+  org.apache.hadoop
+  hadoop-cloud-storage-project
+  3.0.0-alpha2-SNAPSHOT
+  Apache Hadoop Cloud Storage Project
+  Apache Hadoop Cloud Storage Project
+  pom
+
+  
+hadoop-cloud-storage
+  
+
+  
+
+  
+maven-deploy-plugin
+
+  true
+
+  
+  
+org.apache.rat
+apache-rat-plugin
+
+
+  
+
+  
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89354f04/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index ca567c5..5750a3f 100644
--- a/hadoop-project/pom.xml
+++ 

hadoop git commit: HDFS-9337. Validate required params for WebHDFS requests (Contributed by Jagadesh Kiran N)

2016-11-10 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 86ac1ad9f -> ca68f9cb5


HDFS-9337. Validate required params for WebHDFS requests (Contributed by 
Jagadesh Kiran N)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca68f9cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca68f9cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca68f9cb

Branch: refs/heads/trunk
Commit: ca68f9cb5bc78e996c0daf8024cf0e7a4faef12a
Parents: 86ac1ad
Author: Vinayakumar B 
Authored: Thu Nov 10 16:51:33 2016 +0530
Committer: Vinayakumar B 
Committed: Thu Nov 10 16:51:33 2016 +0530

--
 .../web/resources/NamenodeWebHdfsMethods.java   | 31 +---
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md|  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   |  3 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 21 -
 4 files changed, 51 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca68f9cb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 15195e0..5d9b12a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -424,6 +424,18 @@ public class NamenodeWebHdfsMethods {
 excludeDatanodes, createFlagParam, noredirect);
   }
 
+  /** Validate all required params. */
+  @SuppressWarnings("rawtypes")
+  private void validateOpParams(HttpOpParam op, Param... params) {
+for (Param param : params) {
+  if (param.getValue() == null || param.getValueString() == null || param
+  .getValueString().isEmpty()) {
+throw new IllegalArgumentException("Required param " + param.getName()
++ " for op: " + op.getValueString() + " is null or empty");
+  }
+}
+  }
+
   /** Handle HTTP PUT request. */
   @PUT
   @Path("{" + UriFsPathParam.NAME + ":.*}")
@@ -576,6 +588,7 @@ public class NamenodeWebHdfsMethods {
 }
 case CREATESYMLINK:
 {
+  validateOpParams(op, destination);
   np.createSymlink(destination.getValue(), fullpath,
   PermissionParam.getDefaultSymLinkFsPermission(),
   createParent.getValue());
@@ -583,6 +596,7 @@ public class NamenodeWebHdfsMethods {
 }
 case RENAME:
 {
+  validateOpParams(op, destination);
   final EnumSet s = renameOptions.getValue();
   if (s.isEmpty()) {
 final boolean b = np.rename(fullpath, destination.getValue());
@@ -621,6 +635,7 @@ public class NamenodeWebHdfsMethods {
 }
 case RENEWDELEGATIONTOKEN:
 {
+  validateOpParams(op, delegationTokenArgument);
   final Token token = new 
Token();
   token.decodeFromUrlString(delegationTokenArgument.getValue());
   final long expiryTime = np.renewDelegationToken(token);
@@ -629,16 +644,19 @@ public class NamenodeWebHdfsMethods {
 }
 case CANCELDELEGATIONTOKEN:
 {
+  validateOpParams(op, delegationTokenArgument);
   final Token token = new 
Token();
   token.decodeFromUrlString(delegationTokenArgument.getValue());
   np.cancelDelegationToken(token);
   return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
 }
 case MODIFYACLENTRIES: {
+  validateOpParams(op, aclPermission);
   np.modifyAclEntries(fullpath, aclPermission.getAclPermission(true));
   return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
 }
 case REMOVEACLENTRIES: {
+  validateOpParams(op, aclPermission);
   np.removeAclEntries(fullpath, aclPermission.getAclPermission(false));
   return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
 }
@@ -651,10 +669,12 @@ public class NamenodeWebHdfsMethods {
   return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
 }
 case SETACL: {
+  validateOpParams(op, aclPermission);
   np.setAcl(fullpath, aclPermission.getAclPermission(true));
   return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
 }
 case SETXATTR: {
+  validateOpParams(op, xattrName, xattrSetFlag);
   np.setXAttr(
   fullpath,
   XAttrHelper.buildXAttr(xattrName.getXAttrName(),
@@ -662,6 

svn commit: r1769080 - /hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

2016-11-10 Thread rohithsharmaks
Author: rohithsharmaks
Date: Thu Nov 10 10:56:46 2016
New Revision: 1769080

URL: http://svn.apache.org/viewvc?rev=1769080&view=rev
Log:
Updated PMC

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1769080&r1=1769079&r2=1769080&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Thu 
Nov 10 10:56:46 2016
@@ -367,6 +367,14 @@
  -8

 
+   
+ rohithsharmaks
+ Rohith Sharma K S
+ Hortonworks
+ 
+ +5.5
+   
+

  sharad
  Sharad Agarwal
@@ -1198,7 +1206,7 @@

  rohithsharmaks
  Rohith Sharma K S
- Huawei
+ Hortonworks
  
  +5.5




-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org