hadoop git commit: HADOOP-12082 Support multiple authentication schemes via AuthenticationFilter. Contributed by Hrishikesh Gadre.

2016-10-21 Thread benoy
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 b82364759 -> 653ceab7d


HADOOP-12082 Support multiple authentication schemes via AuthenticationFilter. 
Contributed by Hrishikesh Gadre.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/653ceab7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/653ceab7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/653ceab7

Branch: refs/heads/branch-2.8
Commit: 653ceab7d35670d488e1dfc7500ca281bc4327e0
Parents: b823647
Author: Benoy Antony 
Authored: Fri Oct 21 19:43:22 2016 -0700
Committer: Benoy Antony 
Committed: Fri Oct 21 19:43:22 2016 -0700

--
 hadoop-common-project/hadoop-auth/pom.xml   |  24 ++
 .../client/KerberosAuthenticator.java   |   8 +-
 .../server/AuthenticationFilter.java|  47 ++-
 .../server/AuthenticationHandler.java   |   2 +-
 .../server/AuthenticationHandlerUtil.java   | 105 ++
 .../server/CompositeAuthenticationHandler.java  |  30 ++
 .../authentication/server/HttpConstants.java|  55 +++
 .../server/LdapAuthenticationHandler.java   | 339 +++
 .../MultiSchemeAuthenticationHandler.java   | 209 
 .../authentication/server/package-info.java |  27 ++
 .../src/site/markdown/Configuration.md  | 137 
 .../client/TestKerberosAuthenticator.java   |  71 +++-
 .../authentication/server/LdapConstants.java|  31 ++
 .../server/TestLdapAuthenticationHandler.java   | 159 +
 .../TestMultiSchemeAuthenticationHandler.java   | 189 +++
 .../DelegationTokenAuthenticationFilter.java|   9 +-
 .../DelegationTokenAuthenticationHandler.java   |  25 +-
 ...emeDelegationTokenAuthenticationHandler.java | 182 ++
 hadoop-project/pom.xml  |   3 +
 19 files changed, 1630 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/653ceab7/hadoop-common-project/hadoop-auth/pom.xml
--
diff --git a/hadoop-common-project/hadoop-auth/pom.xml 
b/hadoop-common-project/hadoop-auth/pom.xml
index f59cf84..9d99a05 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -154,6 +154,30 @@
   curator-test
   test
 
+    <dependency>
+      <groupId>org.apache.directory.server</groupId>
+      <artifactId>apacheds-server-integ</artifactId>
+      <version>${apacheds.version}</version>
+      <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.directory.api</groupId>
+          <artifactId>api-ldap-schema-data</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.directory.server</groupId>
+      <artifactId>apacheds-core-integ</artifactId>
+      <version>${apacheds.version}</version>
+      <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.directory.api</groupId>
+          <artifactId>api-ldap-schema-data</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/653ceab7/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index a69ee46..ceec927 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -14,6 +14,7 @@
 package org.apache.hadoop.security.authentication.client;
 
 import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.security.authentication.server.HttpConstants;
 import org.apache.hadoop.security.authentication.util.AuthToken;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.ietf.jgss.GSSContext;
@@ -57,17 +58,18 @@ public class KerberosAuthenticator implements Authenticator 
{
   /**
* HTTP header used by the SPNEGO server endpoint during an authentication 
sequence.
*/
-  public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
+  public static final String WWW_AUTHENTICATE =
+  HttpConstants.WWW_AUTHENTICATE_HEADER;
 
   /**
* HTTP header used by the SPNEGO client endpoint during an authentication 
sequence.
*/
-  public static final String AUTHORIZATION = "Authorization";
+  public static final String AUTHORIZATION = 
HttpConstants.AUTHORIZATION_HEADER;
 
   /**
* HTTP header prefix used by the SPNEGO client/server endpoints during an 
authentication sequence.
*/
-  public static final String NEGOTIATE = "Negotiate";
+  public static final String NEGOTIATE = HttpConstants.NEGOTIATE;
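
For context, a minimal sketch of the HttpConstants holder these hunks reference. Only the three constants visible in this diff are shown, with values taken from the removed lines above; the full class added by HADOOP-12082 may define more (e.g. for other schemes), so this is a reconstruction, not the verbatim file:

    // Sketch of org.apache.hadoop.security.authentication.server.HttpConstants,
    // reconstructed from the constants this diff replaces.
    public final class HttpConstants {

      private HttpConstants() {
        // constants holder; never instantiated
      }

      /** HTTP header used by the server endpoint in an auth sequence. */
      public static final String WWW_AUTHENTICATE_HEADER = "WWW-Authenticate";

      /** HTTP header used by the client endpoint in an auth sequence. */
      public static final String AUTHORIZATION_HEADER = "Authorization";

      /** Scheme prefix used by the SPNEGO client/server endpoints. */
      public static final String NEGOTIATE = "Negotiate";
    }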

hadoop git commit: MAPREDUCE-6728. Give fetchers hint when ShuffleHandler rejects a shuffling connection (haibochen via rkanter)

2016-10-21 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk c473490da -> d4725bfcb


MAPREDUCE-6728. Give fetchers hint when ShuffleHandler rejects a shuffling 
connection (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4725bfc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4725bfc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4725bfc

Branch: refs/heads/trunk
Commit: d4725bfcb2d300219d65395a78f957afbf37b201
Parents: c473490
Author: Robert Kanter 
Authored: Fri Oct 21 17:46:17 2016 -0700
Committer: Robert Kanter 
Committed: Fri Oct 21 17:46:17 2016 -0700

--
 .../hadoop/mapreduce/task/reduce/Fetcher.java   | 36 
 .../hadoop/mapreduce/task/reduce/MapHost.java   |  4 --
 .../task/reduce/ShuffleSchedulerImpl.java   | 43 
 .../mapreduce/task/reduce/TestFetcher.java  | 22 ++
 .../apache/hadoop/mapred/ShuffleHandler.java| 27 ++--
 .../hadoop/mapred/TestShuffleHandler.java   | 17 +---
 6 files changed, 126 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4725bfc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
index be2f84f..c6889cb 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
@@ -65,6 +65,11 @@ class Fetcher extends Thread {
   /* Default read timeout (in milliseconds) */
   private final static int DEFAULT_READ_TIMEOUT = 3 * 60 * 1000;
 
+  // This should be kept in sync with ShuffleHandler.FETCH_RETRY_DELAY.
+  private static final long FETCH_RETRY_DELAY_DEFAULT = 1000L;
+  static final int TOO_MANY_REQ_STATUS_CODE = 429;
+  private static final String FETCH_RETRY_AFTER_HEADER = "Retry-After";
+
   protected final Reporter reporter;
   private static enum ShuffleErrors{IO_ERROR, WRONG_LENGTH, BAD_ID, WRONG_MAP,
 CONNECTION, WRONG_REDUCE}
@@ -269,6 +274,13 @@ class Fetcher extends Thread {
   } else {
 input = new DataInputStream(connection.getInputStream());
   }
+} catch (TryAgainLaterException te) {
+  LOG.warn("Connection rejected by the host " + te.host +
+  ". Will retry later.");
+  scheduler.penalize(host, te.backoff);
+  for (TaskAttemptID left : remaining) {
+scheduler.putBackKnownMapOutput(host, left);
+  }
 } catch (IOException ie) {
   boolean connectExcpt = ie instanceof ConnectException;
   ioErrs.increment(1);
@@ -427,6 +439,19 @@ class Fetcher extends Thread {
   throws IOException {
 // Validate response code
 int rc = connection.getResponseCode();
+// See if the shuffleHandler rejected the connection due to too many
+// reducer requests. If so, signal fetchers to back off.
+if (rc == TOO_MANY_REQ_STATUS_CODE) {
+  long backoff = connection.getHeaderFieldLong(FETCH_RETRY_AFTER_HEADER,
+  FETCH_RETRY_DELAY_DEFAULT);
+  // in case we get a negative backoff from ShuffleHandler
+  if (backoff < 0) {
+backoff = FETCH_RETRY_DELAY_DEFAULT;
+LOG.warn("Get a negative backoff value from ShuffleHandler. Setting" +
+" it to the default value " + FETCH_RETRY_DELAY_DEFAULT);
+  }
+  throw new TryAgainLaterException(backoff, url.getHost());
+}
 if (rc != HttpURLConnection.HTTP_OK) {
   throw new IOException(
   "Got invalid response code " + rc + " from " + url +
@@ -728,4 +753,15 @@ class Fetcher extends Thread {
   }
 }
   }
+
+  private static class TryAgainLaterException extends IOException {
+public final long backoff;
+public final String host;
+
+public TryAgainLaterException(long backoff, String host) {
+  super("Too many requests to a map host");
+  this.backoff = backoff;
+  this.host = host;
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4725bfc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapHost.java

hadoop git commit: HADOOP-13732. Upgrade OWASP dependency-check plugin version. Contributed by Mike Yoder.

2016-10-21 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 25438526e -> c473490da


HADOOP-13732. Upgrade OWASP dependency-check plugin version. Contributed by 
Mike Yoder.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c473490d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c473490d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c473490d

Branch: refs/heads/trunk
Commit: c473490da01c5909209b138034e1a1c85e174247
Parents: 2543852
Author: Andrew Wang 
Authored: Fri Oct 21 16:41:30 2016 -0700
Committer: Andrew Wang 
Committed: Fri Oct 21 16:41:39 2016 -0700

--
 BUILDING.txt | 8 
 pom.xml  | 5 +++--
 2 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c473490d/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 9d297f7..1fbf8bb 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -212,6 +212,14 @@ Maven build goals:
 and it ignores the -Disal.prefix option. If -Disal.lib isn't given, the
 bundling and building will fail.
 
+ Special plugins: OWASP's dependency-check:
+
+   OWASP's dependency-check plugin will scan the third party dependencies
+   of this project for known CVEs (security vulnerabilities against them).
+   It will produce a report in target/dependency-check-report.html. To
+   invoke, run 'mvn dependency-check:aggregate'. Note that this plugin
+   requires maven 3.1.1 or greater.
+
 
--
 Building components separately
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c473490d/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 1a3cd28..860c2d7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -107,7 +107,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 1.0.0
 2.15
 6.6
-1.3.6
+1.4.3
 
 bash
   
@@ -407,7 +407,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
  dependencies of this project for known CVEs (security
  vulnerabilities against them). It will produce a report
  in target/dependency-check-report.html. To invoke, run
- 'mvn dependency-check:aggregate'
+ 'mvn dependency-check:aggregate'. Note that this plugin
+ requires maven 3.1.1 or greater.
 -->
 <groupId>org.owasp</groupId>
 <artifactId>dependency-check-maven</artifactId>





hadoop git commit: HDFS-11033. Add documents for native raw erasure coder in XOR codes. Contributed by SammiChen.

2016-10-21 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/trunk df857f0d1 -> 25438526e


HDFS-11033. Add documents for native raw erasure coder in XOR codes. 
Contributed by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25438526
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25438526
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25438526

Branch: refs/heads/trunk
Commit: 25438526edd6b3fef23daddd29facfca8f840913
Parents: df857f0
Author: Wei-Chiu Chuang 
Authored: Fri Oct 21 16:20:07 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Oct 21 16:20:07 2016 -0700

--
 .../src/main/resources/core-default.xml | 22 +++-
 .../src/site/markdown/HDFSErasureCoding.md  |  4 +++-
 2 files changed, 24 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25438526/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index daa421c..59d939b 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -647,7 +647,27 @@
   <name>io.erasurecode.codec.rs-default.rawcoder</name>
   <value>org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory</value>
   <description>
-    Raw coder implementation for the rs-default codec.
+    Raw coder implementation for the rs-default codec. The default value is a
+    pure Java implementation. There is also a native implementation. Its value
+    is org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory.
+  </description>
+</property>
+
+<property>
+  <name>io.erasurecode.codec.rs-legacy.rawcoder</name>
+  <value>org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactoryLegacy</value>
+  <description>
+    Raw coder implementation for the rs-legacy codec.
+  </description>
+</property>
+
+<property>
+  <name>io.erasurecode.codec.xor.rawcoder</name>
+  <value>org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory</value>
+  <description>
+    Raw coder implementation for the xor codec. The default value is a pure Java
+    implementation. There is also a native implementation. Its value is
+    org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory.
   </description>
 </property>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25438526/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 627260f..9119b1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -102,7 +102,7 @@ Deployment
   `io.erasurecode.codec.rs-default.rawcoder` for the default RS codec,
   `io.erasurecode.codec.rs-legacy.rawcoder` for the legacy RS codec,
   `io.erasurecode.codec.xor.rawcoder` for the XOR codec.
-  The default implementations for all of these codecs are pure Java. For 
default RS codec, there is also a native implementation which leverages Intel 
ISA-L library to improve the encoding and decoding calculation. Please refer to 
section "Enable Intel ISA-L" for more detail information.
+  The default implementations for all of these codecs are pure Java. For both the default RS codec and the XOR codec, there is also a native implementation which leverages the Intel ISA-L library to improve the performance of encoding and decoding. Please refer to the section "Enable Intel ISA-L" for more details.
 
   Erasure coding background recovery work on the DataNodes can also be tuned 
via the following configuration parameters:
 
@@ -119,6 +119,8 @@ Deployment
 
  To check the ISA-L library's enablement state, run the "hadoop checknative" command. It will tell you whether the ISA-L library is enabled or not.
 
+  Enabling the native implementation of the XOR codec also takes three steps. The first two are the same as steps 1 and 2 above. In step 3, configure the `io.erasurecode.codec.xor.rawcoder` key with `org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory` on both the HDFS client and the DataNodes. As a concrete illustration, see the sketch below.
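
  The same setting can also be applied programmatically on a client. This is an illustrative sketch, not part of the patch; most deployments would put the key in core-site.xml instead:

    import org.apache.hadoop.conf.Configuration;

    public class EnableNativeXorSketch {
      public static Configuration withNativeXorCoder() {
        Configuration conf = new Configuration();
        // Key and factory class exactly as documented above; the native
        // path still requires a loadable ISA-L library at runtime.
        conf.set("io.erasurecode.codec.xor.rawcoder",
            "org.apache.hadoop.io.erasurecode.rawcoder."
                + "NativeXORRawErasureCoderFactory");
        return conf;
      }
    }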
+
 ### Administrative commands
 
   HDFS provides an `erasurecode` subcommand to perform administrative commands 
related to erasure coding.



hadoop git commit: HDFS-10975. fsck -list-corruptfileblocks does not report corrupt EC files. Contributed by Takanobu Asanuma.

2016-10-21 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/trunk be7237224 -> df857f0d1


HDFS-10975. fsck -list-corruptfileblocks does not report corrupt EC files. 
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df857f0d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df857f0d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df857f0d

Branch: refs/heads/trunk
Commit: df857f0d10bda9fbb9c3f6ec77aba0cf46fe3631
Parents: be72372
Author: Wei-Chiu Chuang 
Authored: Fri Oct 21 15:43:12 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Oct 21 15:43:12 2016 -0700

--
 .../hadoop/hdfs/server/namenode/NamenodeFsck.java| 15 ---
 .../apache/hadoop/hdfs/server/namenode/TestFsck.java |  6 ++
 2 files changed, 14 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df857f0d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index a2e249d..97a6248 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -660,6 +660,13 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   decommissioningReplicas;
   res.totalReplicas += totalReplicasPerBlock;
 
+  boolean isMissing;
+  if (storedBlock.isStriped()) {
+isMissing = totalReplicasPerBlock < minReplication;
+  } else {
+isMissing = totalReplicasPerBlock == 0;
+  }
+
   // count expected replicas
   short targetFileReplication;
   if (file.getErasureCodingPolicy() != null) {
@@ -697,7 +704,7 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 res.numMinReplicatedBlocks++;
 
   // count missing replicas / under replicated blocks
-  if (totalReplicasPerBlock < targetFileReplication && 
totalReplicasPerBlock > 0) {
+  if (totalReplicasPerBlock < targetFileReplication && !isMissing) {
 res.missingReplicas += (targetFileReplication - totalReplicasPerBlock);
 res.numUnderReplicatedBlocks += 1;
 underReplicatedPerFile++;
@@ -737,12 +744,6 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   String blkName = block.toString();
   report.append(blockNumber + ". " + blkName + " len=" +
   block.getNumBytes());
-  boolean isMissing;
-  if (storedBlock.isStriped()) {
-isMissing = totalReplicasPerBlock < minReplication;
-  } else {
-isMissing = totalReplicasPerBlock == 0;
-  }
   if (isMissing && !isCorrupt) {
 // If the block is corrupted, it means all its available replicas are
 // corrupted in the case of replication, and it means the state of the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df857f0d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 254a86c..12ae858 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -2013,6 +2013,9 @@ public class TestFsck {
 
 String outStr = runFsck(conf, 1, true, "/");
 assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+assertTrue(outStr.contains("Under-erasure-coded block groups:\t0"));
+outStr = runFsck(conf, -1, true, "/", "-list-corruptfileblocks");
+assertTrue(outStr.contains("has 1 CORRUPT files"));
   }
 
   @Test (timeout = 30)
@@ -2053,6 +2056,9 @@ public class TestFsck {
 "-locations");
 assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
 assertTrue(outStr.contains("Live_repl=" + (dataBlocks - 1)));
+assertTrue(outStr.contains("Under-erasure-coded block groups:\t0"));
+outStr = runFsck(conf, -1, true, "/", "-list-corruptfileblocks");
+assertTrue(outStr.contains("has 1 CORRUPT files"));
   }
 
   private void waitForUnrecoverableBlockGroup(Configuration configuration)
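
The heart of the change is that "missing" now depends on the block layout, and it is computed before the under-replication bookkeeping so a missing block is no longer also counted as under-replicated. A standalone sketch of the predicate (the helper name is hypothetical; the logic mirrors the hunk above):

    // For a striped (erasure-coded) block group, the data is unrecoverable
    // once fewer internal blocks survive than the reconstruction minimum;
    // for a replicated block, only a replica count of zero means missing.
    static boolean isMissing(boolean isStriped, int totalReplicasPerBlock,
        int minReplication) {
      return isStriped
          ? totalReplicasPerBlock < minReplication
          : totalReplicasPerBlock == 0;
    }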



hadoop git commit: fix a build break due to merge

2016-10-21 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 a9e45ed3e -> 11ec1c69a


fix a build break due to merge


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11ec1c69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11ec1c69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11ec1c69

Branch: refs/heads/HDFS-7240
Commit: 11ec1c69aa79e9fa2d6f3c2bd6cce4fa4bce4f14
Parents: a9e45ed
Author: Anu Engineer 
Authored: Fri Oct 21 15:41:14 2016 -0700
Committer: Anu Engineer 
Committed: Fri Oct 21 15:41:14 2016 -0700

--
 .../org/apache/hadoop/ozone/storage/StorageContainerManager.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11ec1c69/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/storage/StorageContainerManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/storage/StorageContainerManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/storage/StorageContainerManager.java
index 6567ae4..5ffcef1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/storage/StorageContainerManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/storage/StorageContainerManager.java
@@ -247,7 +247,7 @@ public class StorageContainerManager
 for (int r = 0; r < reports.length; r++) {
   final BlockListAsLongs storageContainerList = reports[r].getBlocks();
   blockManager.processReport(registration, reports[r].getStorage(),
-  storageContainerList, context, r == (reports.length - 1));
+  storageContainerList, context);
 }
 return null;
   }





hadoop git commit: HDFS-10757. KMSClientProvider combined with KeyProviderCache can result in wrong UGI being used. Contributed by Xiaoyu Yao.

2016-10-21 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7d19394bd -> b82364759


HDFS-10757. KMSClientProvider combined with KeyProviderCache can result in 
wrong UGI being used. Contributed by Xiaoyu Yao.

(cherry picked from commit be7237224819e2491aef91cd4f055c7efcf7b90d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8236475
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8236475
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8236475

Branch: refs/heads/branch-2.8
Commit: b82364759ca8600a43741e35c6781bec8249c547
Parents: 7d19394
Author: Xiaoyu Yao 
Authored: Fri Oct 21 14:23:02 2016 -0700
Committer: Xiaoyu Yao 
Committed: Fri Oct 21 15:05:57 2016 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   | 52 ++--
 .../hadoop/security/UserGroupInformation.java   | 14 ++
 2 files changed, 40 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8236475/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 3d81941..65f2487 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -373,7 +373,6 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   private ConnectionConfigurator configurator;
   private DelegationTokenAuthenticatedURL.Token authToken;
   private final int authRetry;
-  private final UserGroupInformation actualUgi;
 
   @Override
   public String toString() {
@@ -455,15 +454,6 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
 new EncryptedQueueRefiller());
 authToken = new DelegationTokenAuthenticatedURL.Token();
-UserGroupInformation.AuthenticationMethod authMethod =
-UserGroupInformation.getCurrentUser().getAuthenticationMethod();
-if (authMethod == UserGroupInformation.AuthenticationMethod.PROXY) {
-  actualUgi = UserGroupInformation.getCurrentUser().getRealUser();
-} else if (authMethod == UserGroupInformation.AuthenticationMethod.TOKEN) {
-  actualUgi = UserGroupInformation.getLoginUser();
-} else {
-  actualUgi =UserGroupInformation.getCurrentUser();
-}
   }
 
   private static Path extractKMSPath(URI uri) throws MalformedURLException, 
IOException {
@@ -530,19 +520,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   throws IOException {
 HttpURLConnection conn;
 try {
-  // if current UGI is different from UGI at constructor time, behave as
-  // proxyuser
-  UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
-  final String doAsUser = (currentUgi.getAuthenticationMethod() ==
-  UserGroupInformation.AuthenticationMethod.PROXY)
-  ? currentUgi.getShortUserName() : null;
-
-  // If current UGI contains kms-dt && is not proxy, doAs it to use its dt.
-  // Otherwise, create the HTTP connection using the UGI at constructor 
time
-  UserGroupInformation ugiToUse =
-  (currentUgiContainsKmsDt() && doAsUser == null) ?
-  currentUgi : actualUgi;
-  conn = ugiToUse.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
+  final String doAsUser = getDoAsUser();
+  conn = getActualUgi().doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
 @Override
 public HttpURLConnection run() throws Exception {
   DelegationTokenAuthenticatedURL authUrl =
@@ -919,7 +899,7 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   token, url, doAsUser);
   final DelegationTokenAuthenticatedURL authUrl =
   new DelegationTokenAuthenticatedURL(configurator);
-  return actualUgi.doAs(
+  return getActualUgi().doAs(
   new PrivilegedExceptionAction<Long>() {
 @Override
 public Long run() throws Exception {
@@ -942,7 +922,7 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   final String doAsUser = getDoAsUser();
   final DelegationTokenAuthenticatedURL.Token token =
   generateDelegationToken(dToken);
-  return actualUgi.doAs(
+  return getActualUgi().doAs(
 

hadoop git commit: HADOOP-13381. KMS clients should use KMS Delegation Tokens from current UGI. Contributed by Xiao Chen.

2016-10-21 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 4b56954fe -> 7d19394bd


HADOOP-13381. KMS clients should use KMS Delegation Tokens from current UGI. 
Contributed by Xiao Chen.

(cherry picked from commit 8ebf2e95d2053cb94c6ff87ca018811fe8276f2b)

Conflicts:

hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java

(cherry picked from commit 8fe4b2429a22cf932b701863170336a3b6986dd2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d19394b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d19394b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d19394b

Branch: refs/heads/branch-2.8
Commit: 7d19394bd500c2ea6879cdb02ae301eaf807e0d4
Parents: 4b56954
Author: Xiao Chen 
Authored: Thu Jul 28 18:23:51 2016 -0700
Committer: Xiao Chen 
Committed: Fri Oct 21 14:59:56 2016 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   |  23 +++-
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 125 +++
 2 files changed, 146 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d19394b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 29f7cd5..3d81941 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -37,6 +37,7 @@ import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenRenewer;
 import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
@@ -536,8 +537,12 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   UserGroupInformation.AuthenticationMethod.PROXY)
   ? currentUgi.getShortUserName() : null;
 
-  // creating the HTTP connection using the current UGI at constructor time
-  conn = actualUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
+  // If current UGI contains kms-dt && is not proxy, doAs it to use its dt.
+  // Otherwise, create the HTTP connection using the UGI at constructor 
time
+  UserGroupInformation ugiToUse =
+  (currentUgiContainsKmsDt() && doAsUser == null) ?
+  currentUgi : actualUgi;
+  conn = ugiToUse.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
 @Override
 public HttpURLConnection run() throws Exception {
   DelegationTokenAuthenticatedURL authUrl =
@@ -1041,6 +1046,20 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 return dtService;
   }
 
+  private boolean currentUgiContainsKmsDt() throws IOException {
+// Add existing credentials from current UGI, since provider is cached.
+Credentials creds = UserGroupInformation.getCurrentUser().
+getCredentials();
+if (!creds.getAllTokens().isEmpty()) {
+  org.apache.hadoop.security.token.Token<? extends TokenIdentifier>
+  dToken = creds.getToken(getDelegationTokenService());
+  if (dToken != null) {
+return true;
+  }
+}
+return false;
+  }
+
   /**
* Shutdown valueQueue executor threads
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d19394b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 3344a6a..0cd3b27 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -38,7 +38,9 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import 

hadoop git commit: HDFS-10757. KMSClientProvider combined with KeyProviderCache can result in wrong UGI being used. Contributed by Xiaoyu Yao.

2016-10-21 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 25f4327f0 -> 0205ad5fa


HDFS-10757. KMSClientProvider combined with KeyProviderCache can result in 
wrong UGI being used. Contributed by Xiaoyu Yao.

(cherry picked from commit be7237224819e2491aef91cd4f055c7efcf7b90d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0205ad5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0205ad5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0205ad5f

Branch: refs/heads/branch-2
Commit: 0205ad5fa81c4001b4d5189dce88b4546a98b2eb
Parents: 25f4327
Author: Xiaoyu Yao 
Authored: Fri Oct 21 14:23:02 2016 -0700
Committer: Xiaoyu Yao 
Committed: Fri Oct 21 14:25:29 2016 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   | 52 ++--
 .../hadoop/security/UserGroupInformation.java   | 14 ++
 2 files changed, 40 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0205ad5f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 3d81941..65f2487 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -373,7 +373,6 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   private ConnectionConfigurator configurator;
   private DelegationTokenAuthenticatedURL.Token authToken;
   private final int authRetry;
-  private final UserGroupInformation actualUgi;
 
   @Override
   public String toString() {
@@ -455,15 +454,6 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
 new EncryptedQueueRefiller());
 authToken = new DelegationTokenAuthenticatedURL.Token();
-UserGroupInformation.AuthenticationMethod authMethod =
-UserGroupInformation.getCurrentUser().getAuthenticationMethod();
-if (authMethod == UserGroupInformation.AuthenticationMethod.PROXY) {
-  actualUgi = UserGroupInformation.getCurrentUser().getRealUser();
-} else if (authMethod == UserGroupInformation.AuthenticationMethod.TOKEN) {
-  actualUgi = UserGroupInformation.getLoginUser();
-} else {
-  actualUgi =UserGroupInformation.getCurrentUser();
-}
   }
 
   private static Path extractKMSPath(URI uri) throws MalformedURLException, 
IOException {
@@ -530,19 +520,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   throws IOException {
 HttpURLConnection conn;
 try {
-  // if current UGI is different from UGI at constructor time, behave as
-  // proxyuser
-  UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
-  final String doAsUser = (currentUgi.getAuthenticationMethod() ==
-  UserGroupInformation.AuthenticationMethod.PROXY)
-  ? currentUgi.getShortUserName() : null;
-
-  // If current UGI contains kms-dt && is not proxy, doAs it to use its dt.
-  // Otherwise, create the HTTP connection using the UGI at constructor 
time
-  UserGroupInformation ugiToUse =
-  (currentUgiContainsKmsDt() && doAsUser == null) ?
-  currentUgi : actualUgi;
-  conn = ugiToUse.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
+  final String doAsUser = getDoAsUser();
+  conn = getActualUgi().doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
 @Override
 public HttpURLConnection run() throws Exception {
   DelegationTokenAuthenticatedURL authUrl =
@@ -919,7 +899,7 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   token, url, doAsUser);
   final DelegationTokenAuthenticatedURL authUrl =
   new DelegationTokenAuthenticatedURL(configurator);
-  return actualUgi.doAs(
+  return getActualUgi().doAs(
   new PrivilegedExceptionAction<Long>() {
 @Override
 public Long run() throws Exception {
@@ -942,7 +922,7 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   final String doAsUser = getDoAsUser();
   final DelegationTokenAuthenticatedURL.Token token =
   generateDelegationToken(dToken);
-  return actualUgi.doAs(
+  return getActualUgi().doAs(
   

hadoop git commit: HDFS-10757. KMSClientProvider combined with KeyProviderCache can result in wrong UGI being used. Contributed by Xiaoyu Yao.

2016-10-21 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 23d7d53a4 -> be7237224


HDFS-10757. KMSClientProvider combined with KeyProviderCache can result in 
wrong UGI being used. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be723722
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be723722
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be723722

Branch: refs/heads/trunk
Commit: be7237224819e2491aef91cd4f055c7efcf7b90d
Parents: 23d7d53
Author: Xiaoyu Yao 
Authored: Fri Oct 21 14:23:02 2016 -0700
Committer: Xiaoyu Yao 
Committed: Fri Oct 21 14:23:02 2016 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   | 52 ++--
 .../hadoop/security/UserGroupInformation.java   | 14 ++
 2 files changed, 40 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be723722/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 701e116..db0ee85 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -373,7 +373,6 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   private ConnectionConfigurator configurator;
   private DelegationTokenAuthenticatedURL.Token authToken;
   private final int authRetry;
-  private final UserGroupInformation actualUgi;
 
   @Override
   public String toString() {
@@ -455,15 +454,6 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
 new EncryptedQueueRefiller());
 authToken = new DelegationTokenAuthenticatedURL.Token();
-UserGroupInformation.AuthenticationMethod authMethod =
-UserGroupInformation.getCurrentUser().getAuthenticationMethod();
-if (authMethod == UserGroupInformation.AuthenticationMethod.PROXY) {
-  actualUgi = UserGroupInformation.getCurrentUser().getRealUser();
-} else if (authMethod == UserGroupInformation.AuthenticationMethod.TOKEN) {
-  actualUgi = UserGroupInformation.getLoginUser();
-} else {
-  actualUgi =UserGroupInformation.getCurrentUser();
-}
   }
 
   private static Path extractKMSPath(URI uri) throws MalformedURLException, 
IOException {
@@ -530,19 +520,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   throws IOException {
 HttpURLConnection conn;
 try {
-  // if current UGI is different from UGI at constructor time, behave as
-  // proxyuser
-  UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
-  final String doAsUser = (currentUgi.getAuthenticationMethod() ==
-  UserGroupInformation.AuthenticationMethod.PROXY)
-  ? currentUgi.getShortUserName() : null;
-
-  // If current UGI contains kms-dt && is not proxy, doAs it to use its dt.
-  // Otherwise, create the HTTP connection using the UGI at constructor 
time
-  UserGroupInformation ugiToUse =
-  (currentUgiContainsKmsDt() && doAsUser == null) ?
-  currentUgi : actualUgi;
-  conn = ugiToUse.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
+  final String doAsUser = getDoAsUser();
+  conn = getActualUgi().doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
 @Override
 public HttpURLConnection run() throws Exception {
   DelegationTokenAuthenticatedURL authUrl =
@@ -919,7 +899,7 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   token, url, doAsUser);
   final DelegationTokenAuthenticatedURL authUrl =
   new DelegationTokenAuthenticatedURL(configurator);
-  return actualUgi.doAs(
+  return getActualUgi().doAs(
   new PrivilegedExceptionAction<Long>() {
 @Override
 public Long run() throws Exception {
@@ -942,7 +922,7 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   final String doAsUser = getDoAsUser();
   final DelegationTokenAuthenticatedURL.Token token =
   generateDelegationToken(dToken);
-  return actualUgi.doAs(
+  return getActualUgi().doAs(
   new PrivilegedExceptionAction() {
 @Override
 public 
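
Across these hunks the fix replaces a UGI captured once in the constructor with a helper that re-evaluates the caller on every request. A sketch of the selection rules follows, reconstructed from the constructor logic removed above; the real getActualUgi() in KMSClientProvider may differ in detail (e.g. by also consulting currentUgiContainsKmsDt()):

    import java.io.IOException;
    import org.apache.hadoop.security.UserGroupInformation;

    class UgiSelectionSketch {
      static UserGroupInformation actualUgi() throws IOException {
        UserGroupInformation current = UserGroupInformation.getCurrentUser();
        switch (current.getAuthenticationMethod()) {
        case PROXY:
          // A proxy UGI acts on behalf of another user; the usable
          // credentials live on the real (wrapped) user.
          return current.getRealUser();
        case TOKEN:
          // Token-authenticated callers fall back to the login user.
          return UserGroupInformation.getLoginUser();
        default:
          return current;
        }
      }
    }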

[04/50] [abbrv] hadoop git commit: HADOOP-13737. Cleanup DiskChecker interface. Contributed by Arpit Agarwal.

2016-10-21 Thread wangda
HADOOP-13737. Cleanup DiskChecker interface. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/262827cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/262827cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/262827cf

Branch: refs/heads/YARN-3368
Commit: 262827cf75bf9c48cd95335eb04fd8ff1d64c538
Parents: 5e83a21
Author: Anu Engineer 
Authored: Thu Oct 20 13:26:23 2016 -0700
Committer: Anu Engineer 
Committed: Thu Oct 20 13:35:26 2016 -0700

--
 .../org/apache/hadoop/util/DiskChecker.java | 178 +++
 .../org/apache/hadoop/util/TestDiskChecker.java |  22 ---
 2 files changed, 68 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/262827cf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
index a36a7a0..2c73af8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
@@ -20,9 +20,6 @@ package org.apache.hadoop.util;
 
 import java.io.File;
 import java.io.IOException;
-import java.nio.file.DirectoryStream;
-import java.nio.file.DirectoryIteratorException;
-import java.nio.file.Files;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -53,62 +50,6 @@ public class DiskChecker {
 }
   }
   
-  /** 
-   * The semantics of mkdirsWithExistsCheck method is different from the mkdirs
-   * method provided in the Sun's java.io.File class in the following way:
-   * While creating the non-existent parent directories, this method checks for
-   * the existence of those directories if the mkdir fails at any point (since
-   * that directory might have just been created by some other process).
-   * If both mkdir() and the exists() check fails for any seemingly 
-   * non-existent directory, then we signal an error; Sun's mkdir would signal
-   * an error (return false) if a directory it is attempting to create already
-   * exists or the mkdir fails.
-   * @param dir
-   * @return true on success, false on failure
-   */
-  public static boolean mkdirsWithExistsCheck(File dir) {
-if (dir.mkdir() || dir.exists()) {
-  return true;
-}
-File canonDir = null;
-try {
-  canonDir = dir.getCanonicalFile();
-} catch (IOException e) {
-  return false;
-}
-String parent = canonDir.getParent();
-return (parent != null) && 
-   (mkdirsWithExistsCheck(new File(parent)) &&
-  (canonDir.mkdir() || canonDir.exists()));
-  }
-
-  /**
-   * Recurse down a directory tree, checking all child directories.
-   * @param dir
-   * @throws DiskErrorException
-   */
-  public static void checkDirs(File dir) throws DiskErrorException {
-checkDir(dir);
-IOException ex = null;
-try (DirectoryStream<java.nio.file.Path> stream =
-Files.newDirectoryStream(dir.toPath())) {
-  for (java.nio.file.Path entry: stream) {
-File child = entry.toFile();
-if (child.isDirectory()) {
-  checkDirs(child);
-}
-  }
-} catch (DirectoryIteratorException de) {
-  ex = de.getCause();
-} catch (IOException ie) {
-  ex = ie;
-}
-if (ex != null) {
-  throw new DiskErrorException("I/O error when open a directory: "
-  + dir.toString(), ex);
-}
-  }
-
   /**
* Create the directory if it doesn't exist and check that dir is readable,
* writable and executable
@@ -121,39 +62,7 @@ public class DiskChecker {
   throw new DiskErrorException("Cannot create directory: "
+ dir.toString());
 }
-checkDirAccess(dir);
-  }
-
-  /**
-   * Create the directory or check permissions if it already exists.
-   *
-   * The semantics of mkdirsWithExistsAndPermissionCheck method is different
-   * from the mkdirs method provided in the Sun's java.io.File class in the
-   * following way:
-   * While creating the non-existent parent directories, this method checks for
-   * the existence of those directories if the mkdir fails at any point (since
-   * that directory might have just been created by some other process).
-   * If both mkdir() and the exists() check fails for any seemingly
-   * non-existent directory, then we signal an error; Sun's mkdir would signal
-   * an error (return false) if a 
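
After the cleanup, checkDir(File) remains the primary entry point: it creates the directory if needed and then verifies read, write, and execute access. A small caller-side sketch (the path is made up):

    import java.io.File;
    import org.apache.hadoop.util.DiskChecker;
    import org.apache.hadoop.util.DiskChecker.DiskErrorException;

    public class VolumeCheckSketch {
      /** Returns true when the directory exists (or was created) with rwx access. */
      public static boolean isUsable(File dir) {
        try {
          DiskChecker.checkDir(dir);
          return true;
        } catch (DiskErrorException e) {
          // Callers typically log this and exclude the volume from use.
          return false;
        }
      }

      public static void main(String[] args) {
        System.out.println(isUsable(new File("/data/1/hdfs")));
      }
    }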

[16/50] [abbrv] hadoop git commit: YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via wangda)

2016-10-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ca4351/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs
new file mode 100644
index 000..c546bf7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs
@@ -0,0 +1,19 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+Sorry, Error Occured.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ca4351/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs
new file mode 100644
index 000..588ea44
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs
@@ -0,0 +1,20 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+404, Not Found
+Please Check your URL

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ca4351/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
index e58d6bd..3a79080 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
@@ -1,3 +1,3 @@
 {{app-table table-id="apps-table" arr=model}}
-{{simple-table table-id="apps-table" bFilter=true colTypes="elapsed-time" 
colTargets="7"}}
-{{outlet}}
\ No newline at end of file
+{{simple-table table-id="apps-table" bFilter=true colsOrder="0,desc" 
colTypes="natural elapsed-time" colTargets="0 7"}}
+{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ca4351/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
new file mode 100644
index 000..9cc3b0f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
@@ -0,0 +1,36 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}

[48/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/simple-table.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/simple-table.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/simple-table.js
deleted file mode 100644
index 447533e..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/simple-table.js
+++ /dev/null
@@ -1,58 +0,0 @@
-import Ember from 'ember';
-
-export default Ember.Component.extend({
-  didInsertElement: function() {
-var paging = this.get("paging") ? true : this.get("paging");
-var ordering = this.get("ordering") ? true : this.get("ordering");
-var info = this.get("info") ? true : this.get("info");
-var bFilter = this.get("bFilter") ? true : this.get("bFilter");
-
-// Defines sorter for the columns if not default.
-// Can also specify a custom sorter.
-var i;
-var colDefs = [];
-if (this.get("colTypes")) {
-  var typesArr = this.get("colTypes").split(' ');
-  var targetsArr = this.get("colTargets").split(' ');
-  for (i = 0; i < typesArr.length; i++) {
-console.log(typesArr[i] + " " + targetsArr[i]);
-colDefs.push({
-  type: typesArr[i],
-  targets: parseInt(targetsArr[i])
-});
-  }
-}
-// Defines initial column and sort order.
-var orderArr = [];
-if (this.get("colsOrder")) {
-  var cols = this.get("colsOrder").split(' ');
-  for (i = 0; i < cols.length; i++) {
-var col = cols[i].split(',');
-if (col.length != 2) {
-  continue;
-}
-var order = col[1].trim();
-if (order != 'asc' && order != 'desc') {
-  continue;
-}
-var colOrder = [];
-colOrder.push(parseInt(col[0]));
-colOrder.push(order);
-orderArr.push(colOrder);
-  }
-}
-if (orderArr.length == 0) {
-  var defaultOrder = [0, 'asc'];
-  orderArr.push(defaultOrder);
-}
-console.log(orderArr[0]);
-Ember.$('#' + this.get('table-id')).DataTable({
-  "paging":   paging,
-  "ordering": ordering, 
-  "info": info,
-  "bFilter": bFilter,
-  "order": orderArr,
-  "columnDefs": colDefs
-});
-  }
-});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/timeline-view.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/timeline-view.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/timeline-view.js
deleted file mode 100644
index fe402bb..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/timeline-view.js
+++ /dev/null
@@ -1,250 +0,0 @@
-import Ember from 'ember';
-import Converter from 'yarn-ui/utils/converter';
-
-export default Ember.Component.extend({
-  canvas: {
-svg: undefined,
-h: 0,
-w: 0,
-tooltip: undefined
-  },
-
-  clusterMetrics: undefined,
-  modelArr: [],
-  colors: d3.scale.category10().range(),
-  _selected: undefined,
-
-  selected: function() {
-return this._selected;
-  }.property(),
-
-  tableComponentName: function() {
-return "app-attempt-table";
-  }.property(),
-
-  setSelected: function(d) {
-if (this._selected == d) {
-  return;
-}
-
-// restore color
-if (this._selected) {
-  var dom = d3.select("#timeline-bar-" + this._selected.get("id"));
-  dom.attr("fill", this.colors[0]);
-}
-
-this._selected = d;
-this.set("selected", d);
-dom = d3.select("#timeline-bar-" + d.get("id"));
-dom.attr("fill", this.colors[1]);
-  },
-
-  getPerItemHeight: function() {
-var arrSize = this.modelArr.length;
-
-if (arrSize < 20) {
-  return 30;
-} else if (arrSize < 100) {
-  return 10;
-} else {
-  return 2;
-}
-  },
-
-  getPerItemGap: function() {
-var arrSize = this.modelArr.length;
-
-if (arrSize < 20) {
-  return 5;
-} else if (arrSize < 100) {
-  return 1;
-} else {
-  return 1;
-}
-  },
-
-  getCanvasHeight: function() {
-return (this.getPerItemHeight() + this.getPerItemGap()) * 
this.modelArr.length + 200;
-  },
-
-  draw: function(start, end) {
-// get w/h of the svg
-var bbox = d3.select("#" + this.get("parent-id"))
-  .node()
-  .getBoundingClientRect();
-this.canvas.w = bbox.width;
-this.canvas.h = this.getCanvasHeight();
-
-this.canvas.svg = d3.select("#" + this.get("parent-id"))
-  .append("svg")
-  .attr("width", this.canvas.w)
-  .attr("height", this.canvas.h)
-  .attr("id", this.get("my-id"));
-this.renderTimeline(start, end);
-  },
-
-  renderTimeline: function(start, end) {
-var border = 30;
-var singleBarHeight = 

[40/50] [abbrv] hadoop git commit: YARN-5504. [YARN-3368] Fix YARN UI build pom.xml (Sreenath Somarajapuram via Sunil G)

2016-10-21 Thread wangda
YARN-5504. [YARN-3368] Fix YARN UI build pom.xml (Sreenath Somarajapuram via 
Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/951658d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/951658d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/951658d1

Branch: refs/heads/YARN-3368
Commit: 951658d1662c4efa349347ec4d119524fc9c4c1d
Parents: 58c2e9b
Author: sunilg 
Authored: Thu Aug 25 23:21:29 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  | 59 +---
 .../src/main/webapp/ember-cli-build.js  |  2 +-
 .../hadoop-yarn-ui/src/main/webapp/package.json |  3 +-
 3 files changed, 17 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/951658d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 2933a76..fca8d30 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -35,7 +35,7 @@
 node
 v0.12.2
 2.10.0
-false
+false
   
 
   
@@ -60,19 +60,20 @@
   
 
   
- maven-clean-plugin
- 3.0.0
- 
-false
-
-   
-  
${basedir}/src/main/webapp/bower_components
-   
-   
-  
${basedir}/src/main/webapp/node_modules
-   
-
- 
+maven-clean-plugin
+3.0.0
+
+  ${keep-ui-build-cache}
+  false
+  
+
+  
${basedir}/src/main/webapp/bower_components
+
+
+  ${basedir}/src/main/webapp/node_modules
+
+  
+
   
 
   
@@ -126,21 +127,6 @@
 
   
   
-generate-sources
-bower --allow-root install
-
-  exec
-
-
-  ${webappDir}
-  bower
-  
---allow-root
-install
-  
-
-  
-  
 ember build
 generate-sources
 
@@ -158,21 +144,6 @@
 
   
   
-ember test
-generate-resources
-
-  exec
-
-
-  ${skipTests}
-  ${webappDir}
-  ember
-  
-test
-  
-
-  
-  
 cleanup tmp
 generate-sources
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/951658d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
index d21cc3e..7736c75 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
@@ -22,7 +22,7 @@ var EmberApp = require('ember-cli/lib/broccoli/ember-app');
 
 module.exports = function(defaults) {
   var app = new EmberApp(defaults, {
-// Add options here
+hinting: false
   });
 
   
app.import("bower_components/datatables/media/css/jquery.dataTables.min.css");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/951658d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
index baa473a..6a4eb16 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
@@ -9,8 +9,7 @@
   },
   "scripts": {
 "build": "ember build",
-"start": "ember server",
-"test": "ember test"
+"start": "ember server"
   },
   "repository": "",
   "engines": {



[50/50] [abbrv] hadoop git commit: YARN-5183. [YARN-3368] Support for responsive navbar when window is resized. (Kai Sasaki via Sunil G)

2016-10-21 Thread wangda
YARN-5183. [YARN-3368] Support for responsive navbar when window is resized. 
(Kai Sasaki via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9b75433
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9b75433
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9b75433

Branch: refs/heads/YARN-3368
Commit: b9b7543304b45a3cfae2d320ea542d9d463a6567
Parents: 97f5dfe
Author: Sunil 
Authored: Fri Jun 10 10:33:41 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9b75433/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
index bce18ce..d21cc3e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
@@ -32,6 +32,9 @@ module.exports = function(defaults) {
   app.import("bower_components/select2/dist/js/select2.min.js");
   app.import('bower_components/jquery-ui/jquery-ui.js');
   app.import('bower_components/more-js/dist/more.js');
+  app.import('bower_components/bootstrap/dist/css/bootstrap.css');
+  app.import('bower_components/bootstrap/dist/css/bootstrap-theme.css');
+  app.import('bower_components/bootstrap/dist/js/bootstrap.min.js');
 
   // Use `app.import` to add additional libraries to the generated
   // output files.





[31/50] [abbrv] hadoop git commit: YARN-4733. [YARN-3368] Initial commit of new YARN web UI. (wangda)

2016-10-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09bf289e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
new file mode 100644
index 000..d39885e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
@@ -0,0 +1,29 @@
+import DS from 'ember-data';
+
+export default DS.JSONAPISerializer.extend({
+normalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  var fixedPayload = {
+id: id,
+type: primaryModelClass.modelName,
+attributes: payload
+  };
+
+  return this._super(store, primaryModelClass, fixedPayload, id,
+requestType);
+},
+
+normalizeArrayResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  // expected return shape is { data: [ {}, {} ] }
+  var normalizedArrayResponse = {};
+
+  // payload has clusterMetrics as a single object; needs error handling
+  // in case clusterMetrics is not defined.
+  normalizedArrayResponse.data = [
+this.normalizeSingleResponse(store, primaryModelClass,
+  payload.clusterMetrics, 1, requestType)
+  ];
+  return normalizedArrayResponse;
+}
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09bf289e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
new file mode 100644
index 000..c5394d0
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
@@ -0,0 +1,49 @@
+import DS from 'ember-data';
+import Converter from 'yarn-ui/utils/converter';
+
+export default DS.JSONAPISerializer.extend({
+internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  
+  if (payload.appAttempt) {
+payload = payload.appAttempt;  
+  }
+  
+  var fixedPayload = {
+id: payload.appAttemptId,
+type: primaryModelClass.modelName, // yarn-app
+attributes: {
+  startTime: Converter.timeStampToDate(payload.startTime),
+  finishedTime: Converter.timeStampToDate(payload.finishedTime),
+  containerId: payload.containerId,
+  nodeHttpAddress: payload.nodeHttpAddress,
+  nodeId: payload.nodeId,
+  state: payload.nodeId,
+  logsLink: payload.logsLink
+}
+  };
+
+  return fixedPayload;
+},
+
+normalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  var p = this.internalNormalizeSingleResponse(store, 
+primaryModelClass, payload, id, requestType);
+  return { data: p };
+},
+
+normalizeArrayResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  // expected return shape is { data: [ {}, {} ] }
+  var normalizedArrayResponse = {};
+
+  // payload has appAttempts : { appAttempt: [ {},{},{} ] }
+  // needs error handling in case appAttempts or appAttempt is not defined.
+  normalizedArrayResponse.data = 
payload.appAttempts.appAttempt.map(singleApp => {
+return this.internalNormalizeSingleResponse(store, primaryModelClass,
+  singleApp, singleApp.id, requestType);
+  }, this);
+  return normalizedArrayResponse;
+}
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09bf289e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
new file mode 100644
index 000..a038fff
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
@@ -0,0 +1,66 @@
+import DS from 'ember-data';
+import Converter from 'yarn-ui/utils/converter';
+
+export default DS.JSONAPISerializer.extend({
+internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  if (payload.app) {
+payload = payload.app;  
+  }
+  
+  var fixedPayload = {
+id: id,
+type: primaryModelClass.modelName, // yarn-app
+attributes: {
+  appName: payload.name,
+  user: payload.user,
+  queue: payload.queue,
+  state: payload.state,
+  startTime: Converter.timeStampToDate(payload.startedTime),
+  elapsedTime: 

[42/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
new file mode 100644
index 000..4e68da0
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+import Constants from 'yarn-ui/constants';
+
+moduleFor('route:yarn-container-log', 'Unit | Route | ContainerLog', {
+});
+
+test('Basic creation test', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+  assert.ok(route.model);
+});
+
+test('Test getting container log', function(assert) {
+  var response = {
+  logs: "This is syslog",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve) {
+resolve(response);
+  }
+)}
+  };
+  assert.expect(6);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.containerLog);
+ assert.deepEqual(value.containerLog, response);
+ assert.ok(value.nodeInfo);
+ assert.equal(value.nodeInfo.addr, 'localhost:8042');
+ assert.equal(value.nodeInfo.id, 'localhost:64318');
+   });
+});
+
+/**
+ * This can happen when an empty response is sent from server
+ */
+test('Test non HTTP error while getting container log', function(assert) {
+  var error = {};
+  var response = {
+  logs: "",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve, reject) {
+reject(error);
+  }
+)}
+  };
+  assert.expect(6);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.containerLog);
+ assert.deepEqual(value.containerLog, response);
+ assert.ok(value.nodeInfo);
+ assert.equal(value.nodeInfo.addr, 'localhost:8042');
+ assert.equal(value.nodeInfo.id, 'localhost:64318');
+   });
+});
+
+test('Test HTTP error while getting container log', function(assert) {
+  var error = {errors: [{status: 404, responseText: 'Not Found'}]};
+  var response = {
+  logs: "",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve, reject) {
+reject(error);
+  }
+)}
+  };
+  assert.expect(5);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.errors);
+ assert.equal(value.errors.length, 1);
+ assert.equal(value.errors[0].status, 404);
+ assert.equal(value.errors[0].responseText, 'Not Found');
+   });
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-app-test.js
--
diff 

[02/50] [abbrv] hadoop git commit: HADOOP-13716. Add LambdaTestUtils class for tests; fix eventual consistency problem in contract test setup. Contributed by Steve Loughran.

2016-10-21 Thread wangda
HADOOP-13716. Add LambdaTestUtils class for tests; fix eventual consistency 
problem in contract test setup. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fbf4cd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fbf4cd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fbf4cd5

Branch: refs/heads/YARN-3368
Commit: 3fbf4cd5da13dde68b77e581ea2d4aa564c8c8b7
Parents: 6d2da38
Author: Anu Engineer 
Authored: Thu Oct 20 12:33:58 2016 -0700
Committer: Anu Engineer 
Committed: Thu Oct 20 12:33:58 2016 -0700

--
 .../AbstractContractRootDirectoryTest.java  |  48 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |   6 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java | 521 +++
 .../apache/hadoop/test/TestLambdaTestUtils.java | 395 ++
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  |  20 +-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  48 --
 6 files changed, 962 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbf4cd5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
index 0a8f464..5fba4bf 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
@@ -27,12 +27,16 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.test.LambdaTestUtils;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.deleteChildren;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.dumpStats;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.listChildren;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.toList;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk;
@@ -45,6 +49,7 @@ import static 
org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk;
 public abstract class AbstractContractRootDirectoryTest extends 
AbstractFSContractTestBase {
   private static final Logger LOG =
   LoggerFactory.getLogger(AbstractContractRootDirectoryTest.class);
+  public static final int OBJECTSTORE_RETRY_TIMEOUT = 3;
 
   @Override
   public void setup() throws Exception {
@@ -79,23 +84,34 @@ public abstract class AbstractContractRootDirectoryTest 
extends AbstractFSContra
 // extra sanity checks here to avoid support calls about complete loss
 // of data
 skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
-Path root = new Path("/");
+final Path root = new Path("/");
 assertIsDirectory(root);
-// make sure it is clean
-FileSystem fs = getFileSystem();
-deleteChildren(fs, root, true);
-FileStatus[] children = listChildren(fs, root);
-if (children.length > 0) {
-  StringBuilder error = new StringBuilder();
-  error.append("Deletion of child entries failed, still have")
-  .append(children.length)
-  .append(System.lineSeparator());
-  for (FileStatus child : children) {
-error.append("  ").append(child.getPath())
-.append(System.lineSeparator());
-  }
-  fail(error.toString());
-}
+// make sure the directory is clean. This includes some retry logic
+// to forgive blobstores whose listings can be out of sync with the file
+// status;
+final FileSystem fs = getFileSystem();
+final AtomicInteger iterations = new AtomicInteger(0);
+final FileStatus[] originalChildren = listChildren(fs, root);
+LambdaTestUtils.eventually(
+OBJECTSTORE_RETRY_TIMEOUT,
+new Callable() {
+  @Override
+  public Void call() throws Exception {
+FileStatus[] deleted = deleteChildren(fs, root, true);
+FileStatus[] children = listChildren(fs, root);
+if (children.length > 0) {
+  fail(String.format(
+  "After %d attempts: listing after rm /* not empty"
+  + 
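
The diff is cut off here by the archive, but the core idea is easy to show standalone. Below is a minimal, self-contained sketch of the retry-until-timeout loop that LambdaTestUtils.eventually() encapsulates; it does not call the Hadoop class itself, and the probe and timings are illustrative assumptions:

    import java.util.concurrent.Callable;

    public class EventuallySketch {
      /** Re-evaluate a probe until it stops throwing or the timeout expires. */
      static <T> T eventually(int timeoutMillis, int intervalMillis,
          Callable<T> probe) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (true) {
          try {
            return probe.call();              // success: the condition holds
          } catch (Exception | AssertionError e) {
            if (System.currentTimeMillis() >= deadline) {
              throw e;                        // timed out: surface last failure
            }
            Thread.sleep(intervalMillis);     // back off, then try again
          }
        }
      }

      public static void main(String[] args) throws Exception {
        // Simulate a blobstore listing that lags deletes by ~300 ms.
        final long consistentAt = System.currentTimeMillis() + 300;
        String listing = eventually(3000, 50, new Callable<String>() {
          @Override
          public String call() {
            if (System.currentTimeMillis() < consistentAt) {
              throw new AssertionError("listing after rm /* not empty yet");
            }
            return "empty";
          }
        });
        System.out.println("root directory is " + listing);
      }
    }

The real helper is richer, but this retry-until-timeout control flow is what the contract test setup above leans on to forgive eventually consistent object stores.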

[11/50] [abbrv] hadoop git commit: HDFS-8410. Add computation time metrics to datanode for ECWorker. Contributed by SammiChen.

2016-10-21 Thread wangda
HDFS-8410. Add computation time metrics to datanode for ECWorker. Contributed 
by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61e30cf8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61e30cf8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61e30cf8

Branch: refs/heads/YARN-3368
Commit: 61e30cf83ca78529603d9b4c6732418da7e4d0c8
Parents: ae8bccd
Author: Andrew Wang 
Authored: Fri Oct 21 13:12:35 2016 -0700
Committer: Andrew Wang 
Committed: Fri Oct 21 13:12:41 2016 -0700

--
 .../erasurecode/StripedBlockReconstructor.java  |  3 ++
 .../datanode/metrics/DataNodeMetrics.java   | 13 +-
 .../TestDataNodeErasureCodingMetrics.java   | 43 +---
 3 files changed, 43 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61e30cf8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
index 9f9f15d..a8e9d30 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
@@ -103,7 +103,10 @@ class StripedBlockReconstructor extends 
StripedReconstructor
 int[] erasedIndices = stripedWriter.getRealTargetIndices();
 ByteBuffer[] outputs = 
stripedWriter.getRealTargetBuffers(toReconstructLen);
 
+long start = System.nanoTime();
 getDecoder().decode(inputs, erasedIndices, outputs);
+long end = System.nanoTime();
+this.getDatanode().getMetrics().incrECDecodingTime(end - start);
 
 stripedWriter.updateRealTargetBuffers(toReconstructLen);
   }
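
The instrumentation pattern above is simple enough to exercise outside a datanode. A minimal sketch, with a plain AtomicLong standing in for the MutableCounterLong behind incrECDecodingTime(), and a busy loop standing in for the actual erasure decode:

    import java.util.concurrent.atomic.AtomicLong;

    public class DecodeTimingSketch {
      // Stand-in for DataNodeMetrics#ecDecodingTimeNanos.
      private static final AtomicLong DECODING_TIME_NANOS = new AtomicLong();

      /** Placeholder for getDecoder().decode(inputs, erasedIndices, outputs). */
      private static long decode() {
        long x = 0;
        for (int i = 0; i < 1_000_000; i++) {
          x += i ^ (x << 1);                  // burn a little CPU
        }
        return x;
      }

      public static void main(String[] args) {
        long start = System.nanoTime();
        long result = decode();
        long end = System.nanoTime();
        DECODING_TIME_NANOS.addAndGet(end - start);  // as incrECDecodingTime(end - start)
        System.out.println("ecDecodingTimeNanos = " + DECODING_TIME_NANOS.get()
            + " (decode result " + result + ")");
      }
    }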

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61e30cf8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index dc12787..23e15a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode.metrics;
 
 import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
+import static org.apache.hadoop.metrics2.lib.Interns.info;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -134,6 +135,8 @@ public class DataNodeMetrics {
   MutableCounterLong ecReconstructionTasks;
   @Metric("Count of erasure coding failed reconstruction tasks")
   MutableCounterLong ecFailedReconstructionTasks;
+  // Nanoseconds spent by decoding tasks.
+  MutableCounterLong ecDecodingTimeNanos;
 
   final MetricsRegistry registry = new MetricsRegistry("datanode");
   final String name;
@@ -153,7 +156,10 @@ public class DataNodeMetrics {
 sendDataPacketTransferNanosQuantiles = new MutableQuantiles[len];
 ramDiskBlocksEvictionWindowMsQuantiles = new MutableQuantiles[len];
 ramDiskBlocksLazyPersistWindowMsQuantiles = new MutableQuantiles[len];
-
+ecDecodingTimeNanos = registry.newCounter(
+info("ecDecodingTimeNanos", "Nanoseconds spent by decoding tasks"),
+(long) 0);
+
 for (int i = 0; i < len; i++) {
   int interval = intervals[i];
   packetAckRoundTripTimeNanosQuantiles[i] = registry.newQuantiles(
@@ -442,7 +448,10 @@ public class DataNodeMetrics {
   }
 
   public void setDataNodeActiveXceiversCount(int value) {
-this.dataNodeActiveXceiversCount.set(value);
+dataNodeActiveXceiversCount.set(value);
   }
 
+  public void incrECDecodingTime(long decodingTimeNanos) {
+ecDecodingTimeNanos.incr(decodingTimeNanos);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61e30cf8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
--
diff --git 

[29/50] [abbrv] hadoop git commit: YARN-5698. [YARN-3368] Launch new YARN UI under hadoop web app port. (Sunil G via wangda)

2016-10-21 Thread wangda
YARN-5698. [YARN-3368] Launch new YARN UI under hadoop web app port. (Sunil G 
via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7fbb379
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7fbb379
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7fbb379

Branch: refs/heads/YARN-3368
Commit: a7fbb379220e74860ad4a1a2af3038651b41972f
Parents: 04c6fb8
Author: Wangda Tan 
Authored: Wed Oct 12 13:22:20 2016 -0700
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 21 ++
 .../org/apache/hadoop/yarn/webapp/WebApps.java  |  8 +++
 .../src/main/resources/yarn-default.xml | 20 ++
 .../server/resourcemanager/ResourceManager.java | 68 +++-
 .../src/main/webapp/config/default-config.js|  4 +-
 5 files changed, 55 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7fbb379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index d69d85c..d068f04 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -266,25 +266,12 @@ public class YarnConfiguration extends Configuration {
   /**
* Enable YARN WebApp V2.
*/
-  public static final String RM_WEBAPP_UI2_ENABLE = RM_PREFIX
+  public static final String YARN_WEBAPP_UI2_ENABLE = "yarn."
   + "webapp.ui2.enable";
-  public static final boolean DEFAULT_RM_WEBAPP_UI2_ENABLE = false;
+  public static final boolean DEFAULT_YARN_WEBAPP_UI2_ENABLE = false;
 
-  /** The address of the RM web ui2 application. */
-  public static final String RM_WEBAPP_UI2_ADDRESS = RM_PREFIX
-  + "webapp.ui2.address";
-
-  public static final int DEFAULT_RM_WEBAPP_UI2_PORT = 8288;
-  public static final String DEFAULT_RM_WEBAPP_UI2_ADDRESS = "0.0.0.0:" +
-  DEFAULT_RM_WEBAPP_UI2_PORT;
-  
-  /** The https address of the RM web ui2 application.*/
-  public static final String RM_WEBAPP_UI2_HTTPS_ADDRESS =
-  RM_PREFIX + "webapp.ui2.https.address";
-
-  public static final int DEFAULT_RM_WEBAPP_UI2_HTTPS_PORT = 8290;
-  public static final String DEFAULT_RM_WEBAPP_UI2_HTTPS_ADDRESS = "0.0.0.0:"
-  + DEFAULT_RM_WEBAPP_UI2_HTTPS_PORT;
+  public static final String YARN_WEBAPP_UI2_WARFILE_PATH = "yarn."
+  + "webapp.ui2.war-file-path";
 
   public static final String RM_RESOURCE_TRACKER_ADDRESS =
 RM_PREFIX + "resource-tracker.address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7fbb379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
index 53cb3ee..d3b37d9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
@@ -43,6 +43,7 @@ import 
org.apache.hadoop.security.http.RestCsrfPreventionFilter;
 import org.apache.hadoop.security.http.XFrameOptionsFilter;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.mortbay.jetty.webapp.WebAppContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -369,8 +370,15 @@ public class WebApps {
 }
 
 public WebApp start(WebApp webapp) {
+  return start(webapp, null);
+}
+
+public WebApp start(WebApp webapp, WebAppContext ui2Context) {
   WebApp webApp = build(webapp);
   HttpServer2 httpServer = webApp.httpServer();
+  if (ui2Context != null) {
+httpServer.addContext(ui2Context, true);
+  }
   try {
 httpServer.start();
 LOG.info("Web app " + name + " started at "
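
The diff is cut off here by the archive, but the effect of the new two-argument start() is easy to sketch with the same org.mortbay Jetty API it imports: mount a prebuilt UI war as an extra context on the server that already hosts the RM web app. The port, context path, and war location below are illustrative assumptions, not the RM's actual wiring:

    import org.mortbay.jetty.Server;
    import org.mortbay.jetty.webapp.WebAppContext;

    public class Ui2MountSketch {
      public static void main(String[] args) throws Exception {
        Server server = new Server(8088);         // assumed port for the sketch

        WebAppContext ui2 = new WebAppContext();
        ui2.setContextPath("/ui2");               // path the new UI is served under
        ui2.setWar("/tmp/hadoop-yarn-ui.war");    // stand-in for the configured war

        // The RM's HttpServer2 adds this context alongside its existing ones;
        // a bare server with the war as its handler shows the same mechanics.
        server.setHandler(ui2);
        server.start();
        server.join();
      }
    }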

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7fbb379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--

[15/50] [abbrv] hadoop git commit: YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via wangda)

2016-10-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ca4351/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
new file mode 100644
index 000..21a715c
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('serializer:yarn-node-app', 'Unit | Serializer | NodeApp', {
+});
+
+test('Basic creation test', function(assert) {
+  let serializer = this.subject();
+
+  assert.ok(serializer);
+  assert.ok(serializer.normalizeSingleResponse);
+  assert.ok(serializer.normalizeArrayResponse);
+  assert.ok(serializer.internalNormalizeSingleResponse);
+});
+
+test('normalizeArrayResponse test', function(assert) {
+  let serializer = this.subject(),
+  modelClass = {
+modelName: "yarn-node-app"
+  },
+  payload = {
+apps: {
+  app: [{
+id:"application_1456251210105_0001", state:"FINISHED", user:"root"
+  },{
+id:"application_1456251210105_0002", state:"RUNNING",user:"root",
+containerids:["container_e38_1456251210105_0002_01_01",
+"container_e38_1456251210105_0002_01_02"]
+  }]
+}
+  };
+  assert.expect(15);
+  var response =
+  serializer.normalizeArrayResponse({}, modelClass, payload, null, null);
+  assert.ok(response.data);
+  assert.equal(response.data.length, 2);
+  assert.equal(response.data[0].attributes.containers, undefined);
+  assert.equal(response.data[1].attributes.containers.length, 2);
+  assert.deepEqual(response.data[1].attributes.containers,
+  payload.apps.app[1].containerids);
+  for (var i = 0; i < 2; i++) {
+assert.equal(response.data[i].type, modelClass.modelName);
+assert.equal(response.data[i].id, payload.apps.app[i].id);
+assert.equal(response.data[i].attributes.appId, payload.apps.app[i].id);
+assert.equal(response.data[i].attributes.state, payload.apps.app[i].state);
+assert.equal(response.data[i].attributes.user, payload.apps.app[i].user);
+  }
+});
+
+test('normalizeArrayResponse no apps test', function(assert) {
+  let serializer = this.subject(),
+  modelClass = {
+modelName: "yarn-node-app"
+  },
+  payload = { apps: null };
+  assert.expect(5);
+  var response =
+  serializer.normalizeArrayResponse({}, modelClass, payload, null, null);
+  assert.ok(response.data);
+  assert.equal(response.data.length, 1);
+  assert.equal(response.data[0].type, modelClass.modelName);
+  assert.equal(response.data[0].id, "dummy");
+  assert.equal(response.data[0].attributes.appId, undefined);
+});
+
+test('normalizeSingleResponse test', function(assert) {
+  let serializer = this.subject(),
+  modelClass = {
+modelName: "yarn-node-app"
+  },
+  payload = {
+app: {id:"application_1456251210105_0001", state:"FINISHED", user:"root"}
+  };
+  assert.expect(7);
+  var response =
+  serializer.normalizeSingleResponse({}, modelClass, payload, null, null);
+  assert.ok(response.data);
+  assert.equal(payload.app.id, response.data.id);
+  assert.equal(modelClass.modelName, response.data.type);
+  assert.equal(payload.app.id, response.data.attributes.appId);
+  assert.equal(payload.app.state, response.data.attributes.state);
+  assert.equal(payload.app.user, response.data.attributes.user);
+  assert.equal(response.data.attributes.containers, undefined);
+});
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ca4351/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-container-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-container-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-container-test.js
new file mode 100644
index 000..1f08467
--- 

[46/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
new file mode 100644
index 000..66bf54a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -0,0 +1,207 @@
+
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+    http://maven.apache.org/maven-v4_0_0.xsd">
+  <parent>
+    <artifactId>hadoop-yarn</artifactId>
+    <groupId>org.apache.hadoop</groupId>
+    <version>3.0.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-yarn-ui</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <name>Apache Hadoop YARN UI</name>
+  <packaging>${packaging.type}</packaging>
+
+  
+jar
+src/main/webapp
+node
+v0.12.2
+2.10.0
+false
+  
+
+  
+
+  
+  
+org.apache.rat
+apache-rat-plugin
+
+  
+src/main/webapp/node_modules/**/*
+src/main/webapp/bower_components/**/*
+src/main/webapp/jsconfig.json
+src/main/webapp/bower.json
+src/main/webapp/package.json
+src/main/webapp/testem.json
+src/main/webapp/public/assets/images/**/*
+src/main/webapp/public/robots.txt
+public/crossdomain.xml
+  
+
+  
+
+  
+ maven-clean-plugin
+ 3.0.0
+ 
+false
+
+   
+  
${basedir}/src/main/webapp/bower_components
+   
+   
+  
${basedir}/src/main/webapp/node_modules
+   
+
+ 
+  
+
+  
+
+  
+
+  yarn-ui
+
+  
+false
+  
+
+  
+war
+  
+
+  
+
+  
+  
+exec-maven-plugin
+org.codehaus.mojo
+
+  
+generate-sources
+npm install
+
+  exec
+
+
+  ${webappDir}
+  npm
+  
+install
+  
+
+  
+  
+generate-sources
+bower install
+
+  exec
+
+
+  ${webappDir}
+  bower
+  
+--allow-root
+install
+  
+
+  
+  
+generate-sources
+bower --allow-root install
+
+  exec
+
+
+  ${webappDir}
+  bower
+  
+--allow-root
+install
+  
+
+  
+  
+ember build
+generate-sources
+
+  exec
+
+
+  ${webappDir}
+  ember
+  
+build
+-prod
+--output-path
+${basedir}/target/dist
+  
+
+  
+  
+ember test
+generate-resources
+
+  exec
+
+
+  ${skipTests}
+  ${webappDir}
+  ember
+  
+test
+  
+
+  
+  
+cleanup tmp
+generate-sources
+
+  exec
+
+
+  ${webappDir}
+  rm
+  
+-rf
+tmp
+  
+
+  
+
+  
+
+  
+  
+org.apache.maven.plugins
+maven-war-plugin
+
+  ${basedir}/src/main/webapp/WEB-INF/web.xml
+  ${basedir}/target/dist
+
+  
+
+
+  
+
+  
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/robots.txt
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/robots.txt 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/robots.txt
deleted file mode 100644
index f591645..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/robots.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-# http://www.robotstxt.org
-User-agent: *
-Disallow:


[27/50] [abbrv] hadoop git commit: YARN-5145. [YARN-3368] Move new YARN UI configuration to HADOOP_CONF_DIR. (Sunil G and Kai Sasaki via wangda)

2016-10-21 Thread wangda
YARN-5145. [YARN-3368] Move new YARN UI configuration to HADOOP_CONF_DIR. 
(Sunil G and Kai Sasaki via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b009a228
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b009a228
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b009a228

Branch: refs/heads/YARN-3368
Commit: b009a22821677f73bf6ba2efdc078a0a094ee2d1
Parents: 3d04cfc
Author: Wangda Tan 
Authored: Mon Oct 17 11:30:16 2016 -0700
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../src/main/webapp/app/initializers/loader.js  | 86 
 .../tests/unit/initializers/loader-test.js  | 40 +
 2 files changed, 126 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b009a228/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
new file mode 100644
index 000..08e4dbd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+function getTimeLineURL(parameters) {
+  return '/conf?name=yarn.timeline-service.webapp.address';
+}
+
+function updateConfigs(application) {
+  var hostname = window.location.hostname;
+  var rmhost = hostname +
+(window.location.port ? ':' + window.location.port: '');
+
+  Ember.Logger.log("RM Address:" + rmhost);
+
+  if(!ENV.hosts.rmWebAddress) {
+ENV = {
+   hosts: {
+  rmWebAddress: rmhost,
+},
+};
+  }
+
+  if(!ENV.hosts.timelineWebAddress) {
+var result = [];
+var timelinehost = "";
+$.ajax({
+  type: 'GET',
+  dataType: 'json',
+  async: true,
+  context: this,
+  url: getTimeLineURL(),
+  success: function(data) {
+timelinehost = data.property.value;
+ENV.hosts.timelineWebAddress = timelinehost;
+
+var address = timelinehost.split(":")[0];
+var port = timelinehost.split(":")[1];
+
+Ember.Logger.log("Timeline Address from RM:" + address + ":" + port);
+
+if(address == "0.0.0.0" || address == "localhost") {
+  var updatedAddress =  hostname + ":" + port;
+
+  /* Timeline v2 is not supporting CORS, so make as default*/
+  ENV = {
+ hosts: {
+rmWebAddress: rmhost,
+timelineWebAddress: updatedAddress,
+  },
+  };
+  Ember.Logger.log("Timeline Updated Address:" + updatedAddress);
+}
+application.advanceReadiness();
+  },
+});
+  } else {
+application.advanceReadiness();
+  }
+}
+
+export function initialize( application ) {
+  application.deferReadiness();
+  updateConfigs(application);
+}
+
+export default {
+  name: 'loader',
+  before: 'env',
+  initialize
+};

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b009a228/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/initializers/loader-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/initializers/loader-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/initializers/loader-test.js
new file mode 100644
index 000..cc32e92
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/initializers/loader-test.js
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this 

[08/50] [abbrv] hadoop git commit: HDFS-10730. Fix some failed tests due to BindException. Contributed by Yiqun Lin

2016-10-21 Thread wangda
HDFS-10730. Fix some failed tests due to BindException. Contributed by Yiqun Lin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f63cd78f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f63cd78f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f63cd78f

Branch: refs/heads/YARN-3368
Commit: f63cd78f6008bf7cfc9ee74217ed6f3d4f5bec5c
Parents: 754cb4e
Author: Brahma Reddy Battula 
Authored: Fri Oct 21 18:16:39 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Fri Oct 21 18:16:39 2016 +0530

--
 .../java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java   | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f63cd78f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
index d223354..b532443 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -252,7 +252,7 @@ public class TestDecommissionWithStriped {
 Thread.sleep(3000); // grace period to trigger decommissioning call
 // start datanode so that decommissioning live node will be finished
 for (DataNodeProperties dnp : stoppedDns) {
-  cluster.restartDataNode(dnp, true);
+  cluster.restartDataNode(dnp);
   LOG.info("Restarts stopped datanode:{} to trigger block reconstruction",
   dnp.datanode);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f63cd78f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
index 908ab0c..8f83ba5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
@@ -479,7 +479,7 @@ public class TestFileChecksum {
 }
 
 if (dnIdxToDie != -1) {
-  cluster.restartDataNode(dnIdxToDie, true);
+  cluster.restartDataNode(dnIdxToDie);
 }
 
 return fc;
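
The underlying failure mode is a port-reuse race: restartDataNode(dnp, true) asked the restarted datanode to re-bind the exact ports it had before, and anything that grabs one of them in the meantime produces the BindException these tests were hitting. Dropping the flag lets the node come back on fresh ephemeral ports. A tiny standalone illustration of the difference:

    import java.net.BindException;
    import java.net.ServerSocket;

    public class BindRaceSketch {
      public static void main(String[] args) throws Exception {
        ServerSocket first = new ServerSocket(0); // bind an ephemeral port
        int port = first.getLocalPort();
        first.close();

        // Re-binding the same fixed port can fail if another process, or a
        // socket lingering in TIME_WAIT, holds it by the time we retry.
        try (ServerSocket fixed = new ServerSocket(port)) {
          System.out.println("re-bound old port " + fixed.getLocalPort());
        } catch (BindException e) {
          System.out.println("old port already taken: " + e.getMessage());
        }

        // Asking for port 0 again always works: the kernel picks a free port.
        try (ServerSocket fresh = new ServerSocket(0)) {
          System.out.println("fresh ephemeral port " + fresh.getLocalPort());
        }
      }
    }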





[20/50] [abbrv] hadoop git commit: YARN-5741. [YARN-3368] Update UI2 documentation for new UI2 path (Kai Sasaki and Wangda Tan via Sunil G)

2016-10-21 Thread wangda
YARN-5741. [YARN-3368] Update UI2 documentation for new UI2 path (Kai Sasaki 
and Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9690f297
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9690f297
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9690f297

Branch: refs/heads/YARN-3368
Commit: 9690f297fbb6287f1a0020969834d8d9723309a5
Parents: b009a22
Author: sunilg 
Authored: Tue Oct 18 23:49:55 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../src/site/markdown/YarnUI2.md| 28 +---
 1 file changed, 12 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9690f297/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
index ff48183..f646d3d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
@@ -18,30 +18,26 @@
 Hadoop: YARN-UI V2
 =
 
-Prerequisites
--
-
-If you run RM locally in your computer for test purpose, you need the 
following things properly installed.
-
-- Install Node.js with NPM: https://nodejs.org/download
-- After Node.js installed, install `corsproxy`: `npm install -g corsproxy`.
-
-
 Configurations
 -
 
-*In yarn-site.xml*
+*In `yarn-site.xml`*
 
 | Configuration Property | Description |
 |: |: |
-| `yarn.resourcemanager.webapp.ui2.enable` | In the server side it indicates 
whether the new YARN-UI v2 is enabled or not. Defaults to `false`. |
-| `yarn.resourcemanager.webapp.ui2.address` | Specify the address of 
ResourceManager and port which host YARN-UI v2, defaults to `localhost:8288`. |
+| `yarn.webapp.ui2.enable` | *(Required)* Indicates on the server side whether 
the new YARN-UI v2 is enabled. Defaults to `false`. |
+| `yarn.webapp.ui2.war-file-path` | *(Optional)* WAR file path for launching 
the YARN UI2 web application. By default this is empty, and YARN will look up 
the required WAR file from the classpath. |
 
-*In $HADOOP_PREFIX/share/hadoop/yarn/webapps/rm/config/configs.env*
+Please note that, if you run YARN daemons locally on your machine for testing 
+purposes, you need the following configuration added to `yarn-site.xml` to 
+enable cross-origin (CORS) support.
 
-- Update timelineWebAddress and rmWebAddress to the actual addresses run 
resource manager and timeline server
-- If you run RM locally in you computer just for test purpose, you need to 
keep `corsproxy` running. Otherwise, you need to set `localBaseAddress` to 
empty.
+| Configuration Property | Value | Description |
+|: |: |: |
+| `yarn.timeline-service.http-cross-origin.enabled` | true | Enable CORS 
support for Timeline Server  |
+| `yarn.resourcemanager.webapp.cross-origin.enabled` | true | Enable CORS 
support for Resource Manager  |
+| `yarn.nodemanager.webapp.cross-origin.enabled` | true | Enable CORS support 
for Node Manager  |
 
 Use it
 -
-Open your browser, go to `rm-address:8288` and try it!
+Open your browser, go to `rm-address:8088/ui2` and try it!
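
For a quick local experiment, the same switches can also be set programmatically, for example in a test that builds a Configuration before starting the daemons. A sketch using only the property names from the tables above:

    import org.apache.hadoop.conf.Configuration;

    public class Ui2CorsFlags {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // The UI2 switch itself.
        conf.setBoolean("yarn.webapp.ui2.enable", true);

        // The three CORS switches needed for a single-machine setup.
        conf.setBoolean("yarn.timeline-service.http-cross-origin.enabled", true);
        conf.setBoolean("yarn.resourcemanager.webapp.cross-origin.enabled", true);
        conf.setBoolean("yarn.nodemanager.webapp.cross-origin.enabled", true);

        System.out.println("ui2 enabled = "
            + conf.getBoolean("yarn.webapp.ui2.enable", false));
      }
    }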





[36/50] [abbrv] hadoop git commit: YARN-4849. Addendum patch to remove unwanted files from rat exclusions. (Wangda Tan via Sunil G)

2016-10-21 Thread wangda
YARN-4849. Addendum patch to remove unwanted files from rat exclusions. (Wangda 
Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d04cfc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d04cfc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d04cfc0

Branch: refs/heads/YARN-3368
Commit: 3d04cfc0ba53fe1ca1b3c69301828654adf44829
Parents: a7fbb37
Author: sunilg 
Authored: Fri Oct 14 18:23:04 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  | 14 
 .../src/main/webapp/.editorconfig   | 34 
 2 files changed, 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d04cfc0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 440aca9..b427713 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -46,32 +46,18 @@
 apache-rat-plugin
 
   
-src/main/webapp/node_modules/**/*
-src/main/webapp/bower_components/**/*
 src/main/webapp/jsconfig.json
 src/main/webapp/bower.json
 src/main/webapp/package.json
 src/main/webapp/testem.json
-
-src/main/webapp/dist/**/*
-src/main/webapp/tmp/**/*
 src/main/webapp/public/assets/images/**/*
 src/main/webapp/public/assets/images/*
 src/main/webapp/public/robots.txt
-
-public/assets/images/**/*
 public/crossdomain.xml
-
-src/main/webapp/.tmp/**/*
 src/main/webapp/.bowerrc
-src/main/webapp/.editorconfig
 src/main/webapp/.ember-cli
-src/main/webapp/.gitignore
 src/main/webapp/.jshintrc
-src/main/webapp/.travis.yml
 src/main/webapp/.watchmanconfig
-src/main/webapp/tests/.jshintrc
-src/main/webapp/blueprints/.jshintrc
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d04cfc0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
deleted file mode 100644
index 47c5438..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
+++ /dev/null
@@ -1,34 +0,0 @@
-# EditorConfig helps developers define and maintain consistent
-# coding styles between different editors and IDEs
-# editorconfig.org
-
-root = true
-
-
-[*]
-end_of_line = lf
-charset = utf-8
-trim_trailing_whitespace = true
-insert_final_newline = true
-indent_style = space
-indent_size = 2
-
-[*.js]
-indent_style = space
-indent_size = 2
-
-[*.hbs]
-insert_final_newline = false
-indent_style = space
-indent_size = 2
-
-[*.css]
-indent_style = space
-indent_size = 2
-
-[*.html]
-indent_style = space
-indent_size = 2
-
-[*.{diff,md}]
-trim_trailing_whitespace = false





[06/50] [abbrv] hadoop git commit: YARN-4911. Bad placement policy in FairScheduler causes the RM to crash

2016-10-21 Thread wangda
YARN-4911. Bad placement policy in FairScheduler causes the RM to crash


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a064865a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a064865a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a064865a

Branch: refs/heads/YARN-3368
Commit: a064865abf7dceee46d3c42eca67a04a25af9d4e
Parents: d7d87de
Author: Karthik Kambatla 
Authored: Thu Oct 20 20:57:04 2016 -0700
Committer: Karthik Kambatla 
Committed: Thu Oct 20 20:57:04 2016 -0700

--
 .../scheduler/fair/FairScheduler.java   |  6 +
 .../scheduler/fair/TestFairScheduler.java   | 28 
 .../fair/TestQueuePlacementPolicy.java  |  9 +--
 3 files changed, 41 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a064865a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 8daf0f3..d33c214 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -774,6 +774,12 @@ public class FairScheduler extends
   appRejectMsg = queueName + " is not a leaf queue";
 }
   }
+} catch (IllegalStateException se) {
+  appRejectMsg = "Unable to match app " + rmApp.getApplicationId() +
+  " to a queue placement policy, and no valid terminal queue " +
+  " placement rule is configured. Please contact an administrator " +
+  " to confirm that the fair scheduler configuration contains a " +
+  " valid terminal queue placement rule.";
 } catch (InvalidQueueNameException qne) {
   appRejectMsg = qne.getMessage();
 } catch (IOException ioe) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a064865a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 98af8b9..7535f69 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -1605,6 +1605,34 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
   }
 
   @Test
+  public void testAssignToBadDefaultQueue() throws Exception {
+conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
+
+PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
+out.println("");
+out.println("");
+out.println("");
+out.println("");
+out.println("");
+out.println("");
+out.println("");
+out.close();
+scheduler.init(conf);
+scheduler.start();
+scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+RMApp rmApp1 = new MockRMApp(0, 0, RMAppState.NEW);
+
+try {
+  FSLeafQueue queue1 = scheduler.assignToQueue(rmApp1, "default",
+  "asterix");
+} catch (IllegalStateException ise) {
+  fail("Bad queue placement policy terminal rule should not throw " +
+  "exception ");
+}
+  }
+
+  @Test
   public void testAssignToNonLeafQueueReturnsNull() throws Exception {
 conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "true");
 scheduler.init(conf);
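
The archive has stripped the XML string literals out of the out.println() calls in testAssignToBadDefaultQueue above. For context, the error message added to FairScheduler.java tells administrators to configure a valid terminal queue placement rule; the sketch below writes an allocation file of the shape that satisfies it, with a placement policy ending in a terminal reject rule so placement can never fall through. The exact rules are illustrative, not the test's original literals:

    import java.io.FileWriter;
    import java.io.PrintWriter;

    public class AllocFileSketch {
      public static void main(String[] args) throws Exception {
        try (PrintWriter out = new PrintWriter(new FileWriter("test-queues.xml"))) {
          out.println("<?xml version=\"1.0\"?>");
          out.println("<allocations>");
          out.println("  <queuePlacementPolicy>");
          out.println("    <rule name=\"specified\" create=\"false\"/>");
          out.println("    <rule name=\"reject\"/>");  // terminal rule
          out.println("  </queuePlacementPolicy>");
          out.println("</allocations>");
        }
      }
    }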


[24/50] [abbrv] hadoop git commit: YARN-5682. [YARN-3368] Fix maven build to keep all generated or downloaded files in target folder (Wangda Tan via Sunil G)

2016-10-21 Thread wangda
YARN-5682. [YARN-3368] Fix maven build to keep all generated or downloaded 
files in target folder (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04c6fb85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04c6fb85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04c6fb85

Branch: refs/heads/YARN-3368
Commit: 04c6fb85f19f993747f8e88e83dc89731442fa02
Parents: 0a7a978
Author: sunilg 
Authored: Tue Oct 4 21:07:42 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  | 54 
 hadoop-yarn-project/hadoop-yarn/pom.xml |  2 +-
 2 files changed, 34 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04c6fb85/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index b750a73..440aca9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -31,7 +31,7 @@
 
   
 war
-src/main/webapp
+${basedir}/target/src/main/webapp
 node
 v0.12.2
 2.10.0
@@ -84,10 +84,10 @@
   false
   
 
-  
${basedir}/src/main/webapp/bower_components
+  ${webappTgtDir}/bower_components
 
 
-  ${basedir}/src/main/webapp/node_modules
+  ${webappTgtDir}/node_modules
 
   
 
@@ -109,6 +109,33 @@
 
   
 
+  
+  
+org.apache.maven.plugins
+maven-antrun-plugin
+
+  
+prepare-source-code
+generate-sources
+
+  run
+
+
+  
+
+  
+
+
+
+  
+
+  
+
+  
+
+  
+
+
   
   
 exec-maven-plugin
@@ -121,7 +148,7 @@
   exec
 
 
-  ${webappDir}
+  ${webappTgtDir}
   npm
   
 install
@@ -135,7 +162,7 @@
   exec
 
 
-  ${webappDir}
+  ${webappTgtDir}
   bower
   
 --allow-root
@@ -150,7 +177,7 @@
   exec
 
 
-  ${webappDir}
+  ${webappTgtDir}
   ember
   
 build
@@ -160,21 +187,6 @@
   
 
   
-  
-cleanup tmp
-generate-sources
-
-  exec
-
-
-  ${webappDir}
-  rm
-  
--rf
-tmp
-  
-
-  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04c6fb85/hadoop-yarn-project/hadoop-yarn/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/pom.xml
index ca78ef8..70b68d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -230,7 +230,6 @@
   
 
   
-hadoop-yarn-ui
 hadoop-yarn-api
 hadoop-yarn-common
 hadoop-yarn-server
@@ -238,5 +237,6 @@
 hadoop-yarn-site
 hadoop-yarn-client
 hadoop-yarn-registry
+hadoop-yarn-ui
   
 





[19/50] [abbrv] hadoop git commit: YARN-4849. Addendum patch to fix license. (Wangda Tan via Sunil G)

2016-10-21 Thread wangda
YARN-4849. Addendum patch to fix license. (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58c2e9b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58c2e9b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58c2e9b2

Branch: refs/heads/YARN-3368
Commit: 58c2e9b2f95b5c2c500d2dc09ff3c7fdabe65fbf
Parents: 490c706
Author: sunilg 
Authored: Wed Aug 24 16:28:34 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 LICENSE.txt | 84 ++--
 1 file changed, 51 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58c2e9b2/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 5efbd14..05743fe 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1869,35 +1869,53 @@ be bound by any additional provisions that may appear 
in any communication from
 You. This License may not be modified without the mutual written agreement of
 the Licensor and You.
 
-For Apache Hadoop YARN Web UI component: 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/
--
-The Apache Hadoop YARN Web UI component bundles the following files under the 
MIT License:
-
- - ember v2.2.0 (http://emberjs.com/) - Copyright (c) 2014 Yehuda Katz, Tom 
Dale and Ember.js contributors
- - ember-data v2.1.0 (https://github.com/emberjs/data) - Copyright (C) 
2011-2014 Tilde, Inc. and contributors, Portions Copyright (C) 2011 
LivingSocial Inc.
- - ember-resolver v2.0.3 (https://github.com/ember-cli/ember-resolver) - 
Copyright (c) 2013 Stefan Penner and Ember App Kit Contributors
- - bootstrap v3.3.6 (http://getbootstrap.com) - Copyright (c) 2011-2014 
Twitter, Inc
- - jquery v2.1.4 (http://jquery.org) - Copyright 2005, 2014 jQuery Foundation 
and other contributors
- - jquery-ui v1.11.4 (http://jqueryui.com/) - Copyright 2014 jQuery Foundation 
and other contributors
- - datatables v1.10.8 (https://datatables.net/)
- - moment v2.10.6 (http://momentjs.com/) - Copyright (c) 2011-2015 Tim Wood, 
Iskren Chernev, Moment.js contributors
- - em-helpers v0.5.8 (https://github.com/sreenaths/em-helpers)
- - ember-array-contains-helper v1.0.2 
(https://github.com/bmeurant/ember-array-contains-helper)
- - ember-cli-app-version v0.5.8 
(https://github.com/EmberSherpa/ember-cli-app-version) - Authored by Taras 
Mankovski 
- - ember-cli-babel v5.1.6 (https://github.com/babel/ember-cli-babel) - 
Authored by Stefan Penner 
- - ember-cli-content-security-policy v0.4.0 
(https://github.com/rwjblue/ember-cli-content-security-policy)
- - ember-cli-dependency-checker v1.2.0 
(https://github.com/quaertym/ember-cli-dependency-checker) - Authored by Emre 
Unal
- - ember-cli-htmlbars v1.0.2 (https://github.com/ember-cli/ember-cli-htmlbars) 
- Authored by Robert Jackson 
- - ember-cli-htmlbars-inline-precompile v0.3.1 
(https://github.com/pangratz/ember-cli-htmlbars-inline-precompile) - Authored 
by Clemens Müller 
- - ember-cli-ic-ajax v0.2.1 (https://github.com/rwjblue/ember-cli-ic-ajax) - 
Authored by Robert Jackson 
- - ember-cli-inject-live-reload v1.4.0 
(https://github.com/rwjblue/ember-cli-inject-live-reload) - Authored by Robert 
Jackson 
- - ember-cli-qunit v1.2.1 (https://github.com/ember-cli/ember-cli-qunit) - 
Authored by Robert Jackson 
- - ember-cli-release v0.2.8 (https://github.com/lytics/ember-cli-release) - 
Authored by Robert Jackson 
- - ember-cli-sri v1.2.1 (https://github.com/jonathanKingston/ember-cli-sri) - 
Authored by Jonathan Kingston
- - ember-cli-uglify v1.2.0 (github.com/ember-cli/ember-cli-uglify) - Authored 
by Robert Jackson 
- - ember-d3 v0.1.0 (https://github.com/brzpegasus/ember-d3) - Authored by 
Estelle DeBlois
- - ember-truth-helpers v1.2.0 
(https://github.com/jmurphyau/ember-truth-helpers)
- - select2 v4.0.0 (https://select2.github.io/)
+The binary distribution of this product bundles these dependencies under the
+following license:
+bootstrap v3.3.6
+broccoli-asset-rev v2.4.2
+broccoli-funnel v1.0.1
+datatables v1.10.8
+em-helpers v0.5.13
+em-table v0.1.6
+ember v2.2.0
+ember-array-contains-helper v1.0.2
+ember-bootstrap v0.5.1
+ember-cli v1.13.13
+ember-cli-app-version v1.0.0
+ember-cli-babel v5.1.6
+ember-cli-content-security-policy v0.4.0
+ember-cli-dependency-checker v1.2.0
+ember-cli-htmlbars v1.0.2
+ember-cli-htmlbars-inline-precompile v0.3.1
+ember-cli-ic-ajax v0.2.1
+ember-cli-inject-live-reload v1.4.0
+ember-cli-jquery-ui v0.0.20

[33/50] [abbrv] hadoop git commit: YARN-4515. [YARN-3368] Support hosting web UI framework inside YARN RM. (Sunil G via wangda) YARN-5000. [YARN-3368] App attempt page is not loading when timeline ser

2016-10-21 Thread wangda
YARN-4515. [YARN-3368] Support hosting web UI framework inside YARN RM. (Sunil 
G via wangda)
YARN-5000. [YARN-3368] App attempt page is not loading when timeline server is 
not started (Sunil G via wangda)
YARN-5038. [YARN-3368] Application and Container pages shows wrong values when 
RM is stopped. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97f5dfe5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97f5dfe5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97f5dfe5

Branch: refs/heads/YARN-3368
Commit: 97f5dfe550ea655c115f270c1ba7fd05c772
Parents: bd8f0b6
Author: Wangda Tan 
Authored: Tue May 17 22:28:24 2016 -0700
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 LICENSE.txt |  2 +
 .../resources/assemblies/hadoop-yarn-dist.xml   |  7 ++
 .../hadoop/yarn/conf/YarnConfiguration.java | 23 ++
 .../src/main/resources/yarn-default.xml | 26 +++
 .../server/resourcemanager/ResourceManager.java | 76 +---
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  |  4 +-
 .../webapp/app/adapters/yarn-app-attempt.js |  4 +-
 .../webapp/app/adapters/yarn-container-log.js   |  2 +-
 .../main/webapp/app/adapters/yarn-node-app.js   | 10 ++-
 .../webapp/app/adapters/yarn-node-container.js  | 10 ++-
 .../src/main/webapp/app/adapters/yarn-node.js   |  5 +-
 .../main/webapp/app/components/timeline-view.js | 17 +++--
 .../main/webapp/app/components/tree-selector.js |  4 +-
 .../main/webapp/app/helpers/log-files-comma.js  |  2 +-
 .../src/main/webapp/app/helpers/node-link.js|  2 +-
 .../src/main/webapp/app/helpers/node-menu.js|  6 +-
 .../src/main/webapp/app/helpers/node-name.js| 46 
 .../main/webapp/app/models/yarn-app-attempt.js  | 72 ++-
 .../src/main/webapp/app/models/yarn-app.js  | 14 
 .../main/webapp/app/models/yarn-container.js|  7 ++
 .../main/webapp/app/routes/yarn-app-attempt.js  |  6 +-
 .../webapp/app/serializers/yarn-app-attempt.js  |  5 +-
 .../src/main/webapp/app/serializers/yarn-app.js | 11 ++-
 .../webapp/app/serializers/yarn-container.js|  3 +-
 .../webapp/app/serializers/yarn-node-app.js |  5 +-
 .../app/serializers/yarn-node-container.js  |  5 +-
 .../main/webapp/app/serializers/yarn-rm-node.js |  5 +-
 .../main/webapp/app/templates/application.hbs   | 21 +-
 .../templates/components/app-attempt-table.hbs  | 22 +-
 .../app/templates/components/app-table.hbs  |  8 +--
 .../templates/components/container-table.hbs|  4 +-
 .../templates/components/node-menu-panel.hbs| 44 
 .../app/templates/components/timeline-view.hbs  |  2 +-
 .../src/main/webapp/app/templates/error.hbs |  2 +-
 .../webapp/app/templates/yarn-app-attempt.hbs   |  4 ++
 .../src/main/webapp/app/templates/yarn-app.hbs  |  2 +-
 .../src/main/webapp/app/templates/yarn-apps.hbs |  9 ++-
 .../main/webapp/app/templates/yarn-node-app.hbs |  4 +-
 .../webapp/app/templates/yarn-node-apps.hbs | 12 ++--
 .../app/templates/yarn-node-container.hbs   |  2 +-
 .../app/templates/yarn-node-containers.hbs  | 12 ++--
 .../src/main/webapp/app/templates/yarn-node.hbs |  2 +-
 .../main/webapp/app/templates/yarn-nodes.hbs| 10 ++-
 .../main/webapp/app/templates/yarn-queue.hbs|  8 ++-
 .../src/main/webapp/config/environment.js   |  2 +-
 .../hadoop-yarn-ui/src/main/webapp/package.json |  2 +
 .../webapp/tests/unit/helpers/node-name-test.js | 28 
 47 files changed, 486 insertions(+), 93 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97f5dfe5/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 45b6cdf..5efbd14 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1882,6 +1882,7 @@ The Apache Hadoop YARN Web UI component bundles the 
following files under the MI
  - datatables v1.10.8 (https://datatables.net/)
  - moment v2.10.6 (http://momentjs.com/) - Copyright (c) 2011-2015 Tim Wood, 
Iskren Chernev, Moment.js contributors
  - em-helpers v0.5.8 (https://github.com/sreenaths/em-helpers)
+ - ember-array-contains-helper v1.0.2 
(https://github.com/bmeurant/ember-array-contains-helper)
  - ember-cli-app-version v0.5.8 
(https://github.com/EmberSherpa/ember-cli-app-version) - Authored by Taras 
Mankovski 
  - ember-cli-babel v5.1.6 (https://github.com/babel/ember-cli-babel) - 
Authored by Stefan Penner 
  - ember-cli-content-security-policy v0.4.0 
(https://github.com/rwjblue/ember-cli-content-security-policy)
@@ -1895,6 +1896,7 @@ The Apache Hadoop YARN Web UI component bundles the 
following files under the MI
  - ember-cli-sri 

[21/50] [abbrv] hadoop git commit: YARN-3334. [YARN-3368] Introduce REFRESH button in various UI pages (Sreenath Somarajapuram via Sunil G)

2016-10-21 Thread wangda
YARN-3334. [YARN-3368] Introduce REFRESH button in various UI pages (Sreenath 
Somarajapuram via Sunil G)
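
The refresh wiring is only visible as a diffstat here, so the following is a
rough editor's sketch rather than the actual patch: it assumes, per the file
list above, that the new breadcrumb-bar component sends a refresh action that
bubbles to the current route, and it uses only stock Ember API
(Route#refresh() re-runs the route's model hooks).

import Ember from 'ember';

// Illustrative sketch (cf. routes/abstract.js in the diffstat): a shared
// base route exposes one action that the REFRESH button sends.
export default Ember.Route.extend({
  actions: {
    refresh() {
      // Route#refresh() re-runs beforeModel/model/afterModel and
      // re-renders the page with freshly fetched data.
      this.refresh();
    }
  }
});

Extending each page route from such a base is what lets a single button
behave consistently across all the pages touched above.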


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e91f960c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e91f960c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e91f960c

Branch: refs/heads/YARN-3368
Commit: e91f960ca7b0cc7f14394eadd500e7b2775eabcd
Parents: 4173864
Author: sunilg 
Authored: Wed Aug 10 06:53:13 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../app/components/app-usage-donut-chart.js |  5 ---
 .../src/main/webapp/app/components/bar-chart.js |  4 +-
 .../webapp/app/components/breadcrumb-bar.js | 31 ++
 .../main/webapp/app/components/donut-chart.js   |  8 ++--
 .../app/components/queue-usage-donut-chart.js   |  2 +-
 .../app/controllers/yarn-container-log.js   | 40 ++
 .../webapp/app/controllers/yarn-node-app.js | 36 
 .../src/main/webapp/app/routes/abstract.js  | 32 +++
 .../main/webapp/app/routes/cluster-overview.js  | 12 +-
 .../main/webapp/app/routes/yarn-app-attempt.js  |  9 +++-
 .../main/webapp/app/routes/yarn-app-attempts.js |  8 +++-
 .../src/main/webapp/app/routes/yarn-app.js  | 11 -
 .../src/main/webapp/app/routes/yarn-apps.js |  9 +++-
 .../webapp/app/routes/yarn-container-log.js | 10 -
 .../src/main/webapp/app/routes/yarn-node-app.js |  8 +++-
 .../main/webapp/app/routes/yarn-node-apps.js|  8 +++-
 .../webapp/app/routes/yarn-node-container.js|  8 +++-
 .../webapp/app/routes/yarn-node-containers.js   |  8 +++-
 .../src/main/webapp/app/routes/yarn-node.js |  9 +++-
 .../src/main/webapp/app/routes/yarn-nodes.js|  9 +++-
 .../main/webapp/app/routes/yarn-queue-apps.js   | 12 --
 .../src/main/webapp/app/routes/yarn-queue.js| 14 ---
 .../src/main/webapp/app/routes/yarn-queues.js   | 14 ---
 .../src/main/webapp/app/styles/app.css  |  6 +++
 .../webapp/app/templates/cluster-overview.hbs   |  4 +-
 .../app/templates/components/breadcrumb-bar.hbs | 22 ++
 .../webapp/app/templates/yarn-app-attempt.hbs   |  4 +-
 .../webapp/app/templates/yarn-app-attempts.hbs  |  4 +-
 .../src/main/webapp/app/templates/yarn-app.hbs  |  4 +-
 .../src/main/webapp/app/templates/yarn-apps.hbs |  4 +-
 .../webapp/app/templates/yarn-container-log.hbs |  2 +
 .../main/webapp/app/templates/yarn-node-app.hbs |  2 +
 .../webapp/app/templates/yarn-node-apps.hbs |  4 +-
 .../app/templates/yarn-node-container.hbs   |  4 +-
 .../app/templates/yarn-node-containers.hbs  |  4 +-
 .../src/main/webapp/app/templates/yarn-node.hbs |  4 +-
 .../main/webapp/app/templates/yarn-nodes.hbs|  4 +-
 .../webapp/app/templates/yarn-queue-apps.hbs|  4 +-
 .../main/webapp/app/templates/yarn-queue.hbs|  4 +-
 .../main/webapp/app/templates/yarn-queues.hbs   |  4 +-
 .../components/breadcrumb-bar-test.js   | 43 
 .../unit/controllers/yarn-container-log-test.js | 30 ++
 .../unit/controllers/yarn-node-app-test.js  | 30 ++
 43 files changed, 417 insertions(+), 77 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e91f960c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
index 0baf630..90f41fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
@@ -26,7 +26,6 @@ export default BaseUsageDonutChart.extend({
   colors: d3.scale.category20().range(),
 
   draw: function() {
-this.initChart();
 var usageByApps = [];
 var avail = 100;
 
@@ -60,8 +59,4 @@ export default BaseUsageDonutChart.extend({
 this.renderDonutChart(usageByApps, this.get("title"), 
this.get("showLabels"),
   this.get("middleLabel"), "100%", "%");
   },
-
-  didInsertElement: function() {
-this.draw();
-  },
 })
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e91f960c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/bar-chart.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/bar-chart.js
 

[35/50] [abbrv] hadoop git commit: YARN-5503. [YARN-3368] Add missing hidden files in webapp folder for deployment (Sreenath Somarajapuram via Sunil G)

2016-10-21 Thread wangda
YARN-5503. [YARN-3368] Add missing hidden files in webapp folder for deployment 
(Sreenath Somarajapuram via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23ed6816
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23ed6816
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23ed6816

Branch: refs/heads/YARN-3368
Commit: 23ed681680796002066fee91815fada96c412bc2
Parents: 045c7d5
Author: sunilg 
Authored: Tue Aug 30 20:58:35 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  | 19 ++-
 .../hadoop-yarn-ui/src/main/webapp/.bowerrc |  4 +++
 .../src/main/webapp/.editorconfig   | 34 
 .../hadoop-yarn-ui/src/main/webapp/.ember-cli   |  9 ++
 .../hadoop-yarn-ui/src/main/webapp/.jshintrc| 32 ++
 .../src/main/webapp/.watchmanconfig |  3 ++
 6 files changed, 100 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23ed6816/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index fca8d30..b750a73 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -30,7 +30,7 @@
   ${packaging.type}
 
   
-jar
+war
 src/main/webapp
 node
 v0.12.2
@@ -52,9 +52,26 @@
 src/main/webapp/bower.json
 src/main/webapp/package.json
 src/main/webapp/testem.json
+
+src/main/webapp/dist/**/*
+src/main/webapp/tmp/**/*
 src/main/webapp/public/assets/images/**/*
+src/main/webapp/public/assets/images/*
 src/main/webapp/public/robots.txt
+
+public/assets/images/**/*
 public/crossdomain.xml
+
+src/main/webapp/.tmp/**/*
+src/main/webapp/.bowerrc
+src/main/webapp/.editorconfig
+src/main/webapp/.ember-cli
+src/main/webapp/.gitignore
+src/main/webapp/.jshintrc
+src/main/webapp/.travis.yml
+src/main/webapp/.watchmanconfig
+src/main/webapp/tests/.jshintrc
+src/main/webapp/blueprints/.jshintrc
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23ed6816/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
new file mode 100644
index 000..959e169
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -0,0 +1,4 @@
+{
+  "directory": "bower_components",
+  "analytics": false
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23ed6816/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
new file mode 100644
index 000..47c5438
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
@@ -0,0 +1,34 @@
+# EditorConfig helps developers define and maintain consistent
+# coding styles between different editors and IDEs
+# editorconfig.org
+
+root = true
+
+
+[*]
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+
+[*.js]
+indent_style = space
+indent_size = 2
+
+[*.hbs]
+insert_final_newline = false
+indent_style = space
+indent_size = 2
+
+[*.css]
+indent_style = space
+indent_size = 2
+
+[*.html]
+indent_style = space
+indent_size = 2
+
+[*.{diff,md}]
+trim_trailing_whitespace = false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23ed6816/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.ember-cli
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.ember-cli 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.ember-cli
new file mode 100644
index 000..ee64cfe
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.ember-cli
@@ -0,0 +1,9 @@
+{
+  /**
+Ember CLI sends analytics information by default. The data is 

[12/50] [abbrv] hadoop git commit: YARN-5679. TestAHSWebServices is failing (ajisakaa via rkanter)

2016-10-21 Thread wangda
YARN-5679. TestAHSWebServices is failing (ajisakaa via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23d7d53a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23d7d53a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23d7d53a

Branch: refs/heads/YARN-3368
Commit: 23d7d53a41c6a59efa4093ae563c45af911005d4
Parents: 61e30cf
Author: Robert Kanter 
Authored: Fri Oct 21 13:31:03 2016 -0700
Committer: Robert Kanter 
Committed: Fri Oct 21 13:31:03 2016 -0700

--
 .../hadoop/yarn/logaggregation/AggregatedLogFormat.java   | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23d7d53a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index 3c4f835..02528d1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -97,7 +97,9 @@ public class AggregatedLogFormat {
*/
   private static final FsPermission APP_LOG_FILE_UMASK = FsPermission
   .createImmutable((short) (0640 ^ 0777));
-
+  /** Default permission for the log file. */
+  private static final FsPermission APP_LOG_FILE_PERM =
+  FsPermission.getFileDefault().applyUMask(APP_LOG_FILE_UMASK);
 
   static {
 RESERVED_KEYS = new HashMap();
@@ -458,11 +460,10 @@ public class AggregatedLogFormat {
   @Override
   public FSDataOutputStream run() throws Exception {
 fc = FileContext.getFileContext(remoteAppLogFile.toUri(), 
conf);
-fc.setUMask(APP_LOG_FILE_UMASK);
 return fc.create(
 remoteAppLogFile,
 EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
-new Options.CreateOpts[] {});
+Options.CreateOpts.perms(APP_LOG_FILE_PERM));
   }
 });
   } catch (InterruptedException e) {





[45/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
new file mode 100644
index 000..f7ec020
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Component.extend({
+  // Map: 
+  map : undefined,
+
+  // Normalized data for d3
+  treeData: undefined,
+
+  // folded queues, folded[] == true means  is folded
+  foldedQueues: { },
+
+  // maxDepth
+  maxDepth: 0,
+
+  // num of leaf queue, folded queue is treated as leaf queue
+  numOfLeafQueue: 0,
+
+  // mainSvg
+  mainSvg: undefined,
+
+  // Init data
+  initData: function() {
+this.map = { };
+this.treeData = { };
+this.maxDepth = 0;
+this.numOfLeafQueue = 0;
+
+this.get("model")
+  .forEach(function(o) {
+this.map[o.id] = o;
+  }.bind(this));
+
+var selected = this.get("selected");
+
+this.initQueue("root", 1, this.treeData);
+  },
+
+  // get Children array of given queue
+  getChildrenNamesArray: function(q) {
+var namesArr = [];
+
+// A folded queue's children list is empty
+if (this.foldedQueues[q.get("name")]) {
+  return namesArr;
+}
+
+var names = q.get("children");
+if (names) {
+  names.forEach(function(name) {
+namesArr.push(name);
+  });
+}
+
+return namesArr;
+  },
+
+  // Init queues
+  initQueue: function(queueName, depth, node) {
+if ((!queueName) || (!this.map[queueName])) {
+  // Queue does not exist
+  return;
+}
+
+if (depth > this.maxDepth) {
+  this.maxDepth = this.maxDepth + 1;
+}
+
+var queue = this.map[queueName];
+
+var names = this.getChildrenNamesArray(queue);
+
+node.name = queueName;
+node.parent = queue.get("parent");
+node.queueData = queue;
+
+if (names.length > 0) {
+  node.children = [];
+
+  names.forEach(function(name) {
+var childQueueData = {};
+node.children.push(childQueueData);
+this.initQueue(name, depth + 1, childQueueData);
+  }.bind(this));
+} else {
+  this.numOfLeafQueue = this.numOfLeafQueue + 1;
+}
+  },
+
+  update: function(source, root, tree, diagonal) {
+var duration = 300;
+var i = 0;
+
+// Compute the new tree layout.
+var nodes = tree.nodes(root).reverse();
+var links = tree.links(nodes);
+
+// Normalize for fixed-depth.
+nodes.forEach(function(d) { d.y = d.depth * 200; });
+
+// Update the nodes…
+var node = this.mainSvg.selectAll("g.node")
+  .data(nodes, function(d) { return d.id || (d.id = ++i); });
+
+// Enter any new nodes at the parent's previous position.
+var nodeEnter = node.enter().append("g")
+  .attr("class", "node")
+  .attr("transform", function(d) { return "translate(" + source.y0 + "," + 
source.x0 + ")"; })
+  .on("click", function(d,i){
+if (d.queueData.get("name") != this.get("selected")) {
+document.location.href = "yarnQueue/" + d.queueData.get("name");
+}
+  }.bind(this));
+  // .on("click", click);
+
+nodeEnter.append("circle")
+  .attr("r", 1e-6)
+  .style("fill", function(d) {
+var usedCap = d.queueData.get("usedCapacity");
+if (usedCap <= 60.0) {
+  return "LimeGreen";
+} else if (usedCap <= 100.0) {
+  return "DarkOrange";
+} else {
+  return "LightCoral";
+}
+  });
+
+// append percentage
+nodeEnter.append("text")
+  .attr("x", function(d) { return 0; })
+  .attr("dy", ".35em")
+  .attr("text-anchor", function(d) { return "middle"; })
+  .text(function(d) {
+var usedCap = d.queueData.get("usedCapacity");
+if (usedCap >= 100.0) {
+

[30/50] [abbrv] hadoop git commit: YARN-4849. Addendum patch to fix javadocs. (Sunil G via wangda)

2016-10-21 Thread wangda
YARN-4849. Addendum patch to fix javadocs. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a7a9780
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a7a9780
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a7a9780

Branch: refs/heads/YARN-3368
Commit: 0a7a97807133a1832b44efb504c6f7ecb8f1a36f
Parents: 5a4801a
Author: Wangda Tan 
Authored: Fri Sep 9 10:54:37 2016 -0700
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../hadoop/yarn/server/resourcemanager/ResourceManager.java| 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a7a9780/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index d32f649..f739e31 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -916,6 +916,12 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
* Return a HttpServer.Builder that the journalnode / namenode / secondary
* namenode can use to initialize their HTTP / HTTPS server.
*
+   * @param conf configuration object
+   * @param httpAddr HTTP address
+   * @param httpsAddr HTTPS address
+   * @param name  Name of the server
+   * @throws IOException from Builder
+   * @return builder object
*/
   public static HttpServer2.Builder httpServerTemplateForRM(Configuration conf,
   final InetSocketAddress httpAddr, final InetSocketAddress httpsAddr,





[37/50] [abbrv] hadoop git commit: YARN-5321. [YARN-3368] Add resource usage for application by node managers (Wangda Tan via Sunil G) YARN-5320. [YARN-3368] Add resource usage by applications and que

2016-10-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4173864c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
index 8ce4ffa..aae4177 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
@@ -16,55 +16,95 @@
  * limitations under the License.
 }}
 
-
-  {{queue-navigator model=model.queues selected=model.selected}}
+
+  {{em-breadcrumbs items=breadcrumbs}}
 
 
-
-  
-{{queue-configuration-table queue=model.selectedQueue}}
-  
+
+  
 
-  
-{{bar-chart data=model.selectedQueue.capacitiesBarChartData 
-title="Queue Capacities" 
-parentId="capacity-bar-chart"
-textWidth=150
-ratio=0.5
-maxHeight=350}}
-  
+
+  
+
+  Application
+
+
+  
+
+  {{#link-to 'yarn-queue' tagName="li"}}
+{{#link-to 'yarn-queue' model.selected}}Information
+{{/link-to}}
+  {{/link-to}}
+  {{#link-to 'yarn-queue-apps' tagName="li"}}
+{{#link-to 'yarn-queue-apps' model.selected}}Applications List
+{{/link-to}}
+  {{/link-to}}
+
+  
+
+  
+
 
-{{#if model.selectedQueue.hasUserUsages}}
-  
-{{donut-chart data=model.selectedQueue.userUsagesDonutChartData 
-title="User Usages" 
-showLabels=true
-parentId="userusage-donut-chart"
-maxHeight=350}}
-  
-{{/if}}
+
+  
+  
 
-  
-{{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData 
-title="Running Apps" 
-showLabels=true
-parentId="numapplications-donut-chart"
-ratio=0.5
-maxHeight=350}}
-  
-
+
+  
+
+  Queue Information
+
+{{queue-configuration-table queue=model.selectedQueue}}
+  
+
 
-
+
+  
+
+  Queue Capacities
+
+
+  
+  {{bar-chart data=model.selectedQueue.capacitiesBarChartData
+  title=""
+  parentId="capacity-bar-chart"
+  textWidth=170
+  ratio=0.55
+  maxHeight=350}}
+
+  
+
+
+{{#if model.selectedQueue.hasUserUsages}}
+  
+{{donut-chart data=model.selectedQueue.userUsagesDonutChartData
+title="User Usages"
+showLabels=true
+parentId="userusage-donut-chart"
+type="memory"
+ratio=0.6
+maxHeight=350}}
+  
+{{/if}}
+
+
+  
+
+  Running Apps
+
+
+  {{donut-chart 
data=model.selectedQueue.numOfApplicationsDonutChartData
+  showLabels=true
+  parentId="numapplications-donut-chart"
+  ratio=0.6
+  maxHeight=350}}
+
+  
+
+
+  
+
 
-
-  
-{{#if model.apps}}
-  {{app-table table-id="apps-table" arr=model.apps}}
-  {{simple-table table-id="apps-table" bFilter=true 
colTypes="elapsed-time" colTargets="7"}}
-{{else}}
-  Could not find any applications from this 
cluster
-{{/if}}
   
 
-
-{{outlet}}
\ No newline at end of file
+{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4173864c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
new file mode 100644
index 000..e27341b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
@@ -0,0 +1,72 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on 

[44/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
new file mode 100644
index 000..89858bf
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+  model(param) {
+return Ember.RSVP.hash({
+  selected : param.queue_name,
+  queues: this.store.findAll('yarnQueue'),
+  selectedQueue : undefined,
+  apps: undefined, // apps of selected queue
+});
+  },
+
+  afterModel(model) {
+model.selectedQueue = this.store.peekRecord('yarnQueue', model.selected);
+model.apps = this.store.findAll('yarnApp');
+model.apps.forEach(function(o) {
+  console.log(o);
+})
+  }
+});
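
A note on the hunk above: model() uses findAll, which issues the request and
caches every yarnQueue record in the store, so afterModel can call
peekRecord, a synchronous cache read that triggers no second request. A
minimal sketch of the same pattern, with illustrative names:

import Ember from 'ember';

export default Ember.Route.extend({
  model() {
    // One network request; all records end up cached in the store.
    return this.store.findAll('yarnQueue');
  },
  afterModel() {
    // Synchronous read from the cache that findAll just populated.
    var root = this.store.peekRecord('yarnQueue', 'root');
    console.log(root);
  }
});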

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
new file mode 100644
index 000..7da6f6d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+  beforeModel() {
+this.transitionTo('yarnQueues.root');
+  }
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
new file mode 100644
index 000..3686c83
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+

[43/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
new file mode 100644
index 000..ca80ccd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
@@ -0,0 +1,58 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+
+  
+{{node-menu path="yarnNodeContainers" nodeAddr=model.nodeInfo.addr 
nodeId=model.nodeInfo.id}}
+
+  
+
+  
+Container ID
+Container State
+User
+Logs
+  
+
+
+  {{#if model.containers}}
+{{#each model.containers as |container|}}
+  {{#if container.isDummyContainer}}
+No containers found on this 
node
+  {{else}}
+
+  {{container.containerId}}
+  {{container.state}}
+  {{container.user}}
+  
+{{log-files-comma nodeId=model.nodeInfo.id
+nodeAddr=model.nodeInfo.addr
+containerId=container.containerId
+logFiles=container.containerLogFiles}}
+  
+
+  {{/if}}
+{{/each}}
+  {{/if}}
+
+  
+  {{simple-table table-id="node-containers-table" bFilter=true 
colsOrder="0,desc" colTypes="natural" colTargets="0"}}
+
+  
+
+{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
new file mode 100644
index 000..a036076
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
@@ -0,0 +1,94 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+
+  
+{{node-menu path="yarnNode" nodeId=model.rmNode.id nodeAddr=model.node.id}}
+
+  
+Node Information
+  
+
+  
+Total Vmem allocated for Containers
+{{divide num=model.node.totalVmemAllocatedContainersMB 
den=1024}} GB
+  
+  
+Vmem enforcement enabled
+{{model.node.vmemCheckEnabled}}
+  
+  
+Total Pmem allocated for Containers
+{{divide num=model.node.totalPmemAllocatedContainersMB 
den=1024}} GB
+  
+  
+Pmem enforcement enabled
+{{model.node.pmemCheckEnabled}}
+  
+  
+Total VCores allocated for Containers
+{{model.node.totalVCoresAllocatedContainers}}
+  
+  
+Node Healthy Status
+

[28/50] [abbrv] hadoop git commit: YARN-5488. [YARN-3368] Applications table overflows beyond the page boundary (Harish Jaiprakash via Sunil G)

2016-10-21 Thread wangda
YARN-5488. [YARN-3368] Applications table overflows beyond the page 
boundary (Harish Jaiprakash via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fbb0f20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fbb0f20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fbb0f20

Branch: refs/heads/YARN-3368
Commit: 9fbb0f20701030ff6f89f5962588c31f806c021b
Parents: 452a334
Author: sunilg 
Authored: Fri Aug 12 14:51:03 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../src/main/webapp/app/styles/app.css  |  4 +
 .../src/main/webapp/app/templates/yarn-app.hbs  | 98 ++--
 2 files changed, 54 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fbb0f20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
index a68a0ac..da5b4bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
@@ -273,3 +273,7 @@ li a.navigation-link.ember-view {
   right: 20px;
   top: 3px;
 }
+
+.x-scroll {
+  overflow-x: scroll;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fbb0f20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
index 49c4bfd..9e92fc1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
@@ -49,55 +49,57 @@
 
   
 Basic Info
-
-  
-
-  Application ID
-  Name
-  User
-  Queue
-  State
-  Final Status
-  Start Time
-  Elapsed Time
-  Finished Time
-  Priority
-  Progress
-  Is Unmanaged AM
-
-  
+
+  
+
+  
+Application ID
+Name
+User
+Queue
+State
+Final Status
+Start Time
+Elapsed Time
+Finished Time
+Priority
+Progress
+Is Unmanaged AM
+  
+
 
-  
-
-  {{model.app.id}}
-  {{model.app.appName}}
-  {{model.app.user}}
-  {{model.app.queue}}
-  {{model.app.state}}
-  
-
-  {{model.app.finalStatus}}
-
-  
-  {{model.app.startTime}}
-  {{model.app.elapsedTime}}
-  {{model.app.validatedFinishedTs}}
-  {{model.app.priority}}
-  
-
-  
-{{model.app.progress}}%
+
+  
+{{model.app.id}}
+{{model.app.appName}}
+{{model.app.user}}
+{{model.app.queue}}
+{{model.app.state}}
+
+  
+{{model.app.finalStatus}}
+  
+
+{{model.app.startTime}}
+{{model.app.elapsedTime}}
+{{model.app.validatedFinishedTs}}
+{{model.app.priority}}
+
+  
+
+  {{model.app.progress}}%
+
   
-
-  
-  {{model.app.unmanagedApplication}}
-
-  
-
+
+{{model.app.unmanagedApplication}}
+  
+
+  
+ 

[38/50] [abbrv] hadoop git commit: YARN-5321. [YARN-3368] Add resource usage for application by node managers (Wangda Tan via Sunil G) YARN-5320. [YARN-3368] Add resource usage by applications and que

2016-10-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4173864c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
index ff49403..b945451 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
@@ -20,7 +20,9 @@ import Ember from 'ember';
 
 export default Ember.Route.extend({
   model() {
-var apps = this.store.findAll('yarn-app');
-return apps;
+return Ember.RSVP.hash({
+  apps: this.store.findAll('yarn-app'),
+  clusterMetrics: this.store.findAll('ClusterMetric'),
+});
   }
 });
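
This Ember.RSVP.hash shape is the recurring pattern of the series: both
findAll calls start immediately, resolve in parallel, and the route's model
settles once every entry has, after which templates read the named keys
(model.apps, model.clusterMetrics). A hedged usage sketch, where store
stands in for the route's Ember Data store:

import Ember from 'ember';

// Both requests are in flight at once; then() fires after both resolve.
Ember.RSVP.hash({
  apps: store.findAll('yarn-app'),
  clusterMetrics: store.findAll('ClusterMetric')
}).then(function(model) {
  console.log(model.apps.get('length'));
  console.log(model.clusterMetrics.get('length'));
});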

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4173864c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
new file mode 100644
index 000..8719170
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4173864c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
new file mode 100644
index 000..8719170
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4173864c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
index 6e57388..64a1b3e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
@@ -22,6 +22,7 @@ export default Ember.Route.extend({
   model(param) {
 // Fetches data from both NM and RM. RM is queried to get node usage info.
 return Ember.RSVP.hash({
+  nodeInfo: { id: param.node_id, addr: param.node_addr },
   node: this.store.findRecord('yarn-node', param.node_addr),
   rmNode: this.store.findRecord('yarn-rm-node', param.node_id)
 });


[39/50] [abbrv] hadoop git commit: YARN-5321. [YARN-3368] Add resource usage for application by node managers (Wangda Tan via Sunil G) YARN-5320. [YARN-3368] Add resource usage by applications and que

2016-10-21 Thread wangda
YARN-5321. [YARN-3368] Add resource usage for application by node managers 
(Wangda Tan via Sunil G)
YARN-5320. [YARN-3368] Add resource usage by applications and queues to cluster 
overview page  (Wangda Tan via Sunil G)
YARN-5322. [YARN-3368] Add a node heat chart map (Wangda Tan via Sunil G)
YARN-5347. [YARN-3368] Applications page improvements (Sreenath Somarajapuram 
via Sunil G)
YARN-5348. [YARN-3368] Node details page improvements (Sreenath Somarajapuram 
via Sunil G)
YARN-5346. [YARN-3368] Queues page improvements (Sreenath Somarajapuram via 
Sunil G)
YARN-5345. [YARN-3368] Cluster overview page improvements (Sreenath 
Somarajapuram via Sunil G)
YARN-5344. [YARN-3368] Generic UI improvements (Sreenath Somarajapuram via 
Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4173864c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4173864c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4173864c

Branch: refs/heads/YARN-3368
Commit: 4173864c8263822774758d332ec51e94508e7e8c
Parents: fab0f6f
Author: Sunil 
Authored: Fri Jul 15 21:16:06 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../src/main/webapp/app/adapters/yarn-app.js|  14 +
 .../app/components/app-usage-donut-chart.js |  67 
 .../src/main/webapp/app/components/bar-chart.js |   5 +
 .../app/components/base-chart-component.js  |  55 ++-
 .../app/components/base-usage-donut-chart.js|  43 +++
 .../main/webapp/app/components/donut-chart.js   |  55 ++-
 .../main/webapp/app/components/nodes-heatmap.js | 209 +++
 ...er-app-memusage-by-nodes-stacked-barchart.js |  88 +
 ...app-ncontainers-by-nodes-stacked-barchart.js |  67 
 .../app/components/queue-usage-donut-chart.js   |  69 
 .../main/webapp/app/components/queue-view.js|   3 +-
 .../main/webapp/app/components/simple-table.js  |   9 +-
 .../webapp/app/components/stacked-barchart.js   | 198 +++
 .../main/webapp/app/components/timeline-view.js |   2 +-
 .../main/webapp/app/components/tree-selector.js |  43 ++-
 .../webapp/app/controllers/cluster-overview.js  |   9 +
 .../webapp/app/controllers/yarn-app-attempt.js  |  40 +++
 .../webapp/app/controllers/yarn-app-attempts.js |  40 +++
 .../src/main/webapp/app/controllers/yarn-app.js |  38 ++
 .../main/webapp/app/controllers/yarn-apps.js|   9 +
 .../webapp/app/controllers/yarn-node-apps.js|  39 +++
 .../app/controllers/yarn-node-containers.js |  39 +++
 .../main/webapp/app/controllers/yarn-node.js|  37 ++
 .../app/controllers/yarn-nodes-heatmap.js   |  36 ++
 .../main/webapp/app/controllers/yarn-nodes.js   |  33 ++
 .../webapp/app/controllers/yarn-queue-apps.js   |  46 +++
 .../main/webapp/app/controllers/yarn-queue.js   |  20 ++
 .../main/webapp/app/controllers/yarn-queues.js  |  34 ++
 .../webapp/app/controllers/yarn-services.js |  34 ++
 .../main/webapp/app/models/cluster-metric.js|   2 +-
 .../main/webapp/app/models/yarn-app-attempt.js  |  11 +
 .../src/main/webapp/app/models/yarn-app.js  |   4 +
 .../src/main/webapp/app/models/yarn-rm-node.js  |   7 +
 .../src/main/webapp/app/router.js   |  15 +-
 .../src/main/webapp/app/routes/application.js   |   2 +
 .../main/webapp/app/routes/cluster-overview.js  |   9 +-
 .../main/webapp/app/routes/yarn-app-attempts.js |  30 ++
 .../src/main/webapp/app/routes/yarn-app.js  |  17 +-
 .../src/main/webapp/app/routes/yarn-apps.js |   6 +-
 .../main/webapp/app/routes/yarn-apps/apps.js|  22 ++
 .../webapp/app/routes/yarn-apps/services.js |  22 ++
 .../src/main/webapp/app/routes/yarn-node.js |   1 +
 .../src/main/webapp/app/routes/yarn-nodes.js|   5 +-
 .../webapp/app/routes/yarn-nodes/heatmap.js |  22 ++
 .../main/webapp/app/routes/yarn-nodes/table.js  |  22 ++
 .../main/webapp/app/routes/yarn-queue-apps.js   |  36 ++
 .../src/main/webapp/app/routes/yarn-queues.js   |  38 ++
 .../webapp/app/serializers/yarn-app-attempt.js  |  19 +-
 .../src/main/webapp/app/serializers/yarn-app.js |   8 +-
 .../webapp/app/serializers/yarn-container.js|  20 +-
 .../src/main/webapp/app/styles/app.css  | 139 ++--
 .../main/webapp/app/templates/application.hbs   |  99 --
 .../webapp/app/templates/cluster-overview.hbs   | 168 ++---
 .../app/templates/components/app-table.hbs  |  10 +-
 .../templates/components/node-menu-panel.hbs|   2 +-
 .../app/templates/components/nodes-heatmap.hbs  |  27 ++
 .../components/queue-configuration-table.hbs|   4 -
 .../templates/components/queue-navigator.hbs|  14 +-
 .../app/templates/components/timeline-view.hbs  |   3 +-
 .../webapp/app/templates/yarn-app-attempt.hbs   |  13 +-
 .../webapp/app/templates/yarn-app-attempts.hbs  |  57 +++
 .../src/main/webapp/app/templates/yarn-app.hbs  | 346 ---
 

[34/50] [abbrv] hadoop git commit: YARN-5019. [YARN-3368] Change urls in new YARN ui from camel casing to hyphens. (Sunil G via wangda)

2016-10-21 Thread wangda
YARN-5019. [YARN-3368] Change urls in new YARN ui from camel casing to hyphens. 
(Sunil G via wangda)
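
The user-visible half of the rename is the route map. A minimal sketch of
the hyphenated form follows (router.js appears in the file list below; the
two routes and their dynamic segments are examples taken from diffs
elsewhere in this series, so treat the exact paths as assumptions):

import Ember from 'ember';

var Router = Ember.Router.extend({});

Router.map(function() {
  // Camel-cased URLs such as /yarnQueue/root become hyphenated.
  this.route('yarn-queue', { path: '/yarn-queue/:queue_name' });
  this.route('yarn-node', { path: '/yarn-node/:node_id/:node_addr' });
});

export default Router;

Hand-built links, like the document.location.href assignments in
tree-selector.js below, must be updated in lockstep, which is why the diff
also touches helpers and templates.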


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd8f0b61
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd8f0b61
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd8f0b61

Branch: refs/heads/YARN-3368
Commit: bd8f0b61f8ce0c7234ddb77dd97408aaf1e0d88a
Parents: 5468dd8
Author: Wangda Tan 
Authored: Mon May 9 11:29:59 2016 -0700
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../main/webapp/app/components/tree-selector.js |  4 +--
 .../main/webapp/app/controllers/application.js  | 16 +-
 .../main/webapp/app/helpers/log-files-comma.js  |  2 +-
 .../src/main/webapp/app/helpers/node-link.js|  2 +-
 .../src/main/webapp/app/helpers/node-menu.js| 12 
 .../main/webapp/app/models/yarn-app-attempt.js  |  2 +-
 .../src/main/webapp/app/router.js   | 32 ++--
 .../src/main/webapp/app/routes/index.js |  2 +-
 .../main/webapp/app/routes/yarn-app-attempt.js  |  6 ++--
 .../src/main/webapp/app/routes/yarn-app.js  |  4 +--
 .../src/main/webapp/app/routes/yarn-apps.js |  2 +-
 .../webapp/app/routes/yarn-container-log.js |  2 +-
 .../src/main/webapp/app/routes/yarn-node-app.js |  2 +-
 .../main/webapp/app/routes/yarn-node-apps.js|  2 +-
 .../webapp/app/routes/yarn-node-container.js|  2 +-
 .../webapp/app/routes/yarn-node-containers.js   |  2 +-
 .../src/main/webapp/app/routes/yarn-node.js |  4 +--
 .../src/main/webapp/app/routes/yarn-nodes.js|  2 +-
 .../src/main/webapp/app/routes/yarn-queue.js|  6 ++--
 .../main/webapp/app/routes/yarn-queues/index.js |  2 +-
 .../app/routes/yarn-queues/queues-selector.js   |  2 +-
 .../app/templates/components/app-table.hbs  |  4 +--
 .../webapp/app/templates/yarn-container-log.hbs |  2 +-
 .../main/webapp/app/templates/yarn-node-app.hbs |  4 +--
 .../webapp/app/templates/yarn-node-apps.hbs |  4 +--
 .../app/templates/yarn-node-container.hbs   |  2 +-
 .../app/templates/yarn-node-containers.hbs  |  4 +--
 .../src/main/webapp/app/templates/yarn-node.hbs |  2 +-
 28 files changed, 66 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd8f0b61/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
index f7ec020..698c253 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
@@ -126,7 +126,7 @@ export default Ember.Component.extend({
   .attr("transform", function(d) { return "translate(" + source.y0 + "," + 
source.x0 + ")"; })
   .on("click", function(d,i){
 if (d.queueData.get("name") != this.get("selected")) {
-document.location.href = "yarnQueue/" + d.queueData.get("name");
+document.location.href = "yarn-queue/" + d.queueData.get("name");
 }
   }.bind(this));
   // .on("click", click);
@@ -176,7 +176,7 @@ export default Ember.Component.extend({
   .attr("r", 20)
   .attr("href", 
 function(d) {
-  return "yarnQueues/" + d.queueData.get("name");
+  return "yarn-queues/" + d.queueData.get("name");
 })
   .style("stroke", function(d) {
 if (d.queueData.get("name") == this.get("selected")) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd8f0b61/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
index 3c68365..2effb13 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
@@ -29,25 +29,25 @@ export default Ember.Controller.extend({
   outputMainMenu: function(){
 var path = this.get('currentPath');
 var html = 'Queues' +
+html = html + '>Queues' +
 '(current)
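
The rename is a mechanical camelCase-to-hyphen mapping over the route names (yarnQueue becomes yarn-queue, yarnQueues becomes yarn-queues, and so on). As a hypothetical illustration only (this helper is not part of the patch), the transformation in Java:

class RouteNameDemo {
  // Convert a camelCase route name to its hyphenated form.
  static String hyphenate(String camel) {
    return camel.replaceAll("([a-z0-9])([A-Z])", "$1-$2").toLowerCase();
  }

  public static void main(String[] args) {
    System.out.println(hyphenate("yarnQueue"));  // yarn-queue
    System.out.println(hyphenate("yarnQueues")); // yarn-queues
  }
}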

[49/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-21 Thread wangda
YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to 
mvn, and fix licenses. (wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13b8251d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13b8251d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13b8251d

Branch: refs/heads/YARN-3368
Commit: 13b8251dcc363948be4414c862723fc65d0c1d35
Parents: 16ca435
Author: Wangda Tan 
Authored: Mon Mar 21 14:03:13 2016 -0700
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .gitignore  |  13 +
 BUILDING.txt|   4 +-
 LICENSE.txt |  80 +
 dev-support/create-release.sh   | 144 +
 dev-support/docker/Dockerfile   |   5 +
 .../src/site/markdown/YarnUI2.md|  43 +++
 .../hadoop-yarn/hadoop-yarn-ui/.bowerrc |   4 -
 .../hadoop-yarn/hadoop-yarn-ui/.editorconfig|  34 ---
 .../hadoop-yarn/hadoop-yarn-ui/.ember-cli   |  11 -
 .../hadoop-yarn/hadoop-yarn-ui/.gitignore   |  17 --
 .../hadoop-yarn/hadoop-yarn-ui/.jshintrc|  32 --
 .../hadoop-yarn/hadoop-yarn-ui/.travis.yml  |  23 --
 .../hadoop-yarn/hadoop-yarn-ui/.watchmanconfig  |   3 -
 .../hadoop-yarn/hadoop-yarn-ui/README.md|  24 --
 .../hadoop-yarn-ui/app/adapters/cluster-info.js |  20 --
 .../app/adapters/cluster-metric.js  |  20 --
 .../app/adapters/yarn-app-attempt.js|  32 --
 .../hadoop-yarn-ui/app/adapters/yarn-app.js |  26 --
 .../app/adapters/yarn-container-log.js  |  74 -
 .../app/adapters/yarn-container.js  |  43 ---
 .../app/adapters/yarn-node-app.js   |  63 
 .../app/adapters/yarn-node-container.js |  64 
 .../hadoop-yarn-ui/app/adapters/yarn-node.js|  40 ---
 .../hadoop-yarn-ui/app/adapters/yarn-queue.js   |  20 --
 .../hadoop-yarn-ui/app/adapters/yarn-rm-node.js |  45 ---
 .../hadoop-yarn/hadoop-yarn-ui/app/app.js   |  20 --
 .../hadoop-yarn-ui/app/components/.gitkeep  |   0
 .../app/components/app-attempt-table.js |   4 -
 .../hadoop-yarn-ui/app/components/app-table.js  |   4 -
 .../hadoop-yarn-ui/app/components/bar-chart.js  | 104 ---
 .../app/components/base-chart-component.js  | 109 ---
 .../app/components/container-table.js   |   4 -
 .../app/components/donut-chart.js   | 148 --
 .../app/components/item-selector.js |  21 --
 .../app/components/queue-configuration-table.js |   4 -
 .../app/components/queue-navigator.js   |   4 -
 .../hadoop-yarn-ui/app/components/queue-view.js | 272 -
 .../app/components/simple-table.js  |  58 
 .../app/components/timeline-view.js | 250 
 .../app/components/tree-selector.js | 257 
 .../hadoop-yarn/hadoop-yarn-ui/app/config.js|  27 --
 .../hadoop-yarn/hadoop-yarn-ui/app/constants.js |  24 --
 .../hadoop-yarn-ui/app/controllers/.gitkeep |   0
 .../app/controllers/application.js  |  55 
 .../app/controllers/cluster-overview.js |   5 -
 .../hadoop-yarn-ui/app/controllers/yarn-apps.js |   4 -
 .../app/controllers/yarn-queue.js   |   6 -
 .../hadoop-yarn-ui/app/helpers/.gitkeep |   0
 .../hadoop-yarn-ui/app/helpers/divide.js|  31 --
 .../app/helpers/log-files-comma.js  |  48 ---
 .../hadoop-yarn-ui/app/helpers/node-link.js |  37 ---
 .../hadoop-yarn-ui/app/helpers/node-menu.js |  66 -
 .../hadoop-yarn/hadoop-yarn-ui/app/index.html   |  25 --
 .../hadoop-yarn-ui/app/models/.gitkeep  |   0
 .../hadoop-yarn-ui/app/models/cluster-info.js   |  13 -
 .../hadoop-yarn-ui/app/models/cluster-metric.js | 115 
 .../app/models/yarn-app-attempt.js  |  44 ---
 .../hadoop-yarn-ui/app/models/yarn-app.js   |  65 -
 .../app/models/yarn-container-log.js|  25 --
 .../hadoop-yarn-ui/app/models/yarn-container.js |  39 ---
 .../hadoop-yarn-ui/app/models/yarn-node-app.js  |  44 ---
 .../app/models/yarn-node-container.js   |  57 
 .../hadoop-yarn-ui/app/models/yarn-node.js  |  33 ---
 .../hadoop-yarn-ui/app/models/yarn-queue.js |  76 -
 .../hadoop-yarn-ui/app/models/yarn-rm-node.js   |  92 --
 .../hadoop-yarn-ui/app/models/yarn-user.js  |   8 -
 .../hadoop-yarn/hadoop-yarn-ui/app/router.js|  29 --
 .../hadoop-yarn-ui/app/routes/.gitkeep  |   0
 .../hadoop-yarn-ui/app/routes/application.js|  38 ---
 .../app/routes/cluster-overview.js  |  11 -
 .../hadoop-yarn-ui/app/routes/index.js  |  29 --
 .../app/routes/yarn-app-attempt.js  |  21 --
 

[47/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
deleted file mode 100644
index c5394d0..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
+++ /dev/null
@@ -1,49 +0,0 @@
-import DS from 'ember-data';
-import Converter from 'yarn-ui/utils/converter';
-
-export default DS.JSONAPISerializer.extend({
-internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  
-  if (payload.appAttempt) {
-payload = payload.appAttempt;  
-  }
-  
-  var fixedPayload = {
-id: payload.appAttemptId,
-type: primaryModelClass.modelName, // yarn-app
-attributes: {
-  startTime: Converter.timeStampToDate(payload.startTime),
-  finishedTime: Converter.timeStampToDate(payload.finishedTime),
-  containerId: payload.containerId,
-  nodeHttpAddress: payload.nodeHttpAddress,
-  nodeId: payload.nodeId,
-  state: payload.nodeId,
-  logsLink: payload.logsLink
-}
-  };
-
-  return fixedPayload;
-},
-
-normalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  var p = this.internalNormalizeSingleResponse(store, 
-primaryModelClass, payload, id, requestType);
-  return { data: p };
-},
-
-normalizeArrayResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  // return expected is { data: [ {}, {} ] }
-  var normalizedArrayResponse = {};
-
-  // payload has apps : { app: [ {},{},{} ]  }
-  // need some error handling for ex apps or app may not be defined.
-  normalizedArrayResponse.data = 
payload.appAttempts.appAttempt.map(singleApp => {
-return this.internalNormalizeSingleResponse(store, primaryModelClass,
-  singleApp, singleApp.id, requestType);
-  }, this);
-  return normalizedArrayResponse;
-}
-});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b8251d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
deleted file mode 100644
index a038fff..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
+++ /dev/null
@@ -1,66 +0,0 @@
-import DS from 'ember-data';
-import Converter from 'yarn-ui/utils/converter';
-
-export default DS.JSONAPISerializer.extend({
-internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  if (payload.app) {
-payload = payload.app;  
-  }
-  
-  var fixedPayload = {
-id: id,
-type: primaryModelClass.modelName, // yarn-app
-attributes: {
-  appName: payload.name,
-  user: payload.user,
-  queue: payload.queue,
-  state: payload.state,
-  startTime: Converter.timeStampToDate(payload.startedTime),
-  elapsedTime: Converter.msToElapsedTime(payload.elapsedTime),
-  finishedTime: Converter.timeStampToDate(payload.finishedTime),
-  finalStatus: payload.finalStatus,
-  progress: payload.progress,
-  diagnostics: payload.diagnostics,
-  amContainerLogs: payload.amContainerLogs,
-  amHostHttpAddress: payload.amHostHttpAddress,
-  logAggregationStatus: payload.logAggregationStatus,
-  unmanagedApplication: payload.unmanagedApplication,
-  amNodeLabelExpression: payload.amNodeLabelExpression,
-  priority: payload.priority,
-  allocatedMB: payload.allocatedMB,
-  allocatedVCores: payload.allocatedVCores,
-  runningContainers: payload.runningContainers,
-  memorySeconds: payload.memorySeconds,
-  vcoreSeconds: payload.vcoreSeconds,
-  preemptedResourceMB: payload.preemptedResourceMB,
-  preemptedResourceVCores: payload.preemptedResourceVCores,
-  numNonAMContainerPreempted: payload.numNonAMContainerPreempted,
-  numAMContainerPreempted: payload.numAMContainerPreempted
-}
-  };
-
-  return fixedPayload;
-},
-
-normalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  var p = this.internalNormalizeSingleResponse(store, 
-primaryModelClass, payload, id, requestType);
-  return { data: p };
-},
-
-normalizeArrayResponse(store, primaryModelClass, payload, id,
-  requestType) {

[25/50] [abbrv] hadoop git commit: YARN-4849. Addendum patch to fix ASF warnings. (Wangda Tan via Sunil G)

2016-10-21 Thread wangda
YARN-4849. Addendum patch to fix ASF warnings. (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a313bb13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a313bb13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a313bb13

Branch: refs/heads/YARN-3368
Commit: a313bb131d3aa28269d0784d558fa6ca5805fb80
Parents: 23ed681
Author: sunilg 
Authored: Wed Aug 31 23:43:02 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../assets/images/datatables/Sorting icons.psd | Bin 27490 -> 0 bytes
 .../public/assets/images/datatables/favicon.ico| Bin 894 -> 0 bytes
 .../public/assets/images/datatables/sort_asc.png   | Bin 160 -> 0 bytes
 .../assets/images/datatables/sort_asc_disabled.png | Bin 148 -> 0 bytes
 .../public/assets/images/datatables/sort_both.png  | Bin 201 -> 0 bytes
 .../public/assets/images/datatables/sort_desc.png  | Bin 158 -> 0 bytes
 .../images/datatables/sort_desc_disabled.png   | Bin 146 -> 0 bytes
 7 files changed, 0 insertions(+), 0 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a313bb13/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/Sorting
 icons.psd
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/Sorting
 icons.psd 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/Sorting
 icons.psd
deleted file mode 100644
index 53b2e06..000
Binary files 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/Sorting
 icons.psd and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a313bb13/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/favicon.ico
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/favicon.ico
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/favicon.ico
deleted file mode 100644
index 6eeaa2a..000
Binary files 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/favicon.ico
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a313bb13/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc.png
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc.png
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc.png
deleted file mode 100644
index e1ba61a..000
Binary files 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc.png
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a313bb13/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc_disabled.png
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc_disabled.png
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc_disabled.png
deleted file mode 100644
index fb11dfe..000
Binary files 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc_disabled.png
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a313bb13/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_both.png
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_both.png
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_both.png
deleted file mode 100644
index af5bc7c..000
Binary files 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_both.png
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a313bb13/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_desc.png
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_desc.png
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_desc.png
deleted file mode 100644
index 0e156de..000
Binary files 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_desc.png
 and /dev/null differ


[26/50] [abbrv] hadoop git commit: YARN-5161. [YARN-3368] Add Apache Hadoop logo in YarnUI home page. (Kai Sasaki via Sunil G)

2016-10-21 Thread wangda
YARN-5161. [YARN-3368] Add Apache Hadoop logo in YarnUI home page. (Kai Sasaki 
via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fab0f6ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fab0f6ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fab0f6ff

Branch: refs/heads/YARN-3368
Commit: fab0f6ffab0310458e8d7199ad3c07db10895db1
Parents: b9b7543
Author: Sunil 
Authored: Mon Jul 11 14:31:25 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../src/main/webapp/app/styles/app.css |  11 +++
 .../src/main/webapp/app/templates/application.hbs  |  12 +++-
 .../webapp/public/assets/images/hadoop_logo.png| Bin 0 -> 26495 bytes
 3 files changed, 18 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fab0f6ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
index bcb6aab..e2d09dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
@@ -157,3 +157,14 @@ table.dataTable thead .sorting_desc_disabled {
   stroke: #ccc;  
   stroke-width: 2px;
 }
+
+.hadoop-brand-image {
+  margin-top: -10px;
+  width: auto;
+  height: 45px;
+}
+
+li a.navigation-link.ember-view {
+  color: #2196f3;
+  font-weight: bold;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fab0f6ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
index b45ec6b..03b2c4a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
@@ -20,35 +20,37 @@
   
 
 
+  
+
+  
   
 Toggle navigation
 
 
 
   
-  Apache Hadoop YARN
 
 
 
 
   
 {{#link-to 'yarn-queue' 'root' tagName="li"}}
-  {{#link-to 'yarn-queue' 'root'}}Queues
+  {{#link-to 'yarn-queue' 'root' class="navigation-link"}}Queues
 (current)
   {{/link-to}}
 {{/link-to}}
 {{#link-to 'yarn-apps' tagName="li"}}
-  {{#link-to 'yarn-apps'}}Applications
+  {{#link-to 'yarn-apps' class="navigation-link"}}Applications
 (current)
   {{/link-to}}
 {{/link-to}}
 {{#link-to 'cluster-overview' tagName="li"}}
-  {{#link-to 'cluster-overview'}}Cluster Overview
+  {{#link-to 'cluster-overview' class="navigation-link"}}Cluster 
Overview
 (current)
   {{/link-to}}
 {{/link-to}}
 {{#link-to 'yarn-nodes' tagName="li"}}
-  {{#link-to 'yarn-nodes'}}Nodes
+  {{#link-to 'yarn-nodes' class="navigation-link"}}Nodes
 (current)
   {{/link-to}}
 {{/link-to}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fab0f6ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
new file mode 100644
index 000..275d39e
Binary files /dev/null and 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
 differ





[22/50] [abbrv] hadoop git commit: YARN-4514. [YARN-3368] Cleanup hardcoded configurations, such as RM/ATS addresses. (Sunil G via wangda)

2016-10-21 Thread wangda
YARN-4514. [YARN-3368] Cleanup hardcoded configurations, such as RM/ATS 
addresses. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5468dd81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5468dd81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5468dd81

Branch: refs/heads/YARN-3368
Commit: 5468dd81daed22dddef95fc1e6ce1abffa5c0cba
Parents: 13b8251
Author: Wangda Tan 
Authored: Sat Apr 16 23:04:45 2016 -0700
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../src/main/webapp/app/adapters/abstract.js| 48 +
 .../main/webapp/app/adapters/cluster-info.js| 22 ++
 .../main/webapp/app/adapters/cluster-metric.js  | 22 ++
 .../webapp/app/adapters/yarn-app-attempt.js | 24 ++-
 .../src/main/webapp/app/adapters/yarn-app.js| 27 ++-
 .../webapp/app/adapters/yarn-container-log.js   | 10 ++-
 .../main/webapp/app/adapters/yarn-container.js  | 20 +++---
 .../main/webapp/app/adapters/yarn-node-app.js   | 24 +++
 .../webapp/app/adapters/yarn-node-container.js  | 24 +++
 .../src/main/webapp/app/adapters/yarn-node.js   | 23 +++---
 .../src/main/webapp/app/adapters/yarn-queue.js  | 22 ++
 .../main/webapp/app/adapters/yarn-rm-node.js| 21 ++
 .../hadoop-yarn-ui/src/main/webapp/app/app.js   |  4 +-
 .../src/main/webapp/app/config.js   |  5 +-
 .../src/main/webapp/app/index.html  |  1 +
 .../src/main/webapp/app/initializers/env.js | 29 
 .../src/main/webapp/app/initializers/hosts.js   | 28 
 .../src/main/webapp/app/services/env.js | 59 
 .../src/main/webapp/app/services/hosts.js   | 74 
 .../hadoop-yarn-ui/src/main/webapp/bower.json   | 25 +++
 .../src/main/webapp/config/configs.env  | 48 +
 .../src/main/webapp/config/default-config.js| 32 +
 .../src/main/webapp/config/environment.js   | 11 ++-
 .../src/main/webapp/ember-cli-build.js  | 10 ++-
 .../hadoop-yarn-ui/src/main/webapp/package.json | 35 -
 .../webapp/tests/unit/initializers/env-test.js  | 41 +++
 .../tests/unit/initializers/hosts-test.js   | 41 +++
 .../tests/unit/initializers/jquery-test.js  | 41 +++
 .../main/webapp/tests/unit/services/env-test.js | 30 
 .../webapp/tests/unit/services/hosts-test.js| 30 
 30 files changed, 637 insertions(+), 194 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468dd81/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
new file mode 100644
index 000..c7e5c36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import Ember from 'ember';
+import DS from 'ember-data';
+
+export default DS.JSONAPIAdapter.extend({
+  address: null, //Must be set by inheriting classes
+  restNameSpace: null, //Must be set by inheriting classes
+  serverName: null, //Must be set by inheriting classes
+
+  headers: {
+Accept: 'application/json'
+  },
+
+  host: Ember.computed("address", function () {
+var address = this.get("address");
+return this.get(`hosts.${address}`);
+  }),
+
+  namespace: Ember.computed("restNameSpace", function () {
+var serverName = this.get("restNameSpace");
+return this.get(`env.app.namespaces.${serverName}`);
+  }),
+
+  ajax: function(url, method, options) {
+options = options || {};
+options.crossDomain = true;
+options.xhrFields = {
+  withCredentials: true
+};
+options.targetServer = this.get('serverName');
+return this._super(url, method, 

[17/50] [abbrv] hadoop git commit: YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via wangda)

2016-10-21 Thread wangda
YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via 
wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16ca4351
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16ca4351
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16ca4351

Branch: refs/heads/YARN-3368
Commit: 16ca43517843da0ed1da79c1f991f2f5d9bc84c3
Parents: 09bf289
Author: Wangda Tan 
Authored: Mon Mar 21 13:13:02 2016 -0700
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .../hadoop-yarn-ui/app/adapters/cluster-info.js |   5 +-
 .../app/adapters/cluster-metric.js  |   5 +-
 .../app/adapters/yarn-app-attempt.js|   3 +-
 .../hadoop-yarn-ui/app/adapters/yarn-app.js |   3 +-
 .../app/adapters/yarn-container-log.js  |  74 +
 .../app/adapters/yarn-container.js  |   5 +-
 .../app/adapters/yarn-node-app.js   |  63 
 .../app/adapters/yarn-node-container.js |  64 
 .../hadoop-yarn-ui/app/adapters/yarn-node.js|  40 +
 .../hadoop-yarn-ui/app/adapters/yarn-queue.js   |   3 +-
 .../hadoop-yarn-ui/app/adapters/yarn-rm-node.js |  45 ++
 .../app/components/simple-table.js  |  38 -
 .../hadoop-yarn/hadoop-yarn-ui/app/config.js|  27 
 .../hadoop-yarn/hadoop-yarn-ui/app/constants.js |  24 +++
 .../app/controllers/application.js  |  55 +++
 .../hadoop-yarn-ui/app/helpers/divide.js|  31 
 .../app/helpers/log-files-comma.js  |  48 ++
 .../hadoop-yarn-ui/app/helpers/node-link.js |  37 +
 .../hadoop-yarn-ui/app/helpers/node-menu.js |  66 
 .../hadoop-yarn-ui/app/models/yarn-app.js   |  14 +-
 .../app/models/yarn-container-log.js|  25 +++
 .../hadoop-yarn-ui/app/models/yarn-node-app.js  |  44 ++
 .../app/models/yarn-node-container.js   |  57 +++
 .../hadoop-yarn-ui/app/models/yarn-node.js  |  33 
 .../hadoop-yarn-ui/app/models/yarn-rm-node.js   |  92 +++
 .../hadoop-yarn/hadoop-yarn-ui/app/router.js|  13 ++
 .../hadoop-yarn-ui/app/routes/application.js|  38 +
 .../hadoop-yarn-ui/app/routes/index.js  |  29 
 .../hadoop-yarn-ui/app/routes/yarn-apps.js  |   4 +-
 .../app/routes/yarn-container-log.js|  55 +++
 .../hadoop-yarn-ui/app/routes/yarn-node-app.js  |  29 
 .../hadoop-yarn-ui/app/routes/yarn-node-apps.js |  29 
 .../app/routes/yarn-node-container.js   |  30 
 .../app/routes/yarn-node-containers.js  |  28 
 .../hadoop-yarn-ui/app/routes/yarn-node.js  |  29 
 .../hadoop-yarn-ui/app/routes/yarn-nodes.js |  25 +++
 .../app/serializers/yarn-container-log.js   |  39 +
 .../app/serializers/yarn-node-app.js|  86 +++
 .../app/serializers/yarn-node-container.js  |  74 +
 .../hadoop-yarn-ui/app/serializers/yarn-node.js |  56 +++
 .../app/serializers/yarn-rm-node.js |  77 ++
 .../app/templates/application.hbs   |   4 +-
 .../hadoop-yarn-ui/app/templates/error.hbs  |  19 +++
 .../hadoop-yarn-ui/app/templates/notfound.hbs   |  20 +++
 .../hadoop-yarn-ui/app/templates/yarn-apps.hbs  |   4 +-
 .../app/templates/yarn-container-log.hbs|  36 +
 .../app/templates/yarn-node-app.hbs |  60 
 .../app/templates/yarn-node-apps.hbs|  51 +++
 .../app/templates/yarn-node-container.hbs   |  70 +
 .../app/templates/yarn-node-containers.hbs  |  58 +++
 .../hadoop-yarn-ui/app/templates/yarn-node.hbs  |  94 
 .../hadoop-yarn-ui/app/templates/yarn-nodes.hbs |  65 
 .../hadoop-yarn-ui/app/utils/converter.js   |  21 ++-
 .../hadoop-yarn-ui/app/utils/sorter.js  |  42 -
 .../hadoop-yarn/hadoop-yarn-ui/bower.json   |   2 +-
 .../hadoop-yarn-ui/config/environment.js|   1 -
 .../unit/adapters/yarn-container-log-test.js|  73 +
 .../tests/unit/adapters/yarn-node-app-test.js   |  93 +++
 .../unit/adapters/yarn-node-container-test.js   |  93 +++
 .../tests/unit/adapters/yarn-node-test.js   |  42 +
 .../tests/unit/adapters/yarn-rm-node-test.js|  44 ++
 .../unit/models/yarn-container-log-test.js  |  48 ++
 .../tests/unit/models/yarn-node-app-test.js |  65 
 .../unit/models/yarn-node-container-test.js |  78 ++
 .../tests/unit/models/yarn-node-test.js |  58 +++
 .../tests/unit/models/yarn-rm-node-test.js  |  95 
 .../unit/routes/yarn-container-log-test.js  | 120 +++
 .../tests/unit/routes/yarn-node-app-test.js |  56 +++
 .../tests/unit/routes/yarn-node-apps-test.js|  60 
 .../unit/routes/yarn-node-container-test.js |  

[18/50] [abbrv] hadoop git commit: YARN-4849. Addendum patch to fix document. (Wangda Tan via Sunil G)

2016-10-21 Thread wangda
YARN-4849. Addendum patch to fix document. (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/490c7063
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/490c7063
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/490c7063

Branch: refs/heads/YARN-3368
Commit: 490c706396c449caa08d1a3a45ac97dc21c30a1a
Parents: 9fbb0f2
Author: sunilg 
Authored: Wed Aug 24 16:10:19 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 BUILDING.txt|  2 +-
 .../src/site/markdown/YarnUI2.md| 36 +++-
 2 files changed, 21 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/490c7063/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 4424579..908c366 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -130,7 +130,7 @@ Maven build goals:
   * Use -Psrc to create a project source TAR.GZ
   * Use -Dtar to create a TAR with the distribution (using -Pdist)
   * Use -Preleasedocs to include the changelog and release docs (requires 
Internet connectivity)
-  * Use -Pyarn-ui to build YARN UI v2. (Requires Internet connectivity, and it 
is for dev use only)
+  * Use -Pyarn-ui to build YARN UI v2. (Requires Internet connectivity)
 
  Snappy build options:
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/490c7063/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
index 575ebc7..ff48183 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
@@ -17,27 +17,31 @@
 
 Hadoop: YARN-UI V2
 =
-*This is a WIP project, nobody should use it in production.*
 
 Prerequisites
 -
 
-You will need the following things properly installed on your computer.
+If you run the RM locally on your computer for testing purposes, you need the following things properly installed.
 
-* Install Node.js with NPM: https://nodejs.org/download/
-* After Node.js installed, install bower: `npm install -g bower`.
-* Install Ember-cli: `npm install -g ember-cli`
+- Install Node.js with NPM: https://nodejs.org/download
+- After Node.js is installed, install `corsproxy`: `npm install -g corsproxy`.
 
-BUILD
-
-* Please refer to BUILDING.txt in the top directory and pass -Pyarn-ui to 
build UI-related code
-* Execute `mvn test -Pyarn-ui` to run unit tests
 
-Try it
---
+Configurations
+-
+
+*In yarn-site.xml*
+
+| Configuration Property | Description |
+|: |: |
+| `yarn.resourcemanager.webapp.ui2.enable` | On the server side, indicates whether the new YARN-UI v2 is enabled. Defaults to `false`. |
+| `yarn.resourcemanager.webapp.ui2.address` | Specifies the ResourceManager address and port that host YARN-UI v2. Defaults to `localhost:8288`. |
 
-* Packaging and deploying Hadoop in this branch
-* In 
`hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/config.js`, 
change `timelineWebUrl` and `rmWebUrl` to your YARN RM/Timeline server web 
address. 
-* If you are running YARN RM in your localhost, you should update 
`localBaseUrl` to `localhost:1337/`, install `npm install -g corsproxy` and run 
`corsproxy` to avoid CORS errors. More details: 
`https://www.npmjs.com/package/corsproxy`. 
-* Run `ember serve` under 
`hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/`
-* Visit your app at [http://localhost:4200](http://localhost:4200).
+*In $HADOOP_PREFIX/share/hadoop/yarn/webapps/rm/config/configs.env*
+
+- Update timelineWebAddress and rmWebAddress to the actual addresses running the resource manager and timeline server.
+- If you run the RM locally on your computer just for testing purposes, you need to keep `corsproxy` running. Otherwise, set `localBaseAddress` to empty.
+
+Use it
+-
+Open your browser, go to `rm-address:8288` and try it!
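
For reference, a minimal sketch of how server-side code could read the two properties documented above, using the standard Hadoop Configuration API. The class name and the standalone main() are illustrative assumptions; only the property names and defaults come from the table:

import org.apache.hadoop.conf.Configuration;

public class Ui2ConfigCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.addResource("yarn-site.xml"); // picks up the properties documented above
    boolean enabled = conf.getBoolean("yarn.resourcemanager.webapp.ui2.enable", false);
    String address = conf.get("yarn.resourcemanager.webapp.ui2.address", "localhost:8288");
    System.out.println("YARN-UI v2 enabled=" + enabled + ", address=" + address);
  }
}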





[23/50] [abbrv] hadoop git commit: YARN-5583. [YARN-3368] Fix wrong paths in .gitignore (Sreenath Somarajapuram via Sunil G)

2016-10-21 Thread wangda
YARN-5583. [YARN-3368] Fix wrong paths in .gitignore (Sreenath Somarajapuram 
via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/045c7d59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/045c7d59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/045c7d59

Branch: refs/heads/YARN-3368
Commit: 045c7d59b1398ad1a83810ec78632d2c315fd242
Parents: 951658d
Author: sunilg 
Authored: Tue Aug 30 20:27:59 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 .gitignore | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/045c7d59/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 677bde6..f9a7163 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,8 +35,8 @@ 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.sass-cache
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/connect.lock
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/coverage/*
 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/libpeerconnection.log
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webappnpm-debug.log
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapptestem.log
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/npm-debug.log
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/testem.log
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/dist
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp
 yarnregistry.pdf





[09/50] [abbrv] hadoop git commit: YARN-5747. Application timeline metric aggregation in timeline v2 will lose last round aggregation when an application finishes (Li Lu via Varun Saxena)

2016-10-21 Thread wangda
YARN-5747. Application timeline metric aggregation in timeline v2 will lose 
last round aggregation when an application finishes (Li Lu via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44eb2bd7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44eb2bd7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44eb2bd7

Branch: refs/heads/YARN-3368
Commit: 44eb2bd7ae39cca77fc8c7ad493b52ea1bb43530
Parents: f63cd78
Author: Varun Saxena 
Authored: Sat Oct 22 01:14:49 2016 +0530
Committer: Varun Saxena 
Committed: Sat Oct 22 01:14:49 2016 +0530

--
 .../collector/AppLevelTimelineCollector.java  | 14 +++---
 1 file changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44eb2bd7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
index d276269..e62a436 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
@@ -58,6 +58,7 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
   private final ApplicationId appId;
   private final TimelineCollectorContext context;
   private ScheduledThreadPoolExecutor appAggregationExecutor;
+  private AppLevelAggregator appAggregator;
 
   public AppLevelTimelineCollector(ApplicationId appId) {
 super(AppLevelTimelineCollector.class.getName() + " - " + 
appId.toString());
@@ -94,7 +95,8 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
 new ThreadFactoryBuilder()
 .setNameFormat("TimelineCollector Aggregation thread #%d")
 .build());
-appAggregationExecutor.scheduleAtFixedRate(new AppLevelAggregator(),
+appAggregator = new AppLevelAggregator();
+appAggregationExecutor.scheduleAtFixedRate(appAggregator,
 AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
 AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
 TimeUnit.SECONDS);
@@ -108,6 +110,8 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
   LOG.info("App-level aggregator shutdown timed out, shutdown now. ");
   appAggregationExecutor.shutdownNow();
 }
+// Perform one round of aggregation after the aggregation executor is done.
+appAggregator.aggregate();
 super.serviceStop();
   }
 
@@ -123,8 +127,7 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
 
   private class AppLevelAggregator implements Runnable {
 
-@Override
-public void run() {
+private void aggregate() {
   if (LOG.isDebugEnabled()) {
 LOG.debug("App-level real-time aggregating");
   }
@@ -156,6 +159,11 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
 LOG.debug("App-level real-time aggregation complete");
   }
 }
+
+@Override
+public void run() {
+  aggregate();
+}
   }
 
 }
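
The fix follows a shutdown-flush pattern: keep a handle to the periodic task and run it once more after the executor has stopped, so metrics accumulated since the last scheduled tick are not dropped when the application finishes. A minimal, self-contained Java sketch of the same pattern (class and method names are illustrative, not the Hadoop ones):

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class PeriodicAggregatorDemo {
  private final ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1);
  // Keep a reference so one final round can run after shutdown, as in the patch.
  private final Runnable aggregator = this::aggregate;

  void start() {
    executor.scheduleAtFixedRate(aggregator, 15, 15, TimeUnit.SECONDS);
  }

  void stop() throws InterruptedException {
    executor.shutdown();
    if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
      executor.shutdownNow();
    }
    aggregator.run(); // last-round aggregation after the executor is done
  }

  private void aggregate() {
    // aggregate pending timeline metrics (details elided)
  }
}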





[14/50] [abbrv] hadoop git commit: YARN-5598. [YARN-3368] Fix create-release to be able to generate bits for the new yarn-ui (Wangda Tan via Sunil G)

2016-10-21 Thread wangda
YARN-5598. [YARN-3368] Fix create-release to be able to generate bits for the 
new yarn-ui (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a4801a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a4801a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a4801a3

Branch: refs/heads/YARN-3368
Commit: 5a4801a3e176265d811d54c0ce358db5a07cbbc9
Parents: a313bb1
Author: sunilg 
Authored: Tue Sep 6 23:15:59 2016 +0530
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 dev-support/bin/create-release |   2 +-
 dev-support/create-release.sh  | 144 
 dev-support/docker/Dockerfile  |   6 +-
 3 files changed, 6 insertions(+), 146 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a4801a3/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 0e0ab86..d40fffa 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -527,7 +527,7 @@ function makearelease
   # shellcheck disable=SC2046
   run_and_redirect "${LOGDIR}/mvn_install.log" \
 "${MVN}" "${MVN_ARGS[@]}" install \
-  -Pdist,src \
+  -Pdist,src,yarn-ui \
   "${signflags[@]}" \
   -DskipTests -Dtar $(hadoop_native_flags)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a4801a3/dev-support/create-release.sh
--
diff --git a/dev-support/create-release.sh b/dev-support/create-release.sh
deleted file mode 100755
index 792a805..000
--- a/dev-support/create-release.sh
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/bin/bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Function to probe the exit code of the script commands,
-# and stop in the case of failure with a contextual error
-# message.
-run() {
-  echo "\$ ${@}"
-  "${@}"
-  exitCode=$?
-  if [[ $exitCode != 0 ]]; then
-echo
-echo "Failed! running ${@} in `pwd`"
-echo
-exit $exitCode
-  fi
-}
-
-doMD5() {
-  MD5CMD="md5sum"
-  which $MD5CMD
-  if [[ $? != 0 ]]; then
-MD5CMD="md5"
-  fi
-  run $MD5CMD ${1} > ${1}.md5
-}
-
-# If provided, the created release artifacts will be tagged with it 
-# (use RC#, i.e: RC0). Do not use a label to create the final release 
-# artifact.
-RC_LABEL=$1
-
-# Extract Hadoop version from POM
-HADOOP_VERSION=`cat pom.xml | grep "<version>" | head -1 | sed 's|^ *<version>||' | sed 's|</version>.*$||'`
-
-# Setup git
-GIT=${GIT:-git}
-
-echo
-echo "*"
-echo
-echo "Hadoop version to create release artifacts: ${HADOOP_VERSION}"
-echo 
-echo "Release Candidate Label: ${RC_LABEL}"
-echo
-echo "*"
-echo
-
-if [[ ! -z ${RC_LABEL} ]]; then
-  RC_LABEL="-${RC_LABEL}"
-fi
-
-# Get Maven command
-if [ -z "$MAVEN_HOME" ]; then
-  MVN=mvn
-else
-  MVN=$MAVEN_HOME/bin/mvn
-fi
-
-ARTIFACTS_DIR="target/artifacts"
-
-# git clean to clear any remnants from previous build
-run ${GIT} clean -xdf
-
-# mvn clean for sanity
-run ${MVN} clean
-
-# Create staging dir for release artifacts
-run mkdir -p ${ARTIFACTS_DIR}
-
-# Create RAT report
-run ${MVN} apache-rat:check
-
-# Create SRC and BIN tarballs for release,
-# Using 'install’ goal instead of 'package' so artifacts are available 
-# in the Maven local cache for the site generation
-run ${MVN} install -Pdist,src,native,yarn-ui -DskipTests -Dtar
-
-# Create site for release
-run ${MVN} site site:stage -Pdist -Psrc
-run mkdir -p target/staging/hadoop-project/hadoop-project-dist/hadoop-yarn
-run mkdir -p target/staging/hadoop-project/hadoop-project-dist/hadoop-mapreduce
-run cp ./hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html 
target/staging/hadoop-project/hadoop-project-dist/hadoop-common/
-run cp ./hadoop-common-project/hadoop-common/CHANGES.txt 

[07/50] [abbrv] hadoop git commit: YARN-5047. Refactor nodeUpdate across schedulers. (Ray Chiang via kasha)

2016-10-21 Thread wangda
YARN-5047. Refactor nodeUpdate across schedulers. (Ray Chiang via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/754cb4e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/754cb4e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/754cb4e3

Branch: refs/heads/YARN-3368
Commit: 754cb4e30fac1c5fe8d44626968c0ddbfe459335
Parents: a064865
Author: Karthik Kambatla 
Authored: Thu Oct 20 21:17:48 2016 -0700
Committer: Karthik Kambatla 
Committed: Thu Oct 20 21:17:48 2016 -0700

--
 .../scheduler/AbstractYarnScheduler.java| 186 ++-
 .../scheduler/capacity/CapacityScheduler.java   | 122 ++--
 .../scheduler/fair/FairScheduler.java   |  80 +---
 .../scheduler/fifo/FifoScheduler.java   |  94 +++---
 ...estProportionalCapacityPreemptionPolicy.java |   4 +-
 5 files changed, 225 insertions(+), 261 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/754cb4e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 645e06d..df59556 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
@@ -73,7 +74,12 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReco
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
+import org.apache.hadoop.yarn.server.utils.Lock;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.SettableFuture;
@@ -94,10 +100,14 @@ public abstract class AbstractYarnScheduler
   protected Resource minimumAllocation;
 
   protected volatile RMContext rmContext;
-  
+
   private volatile Priority maxClusterLevelAppPriority;
 
   protected ActivitiesManager activitiesManager;
+  protected SchedulerHealth schedulerHealth = new SchedulerHealth();
+  protected volatile long lastNodeUpdateTime;
+
+  private volatile Clock clock;
 
   /*
* All schedulers which are inheriting AbstractYarnScheduler should use
@@ -130,6 +140,7 @@ public abstract class AbstractYarnScheduler
*/
   public AbstractYarnScheduler(String name) {
 super(name);
+clock = SystemClock.getInstance();
 ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
 readLock = lock.readLock();
 writeLock = lock.writeLock();
@@ -228,13 +239,25 @@ public abstract class AbstractYarnScheduler
 nodeTracker.setConfiguredMaxAllocation(maximumAllocation);
   }
 
+  public SchedulerHealth getSchedulerHealth() {
+return this.schedulerHealth;
+  }
+
+  protected void setLastNodeUpdateTime(long time) {
+this.lastNodeUpdateTime = time;
+  }
+
+  public long getLastNodeUpdateTime() {
+return lastNodeUpdateTime;
+  }
+
   protected void containerLaunchedOnNode(
   ContainerId containerId, SchedulerNode node) {
 try {
   readLock.lock();
   // Get 
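
The digest truncates the diff at this point. The shape of the refactoring is a template method: shared node-update bookkeeping (scheduler health, last-update timestamp) moves into AbstractYarnScheduler, and each concrete scheduler supplies only its scheduling step. A hypothetical sketch of that structure, with names simplified and not matching the actual Hadoop signatures:

abstract class AbstractSchedulerSketch {
  private volatile long lastNodeUpdateTime;

  // Common flow formerly duplicated across Capacity/Fair/Fifo schedulers.
  public final void nodeUpdate(NodeInfo node) {
    lastNodeUpdateTime = System.currentTimeMillis(); // shared bookkeeping
    attemptScheduling(node);                         // scheduler-specific hook
  }

  protected abstract void attemptScheduling(NodeInfo node);

  public long getLastNodeUpdateTime() {
    return lastNodeUpdateTime;
  }
}

class FifoLikeScheduler extends AbstractSchedulerSketch {
  @Override
  protected void attemptScheduling(NodeInfo node) {
    // assign containers on this node's heartbeat (details elided)
  }
}

class NodeInfo { /* stand-in for RMNode-style state */ }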

[01/50] [abbrv] hadoop git commit: HDFS-11018. Incorrect check and message in FsDatasetImpl#invalidate. Contributed by Yiqun Lin. [Forced Update!]

2016-10-21 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3368 1d6895580 -> 9690f297f (forced update)


HDFS-11018. Incorrect check and message in FsDatasetImpl#invalidate. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d2da38d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d2da38d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d2da38d

Branch: refs/heads/YARN-3368
Commit: 6d2da38d16cebe9b82f1048f87127eecee33664c
Parents: f872c6b
Author: Wei-Chiu Chuang 
Authored: Thu Oct 20 10:49:39 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Oct 20 10:56:39 2016 -0700

--
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 31 +---
 1 file changed, 21 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d2da38d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index ba653ac..84569f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -786,8 +786,14 @@ class FsDatasetImpl implements FsDatasetSpi {
   throws ReplicaNotFoundException {
 ReplicaInfo info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
 if (info == null) {
-  throw new ReplicaNotFoundException(
-  ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
+  if (volumeMap.get(b.getBlockPoolId(), b.getLocalBlock().getBlockId())
+  == null) {
+throw new ReplicaNotFoundException(
+ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
+  } else {
+throw new ReplicaNotFoundException(
+ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + b);
+  }
 }
 return info;
   }
@@ -1878,16 +1884,21 @@ class FsDatasetImpl implements 
FsDatasetSpi {
   try (AutoCloseableLock lock = datasetLock.acquire()) {
 final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
 if (info == null) {
-  // It is okay if the block is not found -- it may be deleted earlier.
-  LOG.info("Failed to delete replica " + invalidBlks[i]
-  + ": ReplicaInfo not found.");
-  continue;
-}
-if (info.getGenerationStamp() != invalidBlks[i].getGenerationStamp()) {
-  errors.add("Failed to delete replica " + invalidBlks[i]
-  + ": GenerationStamp not matched, info=" + info);
+  ReplicaInfo infoByBlockId =
+  volumeMap.get(bpid, invalidBlks[i].getBlockId());
+  if (infoByBlockId == null) {
+// It is okay if the block is not found -- it
+// may be deleted earlier.
+LOG.info("Failed to delete replica " + invalidBlks[i]
++ ": ReplicaInfo not found.");
+  } else {
+errors.add("Failed to delete replica " + invalidBlks[i]
++ ": GenerationStamp not matched, existing replica is "
++ Block.toString(infoByBlockId));
+  }
   continue;
 }
+
 v = (FsVolumeImpl)info.getVolume();
 if (v == null) {
   errors.add("Failed to delete replica " + invalidBlks[i]





[05/50] [abbrv] hadoop git commit: HDFS-10998. Add unit tests for HDFS command 'dfsadmin -fetchImage' in HA. Contributed by Xiaobing Zhou

2016-10-21 Thread wangda
HDFS-10998. Add unit tests for HDFS command 'dfsadmin -fetchImage' in HA. 
Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7d87dee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7d87dee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7d87dee

Branch: refs/heads/YARN-3368
Commit: d7d87deece66333c188e9b7c10b4b56ddb529ce9
Parents: 262827c
Author: Mingliang Liu 
Authored: Thu Oct 20 19:51:48 2016 -0700
Committer: Mingliang Liu 
Committed: Thu Oct 20 19:51:48 2016 -0700

--
 .../org/apache/hadoop/hdfs/TestFetchImage.java  | 105 ++-
 1 file changed, 79 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7d87dee/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
index d8218b6..7e1e593 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
@@ -17,10 +17,15 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.junit.Assert.assertEquals;
 
 import java.io.File;
+import java.io.IOException;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -29,11 +34,16 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class TestFetchImage {
@@ -43,46 +53,89 @@ public class TestFetchImage {
   // Shamelessly stolen from NNStorage.
   private static final Pattern IMAGE_REGEX = Pattern.compile("fsimage_(\\d+)");
 
+  private MiniDFSCluster cluster;
+  private NameNode nn0 = null;
+  private NameNode nn1 = null;
+  private Configuration conf = null;
+
+  @BeforeClass
+  public static void setupImageDir() {
+FETCHED_IMAGE_FILE.mkdirs();
+  }
+
   @AfterClass
   public static void cleanup() {
 FileUtil.fullyDelete(FETCHED_IMAGE_FILE);
   }
 
+  @Before
+  public void setupCluster() throws IOException, URISyntaxException {
+conf = new Configuration();
+conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
+conf.setInt(DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+conf.setLong(DFS_BLOCK_SIZE_KEY, 1024);
+
+cluster = new MiniDFSCluster.Builder(conf)
+.nnTopology(MiniDFSNNTopology.simpleHATopology())
+.numDataNodes(1)
+.build();
+nn0 = cluster.getNameNode(0);
+nn1 = cluster.getNameNode(1);
+HATestUtil.configureFailoverFs(cluster, conf);
+cluster.waitActive();
+  }
+
   /**
* Download a few fsimages using `hdfs dfsadmin -fetchImage ...' and verify
* the results.
*/
-  @Test
-  public void testFetchImage() throws Exception {
-FETCHED_IMAGE_FILE.mkdirs();
-Configuration conf = new Configuration();
-MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-FileSystem fs = null;
-try {
-  DFSAdmin dfsAdmin = new DFSAdmin();
-  dfsAdmin.setConf(conf);
-  
+  @Test(timeout=30000)
+  public void testFetchImageHA() throws Exception {
+final Path parent = new Path(
+PathUtils.getTestPath(getClass()),
+GenericTestUtils.getMethodName());
+
+int nnIndex = 0;
+/* run on nn0 as active */
+cluster.transitionToActive(nnIndex);
+testFetchImageInternal(
+nnIndex,
+new Path(parent, "dir1"),
+new Path(parent, "dir2"));
+
+/* run on nn1 as active */
+nnIndex = 1;
+HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
+cluster.transitionToActive(nnIndex);
+testFetchImageInternal(
+nnIndex,
+new Path(parent, "dir3"),
+new Path(parent, "dir4"));
+  }
+
+  private void 

[03/50] [abbrv] hadoop git commit: HDFS-10976. Report erasure coding policy of EC files in Fsck. Contributed by Wei-Chiu Chuang.

2016-10-21 Thread wangda
HDFS-10976. Report erasure coding policy of EC files in Fsck. Contributed by 
Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e83a21c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e83a21c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e83a21c

Branch: refs/heads/YARN-3368
Commit: 5e83a21cb66c78e89ac5af9a130ab0aee596a9f4
Parents: 3fbf4cd
Author: Wei-Chiu Chuang 
Authored: Thu Oct 20 13:02:16 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Oct 20 13:06:43 2016 -0700

--
 .../hadoop/hdfs/server/namenode/NamenodeFsck.java | 14 --
 .../apache/hadoop/hdfs/server/namenode/TestFsck.java  | 14 +-
 2 files changed, 25 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e83a21c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 8302035..a2e249d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -540,11 +541,20 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 res.totalFiles++;
 res.totalSize += fileLen;
 res.totalBlocks += blocks.locatedBlockCount();
+String redundancyPolicy;
+ErasureCodingPolicy ecPolicy = file.getErasureCodingPolicy();
+if (ecPolicy == null) { // a replicated file
+  redundancyPolicy = "replicated: replication=" +
+  file.getReplication() + ",";
+} else {
+  redundancyPolicy = "erasure-coded: policy=" + ecPolicy.getName() + ",";
+}
+
 if (showOpenFiles && isOpen) {
-  out.print(path + " " + fileLen + " bytes, " +
+  out.print(path + " " + fileLen + " bytes, " + redundancyPolicy + " " +
 blocks.locatedBlockCount() + " block(s), OPENFORWRITE: ");
 } else if (showFiles) {
-  out.print(path + " " + fileLen + " bytes, " +
+  out.print(path + " " + fileLen + " bytes, " + redundancyPolicy + " " +
 blocks.locatedBlockCount() + " block(s): ");
 } else if (showprogress) {
   out.print('.');

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e83a21c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index aa41e9b..254a86c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -1700,9 +1700,21 @@ public class TestFsck {
 // restart the cluster; bring up namenode but not the data nodes
 cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(0).format(false).build();
-outStr = runFsck(conf, 1, true, "/");
+outStr = runFsck(conf, 1, true, "/", "-files", "-blocks");
 // expect the result is corrupt
 assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+String[] outLines = outStr.split("\\r?\\n");
+for (String line: outLines) {
+  if (line.contains(largeFilePath.toString())) {
+final HdfsFileStatus file = cluster.getNameNode().getRpcServer().
+getFileInfo(largeFilePath.toString());
+assertTrue(line.contains("policy=" +
+file.getErasureCodingPolicy().getName()));
+  } else if (line.contains(replFilePath.toString())) {
+assertTrue(line.contains("replication=" + cluster.getFileSystem().
+getFileStatus(replFilePath).getReplication()));
+  }
+}
 System.out.println(outStr);
   }
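
For illustration, with this change a `-files` listing distinguishes the two layouts. The paths, sizes, and policy name below are made up; only the line format comes from the NamenodeFsck change above:

    $ hdfs fsck / -files -blocks
    /user/foo/replicated.dat 1048576 bytes, replicated: replication=3, 1 block(s): ...
    /user/foo/striped.dat 6291456 bytes, erasure-coded: policy=RS-6-3-64k, 1 block(s): ...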
 



[13/50] [abbrv] hadoop git commit: YARN-5509. Build error due to preparing 3.0.0-alpha2 deployment. (Kai Sasaki via wangda)

2016-10-21 Thread wangda
YARN-5509. Build error due to preparing 3.0.0-alpha2 deployment. (Kai Sasaki 
via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/452a334a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/452a334a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/452a334a

Branch: refs/heads/YARN-3368
Commit: 452a334a76b0c11864531cfbe14cf80369980624
Parents: e91f960
Author: Wangda Tan 
Authored: Thu Aug 11 14:59:14 2016 -0700
Committer: Wangda Tan 
Committed: Fri Oct 21 13:56:31 2016 -0700

--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/452a334a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 6d46fda..2933a76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -20,12 +20,12 @@
   
 hadoop-yarn
 org.apache.hadoop
-3.0.0-alpha1-SNAPSHOT
+3.0.0-alpha2-SNAPSHOT
   
   4.0.0
   org.apache.hadoop
   hadoop-yarn-ui
-  3.0.0-alpha1-SNAPSHOT
+  3.0.0-alpha2-SNAPSHOT
   Apache Hadoop YARN UI
   ${packaging.type}
 





hadoop git commit: YARN-5679. TestAHSWebServices is failing (ajisakaa via rkanter)

2016-10-21 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 61e30cf83 -> 23d7d53a4


YARN-5679. TestAHSWebServices is failing (ajisakaa via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23d7d53a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23d7d53a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23d7d53a

Branch: refs/heads/trunk
Commit: 23d7d53a41c6a59efa4093ae563c45af911005d4
Parents: 61e30cf
Author: Robert Kanter 
Authored: Fri Oct 21 13:31:03 2016 -0700
Committer: Robert Kanter 
Committed: Fri Oct 21 13:31:03 2016 -0700

--
 .../hadoop/yarn/logaggregation/AggregatedLogFormat.java   | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23d7d53a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index 3c4f835..02528d1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -97,7 +97,9 @@ public class AggregatedLogFormat {
*/
   private static final FsPermission APP_LOG_FILE_UMASK = FsPermission
   .createImmutable((short) (0640 ^ 0777));
-
+  /** Default permission for the log file. */
+  private static final FsPermission APP_LOG_FILE_PERM =
+  FsPermission.getFileDefault().applyUMask(APP_LOG_FILE_UMASK);
 
   static {
 RESERVED_KEYS = new HashMap();
@@ -458,11 +460,10 @@ public class AggregatedLogFormat {
   @Override
   public FSDataOutputStream run() throws Exception {
 fc = FileContext.getFileContext(remoteAppLogFile.toUri(), 
conf);
-fc.setUMask(APP_LOG_FILE_UMASK);
 return fc.create(
 remoteAppLogFile,
 EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
-new Options.CreateOpts[] {});
+Options.CreateOpts.perms(APP_LOG_FILE_PERM));
   }
 });
   } catch (InterruptedException e) {
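
The fix computes the effective permission once and passes it at create time, rather than mutating the FileContext umask, which is process-wide state that concurrent writers could observe mid-change. A short sketch of the permission arithmetic, reusing the patch's constants:

    // Values in octal: the umask constant is 0640 ^ 0777 = 0137, and the
    // default file permission 0666 masked by 0137 yields the intended 0640.
    FsPermission umask = FsPermission.createImmutable((short) (0640 ^ 0777));
    FsPermission perm = FsPermission.getFileDefault().applyUMask(umask);
    assert perm.toShort() == 0640;
    // Options.CreateOpts.perms(perm) then applies this to the one file
    // being created, with no shared setUMask() side effect.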





hadoop git commit: HDFS-8410. Add computation time metrics to datanode for ECWorker. Contributed by SammiChen.

2016-10-21 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk ae8bccd50 -> 61e30cf83


HDFS-8410. Add computation time metrics to datanode for ECWorker. Contributed 
by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61e30cf8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61e30cf8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61e30cf8

Branch: refs/heads/trunk
Commit: 61e30cf83ca78529603d9b4c6732418da7e4d0c8
Parents: ae8bccd
Author: Andrew Wang 
Authored: Fri Oct 21 13:12:35 2016 -0700
Committer: Andrew Wang 
Committed: Fri Oct 21 13:12:41 2016 -0700

--
 .../erasurecode/StripedBlockReconstructor.java  |  3 ++
 .../datanode/metrics/DataNodeMetrics.java   | 13 +-
 .../TestDataNodeErasureCodingMetrics.java   | 43 +---
 3 files changed, 43 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61e30cf8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
index 9f9f15d..a8e9d30 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
@@ -103,7 +103,10 @@ class StripedBlockReconstructor extends 
StripedReconstructor
 int[] erasedIndices = stripedWriter.getRealTargetIndices();
 ByteBuffer[] outputs = 
stripedWriter.getRealTargetBuffers(toReconstructLen);
 
+long start = System.nanoTime();
 getDecoder().decode(inputs, erasedIndices, outputs);
+long end = System.nanoTime();
+this.getDatanode().getMetrics().incrECDecodingTime(end - start);
 
 stripedWriter.updateRealTargetBuffers(toReconstructLen);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61e30cf8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index dc12787..23e15a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode.metrics;
 
 import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
+import static org.apache.hadoop.metrics2.lib.Interns.info;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -134,6 +135,8 @@ public class DataNodeMetrics {
   MutableCounterLong ecReconstructionTasks;
   @Metric("Count of erasure coding failed reconstruction tasks")
   MutableCounterLong ecFailedReconstructionTasks;
+  // Nanoseconds spent by decoding tasks.
+  MutableCounterLong ecDecodingTimeNanos;
 
   final MetricsRegistry registry = new MetricsRegistry("datanode");
   final String name;
@@ -153,7 +156,10 @@ public class DataNodeMetrics {
 sendDataPacketTransferNanosQuantiles = new MutableQuantiles[len];
 ramDiskBlocksEvictionWindowMsQuantiles = new MutableQuantiles[len];
 ramDiskBlocksLazyPersistWindowMsQuantiles = new MutableQuantiles[len];
-
+ecDecodingTimeNanos = registry.newCounter(
+info("ecDecodingTimeNanos", "Nanoseconds spent by decoding tasks"),
+(long) 0);
+
 for (int i = 0; i < len; i++) {
   int interval = intervals[i];
   packetAckRoundTripTimeNanosQuantiles[i] = registry.newQuantiles(
@@ -442,7 +448,10 @@ public class DataNodeMetrics {
   }
 
   public void setDataNodeActiveXceiversCount(int value) {
-this.dataNodeActiveXceiversCount.set(value);
+dataNodeActiveXceiversCount.set(value);
   }
 
+  public void incrECDecodingTime(long decodingTimeNanos) {
+ecDecodingTimeNanos.incr(decodingTimeNanos);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61e30cf8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java

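The test diff is cut off above. As a hedged sketch, a test can assert that the counter moves after forcing a reconstruction, assuming Hadoop's MetricsAsserts test helpers and the metric name registered via info() above:

    import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
    import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

    // After triggering a block reconstruction on the DataNode under test:
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    assertTrue("decode time should accumulate after reconstruction",
        getLongCounter("ecDecodingTimeNanos", rb) > 0);
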
[1/2] hadoop git commit: HADOOP-13702. Add instrumented ReadWriteLock. Contributed by Jingcheng Du

2016-10-21 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 385c1daa4 -> 25f4327f0
  refs/heads/trunk 44eb2bd7a -> ae8bccd50


HADOOP-13702. Add instrumented ReadWriteLock. Contributed by Jingcheng Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae8bccd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae8bccd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae8bccd5

Branch: refs/heads/trunk
Commit: ae8bccd5090d8b42dae9a8e0c13a9766a7c42ecb
Parents: 44eb2bd
Author: Chris Douglas 
Authored: Fri Oct 21 11:28:11 2016 -0700
Committer: Chris Douglas 
Committed: Fri Oct 21 12:59:54 2016 -0700

--
 .../apache/hadoop/util/InstrumentedLock.java| 197 
 .../hadoop/util/InstrumentedReadLock.java   |  92 
 .../hadoop/util/InstrumentedReadWriteLock.java  |  58 +
 .../hadoop/util/InstrumentedWriteLock.java  |  54 +
 .../hadoop/util/TestInstrumentedLock.java   | 162 +
 .../util/TestInstrumentedReadWriteLock.java | 234 +++
 .../apache/hadoop/hdfs/InstrumentedLock.java| 185 ---
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   2 +-
 .../hadoop/hdfs/TestInstrumentedLock.java   | 166 -
 9 files changed, 798 insertions(+), 352 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae8bccd5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
new file mode 100644
index 000..0520271
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.commons.logging.Log;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * This is a debugging class that can be used by callers to track
+ * whether a specific lock is being held for too long and periodically
+ * log a warning and stack trace, if so.
+ *
+ * The logged warnings are throttled so that logs are not spammed.
+ *
+ * A new instance of InstrumentedLock can be created for each object
+ * that needs to be instrumented.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class InstrumentedLock implements Lock {
+
+  private final Lock lock;
+  private final Log logger;
+  private final String name;
+  private final Timer clock;
+
+  /** Minimum gap between two lock warnings. */
+  private final long minLoggingGap;
+  /** Threshold for detecting long lock held time. */
+  private final long lockWarningThreshold;
+
+  // Tracking counters for lock statistics.
+  private volatile long lockAcquireTimestamp;
+  private final AtomicLong lastLogTimestamp;
+  private final AtomicLong warningsSuppressed = new AtomicLong(0);
+
+  /**
+   * Create an instrumented lock instance which logs a warning message
+   * when the lock held time is above the given threshold.
+   *
+   * @param name the identifier of the lock object
+   * @param logger this class does not have its own logger, will log to the
+   *   given logger instead
+   * @param minLoggingGapMs  the minimum time gap between two log messages,
+   * this is to avoid spamming too many logs
+   * @param lockWarningThresholdMs the time threshold to view lock held
+   *   time as being "too long"
+   */

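The message is truncated before the constructor signature, but the @param tags above pin down the four arguments. A usage sketch with the order inferred from the javadoc (name, logger, min logging gap, warning threshold); the wrapper class and threshold values are illustrative:

    import java.util.concurrent.locks.Lock;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.util.InstrumentedLock;

    public class DatasetLockExample {
      private static final Log LOG =
          LogFactory.getLog(DatasetLockExample.class);
      // Warn when held longer than 4s; log at most once per 300ms.
      private final Lock lock =
          new InstrumentedLock("dataset", LOG, 300L, 4000L);

      public void doWork() {
        lock.lock();
        try {
          // critical section; overstaying produces a throttled warning
          // with a stack trace in LOG
        } finally {
          lock.unlock();
        }
      }
    }
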
[2/2] hadoop git commit: HADOOP-13702. Add instrumented ReadWriteLock. Contributed by Jingcheng Du

2016-10-21 Thread cdouglas
HADOOP-13702. Add instrumented ReadWriteLock. Contributed by Jingcheng Du

(cherry picked from commit ae8bccd5090d8b42dae9a8e0c13a9766a7c42ecb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25f4327f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25f4327f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25f4327f

Branch: refs/heads/branch-2
Commit: 25f4327f0baa947bb99dc808077e3266e0fd982b
Parents: 385c1da
Author: Chris Douglas 
Authored: Fri Oct 21 11:28:11 2016 -0700
Committer: Chris Douglas 
Committed: Fri Oct 21 13:01:08 2016 -0700

--
 .../apache/hadoop/util/InstrumentedLock.java| 197 
 .../hadoop/util/InstrumentedReadLock.java   |  92 
 .../hadoop/util/InstrumentedReadWriteLock.java  |  58 +
 .../hadoop/util/InstrumentedWriteLock.java  |  54 +
 .../hadoop/util/TestInstrumentedLock.java   | 162 +
 .../util/TestInstrumentedReadWriteLock.java | 234 +++
 .../apache/hadoop/hdfs/InstrumentedLock.java| 185 ---
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   2 +-
 .../hadoop/hdfs/TestInstrumentedLock.java   | 166 -
 9 files changed, 798 insertions(+), 352 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f4327f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
new file mode 100644
index 000..0520271
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.commons.logging.Log;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * This is a debugging class that can be used by callers to track
+ * whether a specific lock is being held for too long and periodically
+ * log a warning and stack trace, if so.
+ *
+ * The logged warnings are throttled so that logs are not spammed.
+ *
+ * A new instance of InstrumentedLock can be created for each object
+ * that needs to be instrumented.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class InstrumentedLock implements Lock {
+
+  private final Lock lock;
+  private final Log logger;
+  private final String name;
+  private final Timer clock;
+
+  /** Minimum gap between two lock warnings. */
+  private final long minLoggingGap;
+  /** Threshold for detecting long lock held time. */
+  private final long lockWarningThreshold;
+
+  // Tracking counters for lock statistics.
+  private volatile long lockAcquireTimestamp;
+  private final AtomicLong lastLogTimestamp;
+  private final AtomicLong warningsSuppressed = new AtomicLong(0);
+
+  /**
+   * Create an instrumented lock instance which logs a warning message
+   * when the lock held time is above the given threshold.
+   *
+   * @param name the identifier of the lock object
+   * @param logger this class does not have its own logger, will log to the
+   *   given logger instead
+   * @param minLoggingGapMs  the minimum time gap between two log messages,
+   * this is to avoid spamming too many logs
+   * @param lockWarningThresholdMs the time threshold to view lock held
+   *   time as being "too long"
+   */
+  public 

hadoop git commit: YARN-5747. Application timeline metric aggregation in timeline v2 will lose last round aggregation when an application finishes (Li Lu via Varun Saxena)

2016-10-21 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355-branch-2 f015e30fb -> 77fe5cc2b


YARN-5747. Application timeline metric aggregation in timeline v2 will lose 
last round aggregation when an application finishes (Li Lu via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77fe5cc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77fe5cc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77fe5cc2

Branch: refs/heads/YARN-5355-branch-2
Commit: 77fe5cc2bfa9c1178977bf00392f45e9d307edd1
Parents: f015e30
Author: Varun Saxena 
Authored: Sat Oct 22 01:17:11 2016 +0530
Committer: Varun Saxena 
Committed: Sat Oct 22 01:17:24 2016 +0530

--
 .../collector/AppLevelTimelineCollector.java  | 14 +++---
 1 file changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77fe5cc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
index d276269..e62a436 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
@@ -58,6 +58,7 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
   private final ApplicationId appId;
   private final TimelineCollectorContext context;
   private ScheduledThreadPoolExecutor appAggregationExecutor;
+  private AppLevelAggregator appAggregator;
 
   public AppLevelTimelineCollector(ApplicationId appId) {
 super(AppLevelTimelineCollector.class.getName() + " - " + 
appId.toString());
@@ -94,7 +95,8 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
 new ThreadFactoryBuilder()
 .setNameFormat("TimelineCollector Aggregation thread #%d")
 .build());
-appAggregationExecutor.scheduleAtFixedRate(new AppLevelAggregator(),
+appAggregator = new AppLevelAggregator();
+appAggregationExecutor.scheduleAtFixedRate(appAggregator,
 AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
 AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
 TimeUnit.SECONDS);
@@ -108,6 +110,8 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
   LOG.info("App-level aggregator shutdown timed out, shutdown now. ");
   appAggregationExecutor.shutdownNow();
 }
+// Perform one round of aggregation after the aggregation executor is done.
+appAggregator.aggregate();
 super.serviceStop();
   }
 
@@ -123,8 +127,7 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
 
   private class AppLevelAggregator implements Runnable {
 
-@Override
-public void run() {
+private void aggregate() {
   if (LOG.isDebugEnabled()) {
 LOG.debug("App-level real-time aggregating");
   }
@@ -156,6 +159,11 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
 LOG.debug("App-level real-time aggregation complete");
   }
 }
+
+@Override
+public void run() {
+  aggregate();
+}
   }
 
 }
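
The fix keeps a reference to the aggregator so serviceStop() can run one last round after the executor drains; otherwise anything published after the final scheduled tick is lost. A generic sketch of the pattern (not the YARN code itself):

    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PeriodicAggregatorExample {
      private final ScheduledThreadPoolExecutor executor =
          new ScheduledThreadPoolExecutor(1);

      public void start(long intervalSecs) {
        executor.scheduleAtFixedRate(this::aggregateOnce,
            intervalSecs, intervalSecs, TimeUnit.SECONDS);
      }

      public void stop() throws InterruptedException {
        executor.shutdown();
        if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
          executor.shutdownNow();
        }
        // One final synchronous round flushes anything that arrived
        // after the last scheduled run.
        aggregateOnce();
      }

      private void aggregateOnce() {
        // fold pending metrics into the aggregate
      }
    }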





hadoop git commit: YARN-5747. Application timeline metric aggregation in timeline v2 will lose last round aggregation when an application finishes (Li Lu via Varun Saxena)

2016-10-21 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 ed77c365f -> 703d129f8


YARN-5747. Application timeline metric aggregation in timeline v2 will lose 
last round aggregation when an application finishes (Li Lu via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/703d129f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/703d129f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/703d129f

Branch: refs/heads/YARN-5355
Commit: 703d129f8048d4b1895d61f9ed53957c33788265
Parents: ed77c36
Author: Varun Saxena 
Authored: Sat Oct 22 01:16:05 2016 +0530
Committer: Varun Saxena 
Committed: Sat Oct 22 01:16:19 2016 +0530

--
 .../collector/AppLevelTimelineCollector.java  | 14 +++---
 1 file changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/703d129f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
index d276269..e62a436 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
@@ -58,6 +58,7 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
   private final ApplicationId appId;
   private final TimelineCollectorContext context;
   private ScheduledThreadPoolExecutor appAggregationExecutor;
+  private AppLevelAggregator appAggregator;
 
   public AppLevelTimelineCollector(ApplicationId appId) {
 super(AppLevelTimelineCollector.class.getName() + " - " + 
appId.toString());
@@ -94,7 +95,8 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
 new ThreadFactoryBuilder()
 .setNameFormat("TimelineCollector Aggregation thread #%d")
 .build());
-appAggregationExecutor.scheduleAtFixedRate(new AppLevelAggregator(),
+appAggregator = new AppLevelAggregator();
+appAggregationExecutor.scheduleAtFixedRate(appAggregator,
 AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
 AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
 TimeUnit.SECONDS);
@@ -108,6 +110,8 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
   LOG.info("App-level aggregator shutdown timed out, shutdown now. ");
   appAggregationExecutor.shutdownNow();
 }
+// Perform one round of aggregation after the aggregation executor is done.
+appAggregator.aggregate();
 super.serviceStop();
   }
 
@@ -123,8 +127,7 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
 
   private class AppLevelAggregator implements Runnable {
 
-@Override
-public void run() {
+private void aggregate() {
   if (LOG.isDebugEnabled()) {
 LOG.debug("App-level real-time aggregating");
   }
@@ -156,6 +159,11 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
 LOG.debug("App-level real-time aggregation complete");
   }
 }
+
+@Override
+public void run() {
+  aggregate();
+}
   }
 
 }





hadoop git commit: YARN-5747. Application timeline metric aggregation in timeline v2 will lose last round aggregation when an application finishes (Li Lu via Varun Saxena)

2016-10-21 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/trunk f63cd78f6 -> 44eb2bd7a


YARN-5747. Application timeline metric aggregation in timeline v2 will lose 
last round aggregation when an application finishes (Li Lu via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44eb2bd7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44eb2bd7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44eb2bd7

Branch: refs/heads/trunk
Commit: 44eb2bd7ae39cca77fc8c7ad493b52ea1bb43530
Parents: f63cd78
Author: Varun Saxena 
Authored: Sat Oct 22 01:14:49 2016 +0530
Committer: Varun Saxena 
Committed: Sat Oct 22 01:14:49 2016 +0530

--
 .../collector/AppLevelTimelineCollector.java  | 14 +++---
 1 file changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44eb2bd7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
index d276269..e62a436 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
@@ -58,6 +58,7 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
   private final ApplicationId appId;
   private final TimelineCollectorContext context;
   private ScheduledThreadPoolExecutor appAggregationExecutor;
+  private AppLevelAggregator appAggregator;
 
   public AppLevelTimelineCollector(ApplicationId appId) {
 super(AppLevelTimelineCollector.class.getName() + " - " + 
appId.toString());
@@ -94,7 +95,8 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
 new ThreadFactoryBuilder()
 .setNameFormat("TimelineCollector Aggregation thread #%d")
 .build());
-appAggregationExecutor.scheduleAtFixedRate(new AppLevelAggregator(),
+appAggregator = new AppLevelAggregator();
+appAggregationExecutor.scheduleAtFixedRate(appAggregator,
 AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
 AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
 TimeUnit.SECONDS);
@@ -108,6 +110,8 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
   LOG.info("App-level aggregator shutdown timed out, shutdown now. ");
   appAggregationExecutor.shutdownNow();
 }
+// Perform one round of aggregation after the aggregation executor is done.
+appAggregator.aggregate();
 super.serviceStop();
   }
 
@@ -123,8 +127,7 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
 
   private class AppLevelAggregator implements Runnable {
 
-@Override
-public void run() {
+private void aggregate() {
   if (LOG.isDebugEnabled()) {
 LOG.debug("App-level real-time aggregating");
   }
@@ -156,6 +159,11 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
 LOG.debug("App-level real-time aggregation complete");
   }
 }
+
+@Override
+public void run() {
+  aggregate();
+}
   }
 
 }





hadoop git commit: HADOOP-13716. Add LambdaTestUtils class for tests; fix eventual consistency problem in contract test setup. Contributed by Steve Loughran.

2016-10-21 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 5754772bd -> 4b56954fe


HADOOP-13716. Add LambdaTestUtils class for tests; fix eventual consistency 
problem in contract test setup. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b56954f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b56954f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b56954f

Branch: refs/heads/branch-2.8
Commit: 4b56954fea4c49d03efa6cd329c24609f2e7a847
Parents: 5754772
Author: Steve Loughran 
Authored: Fri Oct 21 19:11:31 2016 +0100
Committer: Steve Loughran 
Committed: Fri Oct 21 19:12:03 2016 +0100

--
 .../AbstractContractRootDirectoryTest.java  |  48 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |   6 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java | 521 +++
 .../apache/hadoop/test/TestLambdaTestUtils.java | 249 +
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  |  20 +-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  48 --
 6 files changed, 816 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b56954f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
index 0a8f464..5fba4bf 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
@@ -27,12 +27,16 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.test.LambdaTestUtils;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.deleteChildren;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.dumpStats;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.listChildren;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.toList;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk;
@@ -45,6 +49,7 @@ import static 
org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk;
 public abstract class AbstractContractRootDirectoryTest extends 
AbstractFSContractTestBase {
   private static final Logger LOG =
   LoggerFactory.getLogger(AbstractContractRootDirectoryTest.class);
+  public static final int OBJECTSTORE_RETRY_TIMEOUT = 3;
 
   @Override
   public void setup() throws Exception {
@@ -79,23 +84,34 @@ public abstract class AbstractContractRootDirectoryTest 
extends AbstractFSContra
 // extra sanity checks here to avoid support calls about complete loss
 // of data
 skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
-Path root = new Path("/");
+final Path root = new Path("/");
 assertIsDirectory(root);
-// make sure it is clean
-FileSystem fs = getFileSystem();
-deleteChildren(fs, root, true);
-FileStatus[] children = listChildren(fs, root);
-if (children.length > 0) {
-  StringBuilder error = new StringBuilder();
-  error.append("Deletion of child entries failed, still have")
-  .append(children.length)
-  .append(System.lineSeparator());
-  for (FileStatus child : children) {
-error.append("  ").append(child.getPath())
-.append(System.lineSeparator());
-  }
-  fail(error.toString());
-}
+// make sure the directory is clean. This includes some retry logic
+// to forgive blobstores whose listings can be out of sync with the file
+// status;
+final FileSystem fs = getFileSystem();
+final AtomicInteger iterations = new AtomicInteger(0);
+final FileStatus[] originalChildren = listChildren(fs, root);
+LambdaTestUtils.eventually(
+OBJECTSTORE_RETRY_TIMEOUT,
+new Callable() {
+  @Override
+  public Void call() throws Exception {
+FileStatus[] deleted = deleteChildren(fs, root, true);
+FileStatus[] children = listChildren(fs, root);
+if (children.length > 0) {
+  fail(String.format(
+  "After %d 

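The eventually() call above is cut off mid-argument, so rather than guess the exact overload, here is a simplified stand-in that shows the idea: re-evaluate a probe until it stops throwing or a deadline passes, which is what lets the setup forgive eventually-consistent object-store listings:

    import java.util.concurrent.Callable;

    public final class Eventually {
      private Eventually() {
      }

      // Simplified stand-in for the idea behind LambdaTestUtils.eventually();
      // not the real API surface.
      public static <T> T eventually(long timeoutMillis, long intervalMillis,
          Callable<T> probe) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (true) {
          try {
            return probe.call();
          } catch (Exception e) {
            if (System.currentTimeMillis() > deadline) {
              throw e; // surface the last failure once time runs out
            }
            Thread.sleep(intervalMillis);
          }
        }
      }
    }
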
hadoop git commit: HADOOP-13716. Add LambdaTestUtils class for tests; fix eventual consistency problem in contract test setup. Contributed by Steve Loughran.

2016-10-21 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2ab149f0e -> 385c1daa4


HADOOP-13716. Add LambdaTestUtils class for tests; fix eventual consistency 
problem in contract test setup. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/385c1daa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/385c1daa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/385c1daa

Branch: refs/heads/branch-2
Commit: 385c1daa46758f89ff05ed3fb84b33ff1b6590bb
Parents: 2ab149f
Author: Steve Loughran 
Authored: Fri Oct 21 19:11:31 2016 +0100
Committer: Steve Loughran 
Committed: Fri Oct 21 19:11:31 2016 +0100

--
 .../AbstractContractRootDirectoryTest.java  |  48 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |   6 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java | 521 +++
 .../apache/hadoop/test/TestLambdaTestUtils.java | 249 +
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  |  20 +-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  48 --
 6 files changed, 816 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/385c1daa/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
index 0a8f464..5fba4bf 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
@@ -27,12 +27,16 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.test.LambdaTestUtils;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.deleteChildren;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.dumpStats;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.listChildren;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.toList;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk;
@@ -45,6 +49,7 @@ import static 
org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk;
 public abstract class AbstractContractRootDirectoryTest extends 
AbstractFSContractTestBase {
   private static final Logger LOG =
   LoggerFactory.getLogger(AbstractContractRootDirectoryTest.class);
+  public static final int OBJECTSTORE_RETRY_TIMEOUT = 3;
 
   @Override
   public void setup() throws Exception {
@@ -79,23 +84,34 @@ public abstract class AbstractContractRootDirectoryTest 
extends AbstractFSContra
 // extra sanity checks here to avoid support calls about complete loss
 // of data
 skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
-Path root = new Path("/");
+final Path root = new Path("/");
 assertIsDirectory(root);
-// make sure it is clean
-FileSystem fs = getFileSystem();
-deleteChildren(fs, root, true);
-FileStatus[] children = listChildren(fs, root);
-if (children.length > 0) {
-  StringBuilder error = new StringBuilder();
-  error.append("Deletion of child entries failed, still have")
-  .append(children.length)
-  .append(System.lineSeparator());
-  for (FileStatus child : children) {
-error.append("  ").append(child.getPath())
-.append(System.lineSeparator());
-  }
-  fail(error.toString());
-}
+// make sure the directory is clean. This includes some retry logic
+// to forgive blobstores whose listings can be out of sync with the file
+// status;
+final FileSystem fs = getFileSystem();
+final AtomicInteger iterations = new AtomicInteger(0);
+final FileStatus[] originalChildren = listChildren(fs, root);
+LambdaTestUtils.eventually(
+OBJECTSTORE_RETRY_TIMEOUT,
+new Callable() {
+  @Override
+  public Void call() throws Exception {
+FileStatus[] deleted = deleteChildren(fs, root, true);
+FileStatus[] children = listChildren(fs, root);
+if (children.length > 0) {
+  fail(String.format(
+  "After %d 

[14/50] [abbrv] hadoop git commit: HDFS-9390. Block management for maintenance states.

2016-10-21 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b61fb267/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
index 63617ad..c125f45 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -18,13 +18,19 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -32,6 +38,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
@@ -40,13 +48,23 @@ import org.junit.Test;
  * This class tests node maintenance.
  */
 public class TestMaintenanceState extends AdminStatesBaseTest {
-  public static final Log LOG = LogFactory.getLog(TestMaintenanceState.class);
-  static private final long EXPIRATION_IN_MS = 500;
+  public static final Logger LOG =
+  LoggerFactory.getLogger(TestMaintenanceState.class);
+  static private final long EXPIRATION_IN_MS = 50;
+  private int minMaintenanceR =
+  DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_DEFAULT;
 
   public TestMaintenanceState() {
 setUseCombinedHostFileManager();
   }
 
+  void setMinMaintenanceR(int minMaintenanceR) {
+this.minMaintenanceR = minMaintenanceR;
+getConf().setInt(
+DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY,
+minMaintenanceR);
+  }
+
   /**
* Verify a node can transition from AdminStates.ENTERING_MAINTENANCE to
* AdminStates.NORMAL.
@@ -55,21 +73,25 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
   public void testTakeNodeOutOfEnteringMaintenance() throws Exception {
 LOG.info("Starting testTakeNodeOutOfEnteringMaintenance");
 final int replicas = 1;
-final int numNamenodes = 1;
-final int numDatanodes = 1;
-final Path file1 = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
+final Path file = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
 
-startCluster(numNamenodes, numDatanodes);
+startCluster(1, 1);
 
-FileSystem fileSys = getCluster().getFileSystem(0);
-writeFile(fileSys, file1, replicas, 1);
+final FileSystem fileSys = getCluster().getFileSystem(0);
+final FSNamesystem ns = getCluster().getNamesystem(0);
+writeFile(fileSys, file, replicas, 1);
 
-DatanodeInfo nodeOutofService = takeNodeOutofService(0,
+final DatanodeInfo nodeOutofService = takeNodeOutofService(0,
 null, Long.MAX_VALUE, null, AdminStates.ENTERING_MAINTENANCE);
 
+// When node is in ENTERING_MAINTENANCE state, it can still serve read
+// requests
+assertNull(checkWithRetry(ns, fileSys, file, replicas, null,
+nodeOutofService));
+
 putNodeInService(0, nodeOutofService.getDatanodeUuid());
 
-cleanupFile(fileSys, file1);
+cleanupFile(fileSys, file);
   }
 
   /**
@@ -80,23 +102,21 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
   public void testEnteringMaintenanceExpiration() throws Exception {
 LOG.info("Starting testEnteringMaintenanceExpiration");
 final int replicas = 1;
-final int numNamenodes = 1;
-final int numDatanodes = 1;
-final Path file1 = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
+final Path file = new Path("/testEnteringMaintenanceExpiration.dat");
 
-startCluster(numNamenodes, numDatanodes);
+startCluster(1, 1);
 
-FileSystem fileSys = getCluster().getFileSystem(0);
-writeFile(fileSys, file1, replicas, 1);
+final FileSystem fileSys = getCluster().getFileSystem(0);
+writeFile(fileSys, file, replicas, 1);
 
-// expires in 500 milliseconds
-DatanodeInfo nodeOutofService = 

[03/50] [abbrv] hadoop git commit: HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for interleaving block reports. Contributed by Vinitha Gankidi.

2016-10-21 Thread aengineer
HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for 
interleaving block reports. Contributed by Vinitha Gankidi.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/391ce535
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/391ce535
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/391ce535

Branch: refs/heads/HDFS-7240
Commit: 391ce535a739dc92cb90017d759217265a4fd969
Parents: 30bb197
Author: Vinitha Reddy Gankidi 
Authored: Fri Oct 14 10:37:44 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Oct 14 18:13:54 2016 -0700

--
 .../server/blockmanagement/BlockManager.java| 75 ++--
 .../blockmanagement/DatanodeDescriptor.java | 48 -
 .../blockmanagement/DatanodeStorageInfo.java| 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  4 +-
 .../blockmanagement/TestBlockManager.java   | 19 +++--
 .../TestNameNodePrunesMissingStorages.java  | 70 +++---
 .../server/datanode/BlockReportTestBase.java| 50 +
 .../TestAddOverReplicatedStripedBlocks.java |  4 ++
 8 files changed, 147 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/391ce535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7949439..7b13add 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1347,6 +1347,8 @@ public class BlockManager implements BlockStatsMXBean {
   }
 }
 checkSafeMode();
+LOG.info("Removed blocks associated with storage {} from DataNode {}",
+storageInfo, node);
   }
 
   /**
@@ -2191,7 +2193,7 @@ public class BlockManager implements BlockStatsMXBean {
   public boolean processReport(final DatanodeID nodeID,
   final DatanodeStorage storage,
   final BlockListAsLongs newReport,
-  BlockReportContext context, boolean lastStorageInRpc) throws IOException 
{
+  BlockReportContext context) throws IOException {
 namesystem.writeLock();
 final long startTime = Time.monotonicNow(); //after acquiring write lock
 final long endTime;
@@ -2245,32 +2247,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
   
   storageInfo.receivedBlockReport();
-  if (context != null) {
-storageInfo.setLastBlockReportId(context.getReportId());
-if (lastStorageInRpc) {
-  int rpcsSeen = node.updateBlockReportContext(context);
-  if (rpcsSeen >= context.getTotalRpcs()) {
-long leaseId = blockReportLeaseManager.removeLease(node);
-BlockManagerFaultInjector.getInstance().
-removeBlockReportLease(node, leaseId);
-List zombies = node.removeZombieStorages();
-if (zombies.isEmpty()) {
-  LOG.debug("processReport 0x{}: no zombie storages found.",
-  Long.toHexString(context.getReportId()));
-} else {
-  for (DatanodeStorageInfo zombie : zombies) {
-removeZombieReplicas(context, zombie);
-  }
-}
-node.clearBlockReportContext();
-  } else {
-LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
-"report.", Long.toHexString(context.getReportId()),
-(context.getTotalRpcs() - rpcsSeen)
-);
-  }
-}
-  }
 } finally {
   endTime = Time.monotonicNow();
   namesystem.writeUnlock();
@@ -2295,36 +2271,25 @@ public class BlockManager implements BlockStatsMXBean {
 return !node.hasStaleStorages();
   }
 
-  private void removeZombieReplicas(BlockReportContext context,
-  DatanodeStorageInfo zombie) {
-LOG.warn("processReport 0x{}: removing zombie storage {}, which no " +
-"longer exists on the DataNode.",
-Long.toHexString(context.getReportId()), zombie.getStorageID());
-assert(namesystem.hasWriteLock());
-Iterator iter = zombie.getBlockIterator();
-int prevBlocks = zombie.numBlocks();
-while (iter.hasNext()) {
-  BlockInfo block = iter.next();
-  // We assume that a block can be on only one storage in a DataNode.
-  // That's why we pass in the DatanodeDescriptor 

[17/50] [abbrv] hadoop git commit: HADOOP-13522. Add %A and %a formats for fs -stat command to print permissions. Contributed by Alex Garbarini.

2016-10-21 Thread aengineer
HADOOP-13522. Add %A and %a formats for fs -stat command to print permissions. 
Contributed by Alex Garbarini.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bedfec0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bedfec0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bedfec0c

Branch: refs/heads/HDFS-7240
Commit: bedfec0c10144087168bc79501ffd5ab4fa52606
Parents: 0bc6d37
Author: Akira Ajisaka 
Authored: Tue Oct 18 14:37:32 2016 +0900
Committer: Akira Ajisaka 
Committed: Tue Oct 18 15:00:44 2016 +0900

--
 .../hadoop/fs/permission/FsPermission.java  | 12 
 .../java/org/apache/hadoop/fs/shell/Stat.java   | 11 ++-
 .../src/site/markdown/FileSystemShell.md|  4 ++--
 .../src/test/resources/testConf.xml |  6 +-
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 20 +---
 5 files changed, 46 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bedfec0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index 48a5b1c..fabfc12 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -183,6 +183,18 @@ public class FsPermission implements Writable {
 return toShort();
   }
 
+  /**
+   * Returns the FsPermission in an octal format.
+   *
+   * @return short Unlike {@link #toShort()} which provides a binary
+   * representation, this method returns the standard octal style permission.
+   */
+  public short toOctal() {
+int n = this.toShort();
+int octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
+return (short)octal;
+  }
+
   @Override
   public boolean equals(Object obj) {
 if (obj instanceof FsPermission) {

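A worked instance of the bit arithmetic above, using plain JDK ints: take rw-r--r--, i.e. octal 0644, as the raw permission bits.

    int n = 0644;                     // 110 100 100 in binary
    int octal = (n >>> 9 & 1) * 1000  // sticky bit   -> 0
              + (n >>> 6 & 7) * 100   // user  rw-    -> 6
              + (n >>> 3 & 7) * 10    // group r--    -> 4
              + (n & 7);              // other r--    -> 4
    System.out.println(octal);        // prints 644
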
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bedfec0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
index 458d3ee..42f7843 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.fs.FileStatus;
 /**
  * Print statistics about path in specified format.
  * Format sequences:
+ *   %a: Permissions in octal
+ *   %A: Permissions in symbolic style
  *   %b: Size of file in blocks
  *   %F: Type
  *   %g: Group name of owner
@@ -56,7 +58,8 @@ class Stat extends FsCommand {
   public static final String USAGE = "[format]  ...";
   public static final String DESCRIPTION =
 "Print statistics about the file/directory at " + NEWLINE +
-"in the specified format. Format accepts filesize in" + NEWLINE +
+"in the specified format. Format accepts permissions in" + NEWLINE +
+"octal (%a) and symbolic (%A), filesize in" + NEWLINE +
 "blocks (%b), type (%F), group name of owner (%g)," + NEWLINE +
 "name (%n), block size (%o), replication (%r), user name" + NEWLINE +
 "of owner (%u), modification date (%y, %Y)." + NEWLINE +
@@ -95,6 +98,12 @@ class Stat extends FsCommand {
 // this silently drops a trailing %?
 if (i + 1 == fmt.length) break;
 switch (fmt[++i]) {
+  case 'a':
+buf.append(stat.getPermission().toOctal());
+break;
+  case 'A':
+buf.append(stat.getPermission());
+break;
   case 'b':
 buf.append(stat.getLen());
 break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bedfec0c/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index ee7bc28..060c775 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -639,11 +639,11 @@ stat
 
 

[07/50] [abbrv] hadoop git commit: HADOOP-13661. Upgrade HTrace version. Contributed by Sean Mackrory.

2016-10-21 Thread aengineer
HADOOP-13661. Upgrade HTrace version. Contributed by Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed9fcbec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed9fcbec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed9fcbec

Branch: refs/heads/HDFS-7240
Commit: ed9fcbec544df149d08d9ac31989a7291eff6507
Parents: 1f304b0
Author: Wei-Chiu Chuang 
Authored: Mon Oct 17 05:04:49 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Oct 17 05:04:49 2016 -0700

--
 hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md | 2 +-
 hadoop-project/pom.xml   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed9fcbec/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
index cbdee8a..9b7084d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
@@ -48,7 +48,7 @@ LocalFileSpanReceiver is included in the htrace-core4 jar 
which is bundled
 with Hadoop.)
 
 ```
-$ cp htrace-htraced/target/htrace-htraced-4.0.1-incubating.jar $HADOOP_HOME/share/hadoop/common/lib/
+$ cp htrace-htraced/target/htrace-htraced-4.1.0-incubating.jar $HADOOP_HOME/share/hadoop/common/lib/
 ```
 
 ### Dynamic update of tracing configuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed9fcbec/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 82adebf..5826cf6 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -882,7 +882,7 @@
   
         <groupId>org.apache.htrace</groupId>
         <artifactId>htrace-core4</artifactId>
-        <version>4.0.1-incubating</version>
+        <version>4.1.0-incubating</version>
       </dependency>
       <dependency>
         <groupId>org.jdom</groupId>





[11/50] [abbrv] hadoop git commit: HDFS-11013. Correct typos in native erasure coding dump code. Contributed by László Bence Nagy.

2016-10-21 Thread aengineer
HDFS-11013. Correct typos in native erasure coding dump code. Contributed by 
László Bence Nagy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b671ee68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b671ee68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b671ee68

Branch: refs/heads/HDFS-7240
Commit: b671ee6846b79a6d106efed7cf7e1209b2cc408d
Parents: 987ee51
Author: Andrew Wang 
Authored: Mon Oct 17 14:14:50 2016 -0700
Committer: Andrew Wang 
Committed: Mon Oct 17 14:14:50 2016 -0700

--
 .../main/native/src/org/apache/hadoop/io/erasurecode/dump.c  | 8 
 .../native/src/org/apache/hadoop/io/erasurecode/isal_load.h  | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b671ee68/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
index 20bd189..e48032e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
@@ -57,11 +57,11 @@ void dumpCodingMatrix(unsigned char* buf, int n1, int n2) {
 
 void dumpEncoder(IsalEncoder* pCoder) {
   int numDataUnits = pCoder->coder.numDataUnits;
-  int numParityUnits = pCoder->coder.numDataUnits;
+  int numParityUnits = pCoder->coder.numParityUnits;
   int numAllUnits = pCoder->coder.numAllUnits;
 
-  printf("Encoding (numAlnumParityUnitslUnits = %d, numDataUnits = %d)\n",
-numParityUnits, numDataUnits);
+  printf("Encoding (numAllUnits = %d, numParityUnits = %d, numDataUnits = 
%d)\n",
+numAllUnits, numParityUnits, numDataUnits);
 
   printf("\n\nEncodeMatrix:\n");
   dumpCodingMatrix((unsigned char*) pCoder->encodeMatrix,
@@ -91,7 +91,7 @@ void dumpDecoder(IsalDecoder* pCoder) {
 
   printf("InvertMatrix:\n");
   dumpCodingMatrix((unsigned char*) pCoder->invertMatrix,
-   numDataUnits, numDataUnits);
+   numDataUnits, numAllUnits);
 
   printf("DecodeMatrix:\n");
   dumpCodingMatrix((unsigned char*) pCoder->decodeMatrix,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b671ee68/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
index 7cb7a6a..c46a531 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
@@ -57,7 +57,7 @@ typedef void (*__d_ec_encode_data_update)(int, int, int, int, 
unsigned char*,
 #endif
 
 #ifdef WINDOWS
-// For erasure_code.h
+// For gf_util.h
 typedef unsigned char (__cdecl *__d_gf_mul)(unsigned char, unsigned char);
 typedef unsigned char (__cdecl *__d_gf_inv)(unsigned char);
 typedef void (__cdecl *__d_gf_gen_rs_matrix)(unsigned char *, int, int);





[45/50] [abbrv] hadoop git commit: HADOOP-13737. Cleanup DiskChecker interface. Contributed by Arpit Agarwal.

2016-10-21 Thread aengineer
HADOOP-13737. Cleanup DiskChecker interface. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/262827cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/262827cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/262827cf

Branch: refs/heads/HDFS-7240
Commit: 262827cf75bf9c48cd95335eb04fd8ff1d64c538
Parents: 5e83a21
Author: Anu Engineer 
Authored: Thu Oct 20 13:26:23 2016 -0700
Committer: Anu Engineer 
Committed: Thu Oct 20 13:35:26 2016 -0700

--
 .../org/apache/hadoop/util/DiskChecker.java | 178 +++
 .../org/apache/hadoop/util/TestDiskChecker.java |  22 ---
 2 files changed, 68 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/262827cf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
index a36a7a0..2c73af8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
@@ -20,9 +20,6 @@ package org.apache.hadoop.util;
 
 import java.io.File;
 import java.io.IOException;
-import java.nio.file.DirectoryStream;
-import java.nio.file.DirectoryIteratorException;
-import java.nio.file.Files;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -53,62 +50,6 @@ public class DiskChecker {
 }
   }
   
-  /** 
-   * The semantics of mkdirsWithExistsCheck method is different from the mkdirs
-   * method provided in the Sun's java.io.File class in the following way:
-   * While creating the non-existent parent directories, this method checks for
-   * the existence of those directories if the mkdir fails at any point (since
-   * that directory might have just been created by some other process).
-   * If both mkdir() and the exists() check fails for any seemingly 
-   * non-existent directory, then we signal an error; Sun's mkdir would signal
-   * an error (return false) if a directory it is attempting to create already
-   * exists or the mkdir fails.
-   * @param dir
-   * @return true on success, false on failure
-   */
-  public static boolean mkdirsWithExistsCheck(File dir) {
-if (dir.mkdir() || dir.exists()) {
-  return true;
-}
-File canonDir = null;
-try {
-  canonDir = dir.getCanonicalFile();
-} catch (IOException e) {
-  return false;
-}
-String parent = canonDir.getParent();
-return (parent != null) && 
-   (mkdirsWithExistsCheck(new File(parent)) &&
-  (canonDir.mkdir() || canonDir.exists()));
-  }
-
-  /**
-   * Recurse down a directory tree, checking all child directories.
-   * @param dir
-   * @throws DiskErrorException
-   */
-  public static void checkDirs(File dir) throws DiskErrorException {
-checkDir(dir);
-IOException ex = null;
-try (DirectoryStream<java.nio.file.Path> stream =
-Files.newDirectoryStream(dir.toPath())) {
-  for (java.nio.file.Path entry: stream) {
-File child = entry.toFile();
-if (child.isDirectory()) {
-  checkDirs(child);
-}
-  }
-} catch (DirectoryIteratorException de) {
-  ex = de.getCause();
-} catch (IOException ie) {
-  ex = ie;
-}
-if (ex != null) {
-  throw new DiskErrorException("I/O error when open a directory: "
-  + dir.toString(), ex);
-}
-  }
-
   /**
* Create the directory if it doesn't exist and check that dir is readable,
* writable and executable
@@ -121,39 +62,7 @@ public class DiskChecker {
   throw new DiskErrorException("Cannot create directory: "
+ dir.toString());
 }
-checkDirAccess(dir);
-  }
-
-  /**
-   * Create the directory or check permissions if it already exists.
-   *
-   * The semantics of mkdirsWithExistsAndPermissionCheck method is different
-   * from the mkdirs method provided in the Sun's java.io.File class in the
-   * following way:
-   * While creating the non-existent parent directories, this method checks for
-   * the existence of those directories if the mkdir fails at any point (since
-   * that directory might have just been created by some other process).
-   * If both mkdir() and the exists() check fails for any seemingly
-   * non-existent directory, then we signal an error; Sun's mkdir would signal
-   * an error (return false) if a 

[30/50] [abbrv] hadoop git commit: HADOOP-13693. Remove the message about HTTP OPTIONS in SPNEGO initialization message from kms audit log.

2016-10-21 Thread aengineer
HADOOP-13693. Remove the message about HTTP OPTIONS in SPNEGO initialization 
message from kms audit log.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d75cbc57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d75cbc57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d75cbc57

Branch: refs/heads/HDFS-7240
Commit: d75cbc5749808491d2b06f80506d95b6fb1b9e9c
Parents: efdf810
Author: Xiao Chen 
Authored: Tue Oct 18 18:24:37 2016 -0700
Committer: Xiao Chen 
Committed: Tue Oct 18 18:24:59 2016 -0700

--
 .../crypto/key/kms/server/KMSAuthenticationFilter.java| 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d75cbc57/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
index 60f1918..928a8aa 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
@@ -145,9 +145,13 @@ public class KMSAuthenticationFilter
 requestURL.append("?").append(queryString);
   }
 
-  KMSWebApp.getKMSAudit().unauthenticated(
-  request.getRemoteHost(), method, requestURL.toString(),
-  kmsResponse.msg);
+  if (!method.equals("OPTIONS")) {
+// an HTTP OPTIONS request is made as part of the SPNEGO authentication
+// sequence. We do not need to audit log it, since it doesn't belong
+// to KMS context. KMS server doesn't handle OPTIONS either.
+KMSWebApp.getKMSAudit().unauthenticated(request.getRemoteHost(), method,
+requestURL.toString(), kmsResponse.msg);
+  }
 }
   }
 





[40/50] [abbrv] hadoop git commit: HDFS-10699. Log object instance get incorrectly in TestDFSAdmin. Contributed by Yiqun Lin

2016-10-21 Thread aengineer
HDFS-10699. Log object instance get incorrectly in TestDFSAdmin. Contributed by 
Yiqun Lin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fb6b651
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fb6b651
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fb6b651

Branch: refs/heads/HDFS-7240
Commit: 6fb6b651e8d3b58a903a792e7d55f73f8b4032d2
Parents: 5c0bffd
Author: Brahma Reddy Battula 
Authored: Thu Oct 20 21:28:16 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Oct 20 21:28:16 2016 +0530

--
 .../src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fb6b651/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index dca42ea..4bbf05d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -66,7 +66,7 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 public class TestDFSAdmin {
-  private static final Log LOG = LogFactory.getLog(DFSAdmin.class);
+  private static final Log LOG = LogFactory.getLog(TestDFSAdmin.class);
   private Configuration conf = null;
   private MiniDFSCluster cluster;
   private DFSAdmin admin;





[31/50] [abbrv] hadoop git commit: HADOOP-12082 Support multiple authentication schemes via AuthenticationFilter

2016-10-21 Thread aengineer
HADOOP-12082 Support multiple authentication schemes via AuthenticationFilter


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bca3852
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bca3852
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bca3852

Branch: refs/heads/HDFS-7240
Commit: 4bca385241c0fc8ff168c7b0f2984a7aed2c7492
Parents: d75cbc5
Author: Benoy Antony 
Authored: Tue Oct 18 18:32:01 2016 -0700
Committer: Benoy Antony 
Committed: Tue Oct 18 18:32:01 2016 -0700

--
 hadoop-common-project/hadoop-auth/pom.xml   |  42 +++
 .../client/KerberosAuthenticator.java   |   8 +-
 .../server/AuthenticationFilter.java|  47 ++-
 .../server/AuthenticationHandler.java   |   2 +-
 .../server/AuthenticationHandlerUtil.java   | 105 ++
 .../server/CompositeAuthenticationHandler.java  |  30 ++
 .../authentication/server/HttpConstants.java|  55 +++
 .../server/LdapAuthenticationHandler.java   | 339 +++
 .../MultiSchemeAuthenticationHandler.java   | 209 
 .../authentication/server/package-info.java |  27 ++
 .../src/site/markdown/Configuration.md  | 137 
 .../client/TestKerberosAuthenticator.java   |  71 +++-
 .../authentication/server/LdapConstants.java|  31 ++
 .../server/TestLdapAuthenticationHandler.java   | 159 +
 .../TestMultiSchemeAuthenticationHandler.java   | 189 +++
 .../DelegationTokenAuthenticationFilter.java|   9 +-
 .../DelegationTokenAuthenticationHandler.java   |  25 +-
 ...emeDelegationTokenAuthenticationHandler.java | 182 ++
 hadoop-project/pom.xml  |   4 +
 19 files changed, 1649 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bca3852/hadoop-common-project/hadoop-auth/pom.xml
--
diff --git a/hadoop-common-project/hadoop-auth/pom.xml 
b/hadoop-common-project/hadoop-auth/pom.xml
index 4cbdc49..0b37715 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -135,6 +135,48 @@
       <groupId>org.apache.kerby</groupId>
       <artifactId>kerb-simplekdc</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.directory.server</groupId>
+      <artifactId>apacheds-core</artifactId>
+      <version>${apacheds.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.directory.server</groupId>
+      <artifactId>apacheds-protocol-ldap</artifactId>
+      <version>${apacheds.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.directory.server</groupId>
+      <artifactId>apacheds-ldif-partition</artifactId>
+      <version>${apacheds.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.directory.api</groupId>
+      <artifactId>api-ldap-codec-core</artifactId>
+      <version>${ldap-api.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.directory.api</groupId>
+      <artifactId>api-ldap-model</artifactId>
+      <version>${ldap-api.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.directory.server</groupId>
+      <artifactId>apacheds-server-integ</artifactId>
+      <version>${apacheds.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.directory.server</groupId>
+      <artifactId>apacheds-core-integ</artifactId>
+      <version>${apacheds.version}</version>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bca3852/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index a69ee46..ceec927 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -14,6 +14,7 @@
 package org.apache.hadoop.security.authentication.client;
 
 import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.security.authentication.server.HttpConstants;
 import org.apache.hadoop.security.authentication.util.AuthToken;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.ietf.jgss.GSSContext;
@@ -57,17 +58,18 @@ public class KerberosAuthenticator implements Authenticator 
{
   /**
* HTTP header used by the SPNEGO server endpoint during an authentication 
sequence.
*/
-  public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
+  public static final String WWW_AUTHENTICATE =
+  HttpConstants.WWW_AUTHENTICATE_HEADER;
 
   /**
* HTTP header used by the SPNEGO client endpoint during an authentication 
sequence.
*/
-  public static final String AUTHORIZATION = "Authorization";
+  public static final String AUTHORIZATION = 

[43/50] [abbrv] hadoop git commit: HADOOP-13716. Add LambdaTestUtils class for tests; fix eventual consistency problem in contract test setup. Contributed by Steve Loughran.

2016-10-21 Thread aengineer
HADOOP-13716. Add LambdaTestUtils class for tests; fix eventual consistency 
problem in contract test setup. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fbf4cd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fbf4cd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fbf4cd5

Branch: refs/heads/HDFS-7240
Commit: 3fbf4cd5da13dde68b77e581ea2d4aa564c8c8b7
Parents: 6d2da38
Author: Anu Engineer 
Authored: Thu Oct 20 12:33:58 2016 -0700
Committer: Anu Engineer 
Committed: Thu Oct 20 12:33:58 2016 -0700

--
 .../AbstractContractRootDirectoryTest.java  |  48 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |   6 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java | 521 +++
 .../apache/hadoop/test/TestLambdaTestUtils.java | 395 ++
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  |  20 +-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  48 --
 6 files changed, 962 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbf4cd5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
index 0a8f464..5fba4bf 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
@@ -27,12 +27,16 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.test.LambdaTestUtils;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.deleteChildren;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.dumpStats;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.listChildren;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.toList;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk;
@@ -45,6 +49,7 @@ import static 
org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk;
 public abstract class AbstractContractRootDirectoryTest extends 
AbstractFSContractTestBase {
   private static final Logger LOG =
   LoggerFactory.getLogger(AbstractContractRootDirectoryTest.class);
+  public static final int OBJECTSTORE_RETRY_TIMEOUT = 3;
 
   @Override
   public void setup() throws Exception {
@@ -79,23 +84,34 @@ public abstract class AbstractContractRootDirectoryTest 
extends AbstractFSContra
 // extra sanity checks here to avoid support calls about complete loss
 // of data
 skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
-Path root = new Path("/");
+final Path root = new Path("/");
 assertIsDirectory(root);
-// make sure it is clean
-FileSystem fs = getFileSystem();
-deleteChildren(fs, root, true);
-FileStatus[] children = listChildren(fs, root);
-if (children.length > 0) {
-  StringBuilder error = new StringBuilder();
-  error.append("Deletion of child entries failed, still have")
-  .append(children.length)
-  .append(System.lineSeparator());
-  for (FileStatus child : children) {
-error.append("  ").append(child.getPath())
-.append(System.lineSeparator());
-  }
-  fail(error.toString());
-}
+// make sure the directory is clean. This includes some retry logic
+// to forgive blobstores whose listings can be out of sync with the file
+// status;
+final FileSystem fs = getFileSystem();
+final AtomicInteger iterations = new AtomicInteger(0);
+final FileStatus[] originalChildren = listChildren(fs, root);
+LambdaTestUtils.eventually(
+OBJECTSTORE_RETRY_TIMEOUT,
+new Callable<Void>() {
+  @Override
+  public Void call() throws Exception {
+FileStatus[] deleted = deleteChildren(fs, root, true);
+FileStatus[] children = listChildren(fs, root);
+if (children.length > 0) {
+  fail(String.format(
+  "After %d attempts: listing after rm /* not empty"
+  + 

[49/50] [abbrv] hadoop git commit: HDFS-10730. Fix some failed tests due to BindException. Contributed by Yiqun Lin

2016-10-21 Thread aengineer
HDFS-10730. Fix some failed tests due to BindException. Contributed by Yiqun Lin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f63cd78f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f63cd78f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f63cd78f

Branch: refs/heads/HDFS-7240
Commit: f63cd78f6008bf7cfc9ee74217ed6f3d4f5bec5c
Parents: 754cb4e
Author: Brahma Reddy Battula 
Authored: Fri Oct 21 18:16:39 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Fri Oct 21 18:16:39 2016 +0530

--
 .../java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java   | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f63cd78f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
index d223354..b532443 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -252,7 +252,7 @@ public class TestDecommissionWithStriped {
 Thread.sleep(3000); // grace period to trigger decommissioning call
 // start datanode so that decommissioning live node will be finished
 for (DataNodeProperties dnp : stoppedDns) {
-  cluster.restartDataNode(dnp, true);
+  cluster.restartDataNode(dnp);
   LOG.info("Restarts stopped datanode:{} to trigger block reconstruction",
   dnp.datanode);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f63cd78f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
index 908ab0c..8f83ba5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
@@ -479,7 +479,7 @@ public class TestFileChecksum {
 }
 
 if (dnIdxToDie != -1) {
-  cluster.restartDataNode(dnIdxToDie, true);
+  cluster.restartDataNode(dnIdxToDie);
 }
 
 return fc;





[37/50] [abbrv] hadoop git commit: HADOOP-13735 ITestS3AFileContextStatistics.testStatistics() failing. Contributed by Pieter Reuse

2016-10-21 Thread aengineer
HADOOP-13735 ITestS3AFileContextStatistics.testStatistics() failing. 
Contributed by Pieter Reuse


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ae270af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ae270af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ae270af

Branch: refs/heads/HDFS-7240
Commit: 9ae270af02c243993f853513c731cb268430e492
Parents: 73504b1
Author: Steve Loughran 
Authored: Thu Oct 20 14:50:30 2016 +0100
Committer: Steve Loughran 
Committed: Thu Oct 20 14:50:59 2016 +0100

--
 .../java/org/apache/hadoop/fs/s3a/S3AFileSystem.java   |  2 +-
 .../org/apache/hadoop/fs/s3a/S3AInstrumentation.java   | 13 ++---
 2 files changed, 11 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ae270af/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 2354819..9908ba7 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -548,7 +548,7 @@ public class S3AFileSystem extends FileSystem {
   progress,
   partSize,
   blockFactory,
-  instrumentation.newOutputStreamStatistics(),
+  instrumentation.newOutputStreamStatistics(statistics),
   new WriteOperationHelper(key)
   ),
   null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ae270af/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
index 963c53f..fb8c852 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
@@ -37,6 +37,7 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.fs.FileSystem.Statistics;
 
 import static org.apache.hadoop.fs.s3a.Statistic.*;
 
@@ -639,9 +640,8 @@ public class S3AInstrumentation {
* Create a stream output statistics instance.
* @return the new instance
*/
-
-  OutputStreamStatistics newOutputStreamStatistics() {
-return new OutputStreamStatistics();
+  OutputStreamStatistics newOutputStreamStatistics(Statistics statistics) {
+return new OutputStreamStatistics(statistics);
   }
 
   /**
@@ -677,6 +677,12 @@ public class S3AInstrumentation {
 private final AtomicLong queueDuration = new AtomicLong(0);
 private final AtomicLong exceptionsInMultipartFinalize = new AtomicLong(0);
 
+private Statistics statistics;
+
+public OutputStreamStatistics(Statistics statistics){
+  this.statistics = statistics;
+}
+
 /**
  * Block is queued for upload.
  */
@@ -717,6 +723,7 @@ public class S3AInstrumentation {
 /** Intermediate report of bytes uploaded. */
 void bytesTransferred(long byteCount) {
   bytesUploaded.addAndGet(byteCount);
+  statistics.incrementBytesWritten(byteCount);
   bytesPendingUpload.addAndGet(-byteCount);
   incrementGauge(STREAM_WRITE_BLOCK_UPLOADS_DATA_PENDING, -byteCount);
 }





[27/50] [abbrv] hadoop git commit: HDFS-10906. Add unit tests for Trash with HDFS encryption zones. Contributed by Hanisha Koneru.

2016-10-21 Thread aengineer
HDFS-10906. Add unit tests for Trash with HDFS encryption zones. Contributed by 
Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c62ae710
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c62ae710
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c62ae710

Branch: refs/heads/HDFS-7240
Commit: c62ae7107f025091652e79db3edfca5c4dc84e4a
Parents: 6c348c5
Author: Xiaoyu Yao 
Authored: Mon Oct 17 15:25:24 2016 -0700
Committer: Xiaoyu Yao 
Committed: Tue Oct 18 14:05:43 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  32 +-
 .../hdfs/TestTrashWithEncryptionZones.java  | 188 
 .../TestTrashWithSecureEncryptionZones.java | 443 +++
 3 files changed, 662 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c62ae710/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 963aaa6..7f26b03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -27,6 +27,7 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.BufferedOutputStream;
@@ -114,7 +115,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -167,6 +167,7 @@ import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Level;
 import org.junit.Assume;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.annotations.VisibleForTesting;
 import static 
org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
@@ -2054,4 +2055,33 @@ public class DFSTestUtil {
   }
 }
   }
+
+  public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
+  boolean shouldExistInTrash) throws Exception {
+Path trashPath = Path.mergePaths(shell.getCurrentTrashDir(path), path);
+
+verifyDelete(shell, fs, path, trashPath, shouldExistInTrash);
+  }
+
+  public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
+  Path trashPath, boolean shouldExistInTrash) throws Exception {
+assertTrue(path + " file does not exist", fs.exists(path));
+
+// Verify that trashPath has a path component named ".Trash"
+Path checkTrash = trashPath;
+while (!checkTrash.isRoot() && !checkTrash.getName().equals(".Trash")) {
+  checkTrash = checkTrash.getParent();
+}
+assertEquals("No .Trash component found in trash path " + trashPath,
+".Trash", checkTrash.getName());
+
+String[] argv = new String[]{"-rm", "-r", path.toString()};
+int res = ToolRunner.run(shell, argv);
+assertEquals("rm failed", 0, res);
+if (shouldExistInTrash) {
+  assertTrue("File not in trash : " + trashPath, fs.exists(trashPath));
+} else {
+  assertFalse("File in trash : " + trashPath, fs.exists(trashPath));
+}
+  }
 }
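The verifyDelete() helpers above drive the shell's rm -r and then assert the trash state. A sketch of how a test would call them (fs and conf are assumed to come from a running MiniDFSCluster; the path is invented):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;

public class VerifyDeleteExample {
  static void deleteAndExpectTrash(FileSystem fs, Configuration conf)
      throws Exception {
    FsShell shell = new FsShell(conf);
    Path file = new Path("/zones/zone1/file1");
    DFSTestUtil.createFile(fs, file, 1024L, (short) 1, 0xFEEDL);
    // Runs "-rm -r /zones/zone1/file1" and asserts the file landed
    // under a .Trash directory instead of being deleted outright.
    DFSTestUtil.verifyDelete(shell, fs, file, true);
  }
}
```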

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c62ae710/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
new file mode 100644
index 000..2a8d493
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with 

[33/50] [abbrv] hadoop git commit: YARN-5561. [Atsv2] : Support for ability to retrieve apps/app-attempt/containers and entities via REST. Contributed by Rohith Sharma K S.

2016-10-21 Thread aengineer
YARN-5561. [Atsv2] : Support for ability to retrieve 
apps/app-attempt/containers and entities via REST. Contributed by Rohith Sharma 
K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9c4616b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9c4616b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9c4616b

Branch: refs/heads/HDFS-7240
Commit: e9c4616b5e47e9c616799abc532269572ab24e6e
Parents: c5573e6
Author: Sangjin Lee 
Authored: Wed Oct 19 09:45:23 2016 -0700
Committer: Sangjin Lee 
Committed: Wed Oct 19 09:45:23 2016 -0700

--
 .../reader/TimelineReaderWebServices.java   | 739 +++
 .../reader/TestTimelineReaderWebServices.java   | 185 +
 .../TestFileSystemTimelineReaderImpl.java   |  44 +-
 3 files changed, 964 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c4616b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index fcab78c..db0c4e1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -2120,4 +2120,743 @@ public class TimelineReaderWebServices {
 infofilters, conffilters, metricfilters, eventfilters,
 confsToRetrieve, metricsToRetrieve, fields, metricsLimit);
   }
+
+  /**
+   * Return a set of application-attempt entities for a given applicationId.
+   * Cluster ID is not provided by client so default cluster ID has to be 
taken.
+   * If userid, flow name and flowrun id which are optional query parameters 
are
+   * not specified, they will be queried based on app id and default cluster id
+   * from the flow context information stored in underlying storage
+   * implementation. If number of matching entities are more than the limit,
+   * most recent entities till the limit is reached, will be returned.
+   *
+   * @param req Servlet request.
+   * @param res Servlet response.
+   * @param appId Application id to which the entities to be queried belong to(
+   *  Mandatory path param).
+   * @param userId User id which should match for the entities(Optional query
+   *  param)
+   * @param flowName Flow name which should match for the entities(Optional
+   *  query param).
+   * @param flowRunId Run id which should match for the entities(Optional query
+   *  param).
+   * @param limit If specified, defines the number of entities to return. The
+   *  maximum possible value for limit can be {@link Long#MAX_VALUE}. 
If
+   *  it is not specified or has a value less than 0, then limit will 
be
+   *  considered as 100. (Optional query param).
+   * @param createdTimeStart If specified, matched entities should not be
+   *  created before this timestamp(Optional query param).
+   * @param createdTimeEnd If specified, matched entities should not be created
+   *  after this timestamp(Optional query param).
+   * @param relatesTo If specified, matched entities should relate to given
+   *  entities associated with a entity type. relatesto is a comma
+   *  separated list in the format
+   *  [entitytype]:[entityid1]:[entityid2]... (Optional query param).
+   * @param isRelatedTo If specified, matched entities should be related to
+   *  given entities associated with a entity type. relatesto is a 
comma
+   *  separated list in the format
+   *  [entitytype]:[entityid1]:[entityid2]... (Optional query param).
+   * @param infofilters If specified, matched entities should have exact 
matches
+   *  to the given info represented as key-value pairs. This is
+   *  represented as infofilters=info1:value1,info2:value2... (Optional
+   *  query param).
+   * @param conffilters If specified, matched entities should have exact 
matches
+   *  to the given configs represented as key-value pairs. This is
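The javadoc is truncated by the archive, but the endpoint it documents is the new app-attempt listing on the v2 timeline reader. A hedged client sketch (host, port, and application id are invented; the path and query parameters follow the javadoc above):

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class AppAttemptQueryDemo {
  public static void main(String[] args) throws Exception {
    // Cluster id is defaulted by the server; optional filters such as
    // limit ride along as query parameters.
    URL url = new URL("http://localhost:8188/ws/v2/timeline/"
        + "apps/application_1476900000000_0001/appattempts?limit=10");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // JSON array of app-attempt entities
      }
    }
  }
}
```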

[28/50] [abbrv] hadoop git commit: MAPREDUCE-6791. remove unnecessary dependency from hadoop-mapreduce-client-jobclient to hadoop-mapreduce-client-shuffle (haibochen via rkanter)

2016-10-21 Thread aengineer
MAPREDUCE-6791. remove unnecessary dependency from 
hadoop-mapreduce-client-jobclient to hadoop-mapreduce-client-shuffle (haibochen 
via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29caf6d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29caf6d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29caf6d7

Branch: refs/heads/HDFS-7240
Commit: 29caf6d7df54a77b11399bb5f73d62b3b38ae912
Parents: c62ae71
Author: Robert Kanter 
Authored: Tue Oct 18 18:00:29 2016 -0700
Committer: Robert Kanter 
Committed: Tue Oct 18 18:00:29 2016 -0700

--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29caf6d7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index c4fef7e..cd181fe 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -42,10 +42,6 @@
 
 
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-app</artifactId>
       <scope>test</scope>
     </dependency>





[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2016-10-21 Thread aengineer
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9e45ed3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9e45ed3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9e45ed3

Branch: refs/heads/HDFS-7240
Commit: a9e45ed3ec73d4574eba70a3a51566cdb2eea19c
Parents: 9176dd2 f63cd78
Author: Anu Engineer 
Authored: Fri Oct 21 09:43:53 2016 -0700
Committer: Anu Engineer 
Committed: Fri Oct 21 09:43:53 2016 -0700

--
 hadoop-common-project/hadoop-auth/pom.xml   |  42 +
 .../client/KerberosAuthenticator.java   |   8 +-
 .../server/AuthenticationFilter.java|  47 +-
 .../server/AuthenticationHandler.java   |   2 +-
 .../server/AuthenticationHandlerUtil.java   | 105 +++
 .../server/CompositeAuthenticationHandler.java  |  30 +
 .../authentication/server/HttpConstants.java|  55 ++
 .../server/LdapAuthenticationHandler.java   | 339 
 .../MultiSchemeAuthenticationHandler.java   | 209 +
 .../authentication/server/package-info.java |  27 +
 .../src/site/markdown/Configuration.md  | 137 +++
 .../client/AuthenticatorTestCase.java   |  49 +-
 .../client/TestKerberosAuthenticator.java   |  71 +-
 .../authentication/server/LdapConstants.java|  31 +
 .../server/TestLdapAuthenticationHandler.java   | 159 
 .../TestMultiSchemeAuthenticationHandler.java   | 189 
 .../hadoop/fs/CommonConfigurationKeys.java  |  26 -
 .../java/org/apache/hadoop/fs/FileSystem.java   |  14 +-
 .../apache/hadoop/fs/RawLocalFileSystem.java|   5 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java|  11 +-
 .../hadoop/fs/permission/FsPermission.java  |  12 +
 .../java/org/apache/hadoop/fs/shell/Stat.java   |  11 +-
 .../hadoop/fs/viewfs/ChRootedFileSystem.java|   5 +
 .../org/apache/hadoop/fs/viewfs/InodeTree.java  | 206 ++---
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  99 +--
 .../apache/hadoop/io/erasurecode/CodecUtil.java | 168 +++-
 .../io/erasurecode/ErasureCodeConstants.java|   3 +-
 .../io/erasurecode/ErasureCodecOptions.java |  37 +
 .../erasurecode/codec/AbstractErasureCodec.java |  53 --
 .../io/erasurecode/codec/DummyErasureCodec.java |  45 +
 .../io/erasurecode/codec/ErasureCodec.java  |  76 +-
 .../io/erasurecode/codec/HHXORErasureCodec.java |  20 +-
 .../io/erasurecode/codec/RSErasureCodec.java|  20 +-
 .../io/erasurecode/codec/XORErasureCodec.java   |  22 +-
 .../io/erasurecode/codec/package-info.java  |  28 +
 .../erasurecode/coder/AbstractErasureCoder.java |  64 --
 .../coder/AbstractErasureCodingStep.java|  61 --
 .../coder/AbstractErasureDecoder.java   | 170 
 .../coder/AbstractErasureEncoder.java   |  62 --
 .../coder/AbstractHHErasureCodingStep.java  |  49 --
 .../erasurecode/coder/DummyErasureDecoder.java  |  46 +
 .../erasurecode/coder/DummyErasureEncoder.java  |  45 +
 .../io/erasurecode/coder/ErasureCoder.java  |  25 +-
 .../io/erasurecode/coder/ErasureCodingStep.java |   8 +-
 .../io/erasurecode/coder/ErasureDecoder.java| 198 +
 .../erasurecode/coder/ErasureDecodingStep.java  |  21 +-
 .../io/erasurecode/coder/ErasureEncoder.java|  91 ++
 .../erasurecode/coder/ErasureEncodingStep.java  |  22 +-
 .../erasurecode/coder/HHErasureCodingStep.java  |  68 ++
 .../erasurecode/coder/HHXORErasureDecoder.java  |  24 +-
 .../coder/HHXORErasureDecodingStep.java |   2 +-
 .../erasurecode/coder/HHXORErasureEncoder.java  |  19 +-
 .../coder/HHXORErasureEncodingStep.java |   2 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  |  16 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |  20 +-
 .../io/erasurecode/coder/XORErasureDecoder.java |  15 +-
 .../io/erasurecode/coder/XORErasureEncoder.java |  16 +-
 .../io/erasurecode/coder/package-info.java  |  28 +
 .../io/erasurecode/rawcoder/CoderUtil.java  |   2 +-
 .../DelegationTokenAuthenticationFilter.java|   9 +-
 .../DelegationTokenAuthenticationHandler.java   |  25 +-
 ...emeDelegationTokenAuthenticationHandler.java | 182 
 .../org/apache/hadoop/util/DiskChecker.java | 178 ++--
 .../src/org/apache/hadoop/io/erasurecode/dump.c |   8 +-
 .../apache/hadoop/io/erasurecode/isal_load.h|   2 +-
 .../src/main/resources/core-default.xml |  74 +-
 .../src/site/markdown/ClusterSetup.md   |   2 +-
 .../src/site/markdown/Compatibility.md  |  16 +-
 .../src/site/markdown/FileSystemShell.md|   4 +-
 .../site/markdown/InterfaceClassification.md|  28 +-
 .../hadoop-common/src/site/markdown/Tracing.md  |   2 +-
 .../src/site/markdown/filesystem/filesystem.md  |  20 +-
 .../markdown/filesystem/fsdatainputstream.md|  16 +-
 .../site/markdown/filesystem/introduction.md|  12 +-
 

[42/50] [abbrv] hadoop git commit: HDFS-11018. Incorrect check and message in FsDatasetImpl#invalidate. Contributed by Yiqun Lin.

2016-10-21 Thread aengineer
HDFS-11018. Incorrect check and message in FsDatasetImpl#invalidate. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d2da38d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d2da38d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d2da38d

Branch: refs/heads/HDFS-7240
Commit: 6d2da38d16cebe9b82f1048f87127eecee33664c
Parents: f872c6b
Author: Wei-Chiu Chuang 
Authored: Thu Oct 20 10:49:39 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Oct 20 10:56:39 2016 -0700

--
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 31 +---
 1 file changed, 21 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d2da38d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index ba653ac..84569f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -786,8 +786,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   throws ReplicaNotFoundException {
 ReplicaInfo info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
 if (info == null) {
-  throw new ReplicaNotFoundException(
-  ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
+  if (volumeMap.get(b.getBlockPoolId(), b.getLocalBlock().getBlockId())
+  == null) {
+throw new ReplicaNotFoundException(
+ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
+  } else {
+throw new ReplicaNotFoundException(
+ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + b);
+  }
 }
 return info;
   }
@@ -1878,16 +1884,21 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   try (AutoCloseableLock lock = datasetLock.acquire()) {
 final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
 if (info == null) {
-  // It is okay if the block is not found -- it may be deleted earlier.
-  LOG.info("Failed to delete replica " + invalidBlks[i]
-  + ": ReplicaInfo not found.");
-  continue;
-}
-if (info.getGenerationStamp() != invalidBlks[i].getGenerationStamp()) {
-  errors.add("Failed to delete replica " + invalidBlks[i]
-  + ": GenerationStamp not matched, info=" + info);
+  ReplicaInfo infoByBlockId =
+  volumeMap.get(bpid, invalidBlks[i].getBlockId());
+  if (infoByBlockId == null) {
+// It is okay if the block is not found -- it
+// may be deleted earlier.
+LOG.info("Failed to delete replica " + invalidBlks[i]
++ ": ReplicaInfo not found.");
+  } else {
+errors.add("Failed to delete replica " + invalidBlks[i]
++ ": GenerationStamp not matched, existing replica is "
++ Block.toString(infoByBlockId));
+  }
   continue;
 }
+
 v = (FsVolumeImpl)info.getVolume();
 if (v == null) {
   errors.add("Failed to delete replica " + invalidBlks[i]





[35/50] [abbrv] hadoop git commit: HDFS-9820. Improve distcp to support efficient restore to an earlier snapshot. Contributed by Yongjun Zhang.

2016-10-21 Thread aengineer
HDFS-9820. Improve distcp to support efficient restore to an earlier snapshot. 
Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8650cc84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8650cc84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8650cc84

Branch: refs/heads/HDFS-7240
Commit: 8650cc84f20e7d8c32dcdcd91c94372d476e2276
Parents: b456410
Author: Yongjun Zhang 
Authored: Fri Oct 14 15:17:33 2016 -0700
Committer: Yongjun Zhang 
Committed: Wed Oct 19 17:37:54 2016 -0700

--
 .../java/org/apache/hadoop/tools/DiffInfo.java  |  47 +-
 .../java/org/apache/hadoop/tools/DistCp.java|  37 +-
 .../apache/hadoop/tools/DistCpConstants.java|   1 +
 .../apache/hadoop/tools/DistCpOptionSwitch.java |   5 +
 .../org/apache/hadoop/tools/DistCpOptions.java  |  79 +-
 .../org/apache/hadoop/tools/DistCpSync.java | 256 --
 .../org/apache/hadoop/tools/OptionsParser.java  |  27 +-
 .../apache/hadoop/tools/SimpleCopyListing.java  |  17 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java |   4 +-
 .../hadoop/tools/TestDistCpSyncReverseBase.java | 868 +++
 .../tools/TestDistCpSyncReverseFromSource.java  |  36 +
 .../tools/TestDistCpSyncReverseFromTarget.java  |  36 +
 .../apache/hadoop/tools/TestOptionsParser.java  |  85 +-
 13 files changed, 1343 insertions(+), 155 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8650cc84/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
index 79bb7fe..7e56301 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
@@ -44,28 +44,49 @@ class DiffInfo {
   };
 
   /** The source file/dir of the rename or deletion op */
-  final Path source;
+  private Path source;
+  /** The target file/dir of the rename op. Null means the op is deletion. */
+  private Path target;
+
+  private SnapshotDiffReport.DiffType type;
   /**
* The intermediate file/dir for the op. For a rename or a delete op,
* we first rename the source to this tmp file/dir.
*/
   private Path tmp;
-  /** The target file/dir of the rename op. Null means the op is deletion. */
-  Path target;
-
-  private final SnapshotDiffReport.DiffType type;
-
-  public SnapshotDiffReport.DiffType getType(){
-return this.type;
-  }
 
-  DiffInfo(Path source, Path target, SnapshotDiffReport.DiffType type) {
+  DiffInfo(final Path source, final Path target,
+  SnapshotDiffReport.DiffType type) {
 assert source != null;
 this.source = source;
 this.target= target;
 this.type = type;
   }
 
+  void setSource(final Path source) {
+this.source = source;
+  }
+
+  Path getSource() {
+return source;
+  }
+
+  void setTarget(final Path target) {
+this.target = target;
+  }
+
+  Path getTarget() {
+return target;
+  }
+
+  public void setType(final SnapshotDiffReport.DiffType type){
+this.type = type;
+  }
+
+  public SnapshotDiffReport.DiffType getType(){
+return type;
+  }
+
   void setTmp(Path tmp) {
 this.tmp = tmp;
   }
@@ -73,4 +94,10 @@ class DiffInfo {
   Path getTmp() {
 return tmp;
   }
+
+  @Override
+  public String toString() {
+return type + ": src=" + String.valueOf(source) + " tgt="
++ String.valueOf(target) + " tmp=" + String.valueOf(tmp);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8650cc84/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
index be58f13..ab58e9c 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
@@ -77,6 +77,24 @@ public class DistCp extends Configured implements Tool {
   private boolean submitted;
   private FileSystem jobFS;
 
+  private void prepareFileListing(Job job) throws Exception {
+if (inputOptions.shouldUseSnapshotDiff()) {
+  // When "-diff" or "-rdiff" is passed, do sync() first, then
+  // create copyListing based on snapshot diff.
+  DistCpSync distCpSync = new DistCpSync(inputOptions, getConf());
+  if (distCpSync.sync()) {
+createInputFileListingWithDiff(job, 

[38/50] [abbrv] hadoop git commit: HDFS-9480. Expose nonDfsUsed via StorageTypeStats. Contributed by Brahma Reddy Battula

2016-10-21 Thread aengineer
HDFS-9480. Expose nonDfsUsed via StorageTypeStats. Contributed by Brahma Reddy 
Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c73be13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c73be13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c73be13

Branch: refs/heads/HDFS-7240
Commit: 4c73be135ca6ee2ba0b075a507097900db206b09
Parents: 9ae270a
Author: Brahma Reddy Battula 
Authored: Thu Oct 20 20:45:45 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Oct 20 20:45:45 2016 +0530

--
 .../server/blockmanagement/StorageTypeStats.java| 16 +---
 1 file changed, 13 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c73be13/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java
index 005e6d5..978009e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java
@@ -32,16 +32,19 @@ import org.apache.hadoop.classification.InterfaceStability;
 public class StorageTypeStats {
   private long capacityTotal = 0L;
   private long capacityUsed = 0L;
+  private long capacityNonDfsUsed = 0L;
   private long capacityRemaining = 0L;
   private long blockPoolUsed = 0L;
   private int nodesInService = 0;
 
-  @ConstructorProperties({"capacityTotal",
-  "capacityUsed", "capacityRemaining",  "blockPoolUsed", "nodesInService"})
-  public StorageTypeStats(long capacityTotal, long capacityUsed,
+  @ConstructorProperties({"capacityTotal", "capacityUsed", "capacityNonDfsUsed",
+  "capacityRemaining", "blockPoolUsed", "nodesInService"})
+  public StorageTypeStats(
+  long capacityTotal, long capacityUsed, long capacityNonDfsUsed,
   long capacityRemaining, long blockPoolUsed, int nodesInService) {
 this.capacityTotal = capacityTotal;
 this.capacityUsed = capacityUsed;
+this.capacityNonDfsUsed = capacityNonDfsUsed;
 this.capacityRemaining = capacityRemaining;
 this.blockPoolUsed = blockPoolUsed;
 this.nodesInService = nodesInService;
@@ -55,6 +58,10 @@ public class StorageTypeStats {
 return capacityUsed;
   }
 
+  public long getCapacityNonDfsUsed() {
+return capacityNonDfsUsed;
+  }
+
   public long getCapacityRemaining() {
 return capacityRemaining;
   }
@@ -72,6 +79,7 @@ public class StorageTypeStats {
   StorageTypeStats(StorageTypeStats other) {
 capacityTotal = other.capacityTotal;
 capacityUsed = other.capacityUsed;
+capacityNonDfsUsed = other.capacityNonDfsUsed;
 capacityRemaining = other.capacityRemaining;
 blockPoolUsed = other.blockPoolUsed;
 nodesInService = other.nodesInService;
@@ -80,6 +88,7 @@ public class StorageTypeStats {
   void addStorage(final DatanodeStorageInfo info,
   final DatanodeDescriptor node) {
 capacityUsed += info.getDfsUsed();
+capacityNonDfsUsed += info.getNonDfsUsed();
 blockPoolUsed += info.getBlockPoolUsed();
 if (node.isInService()) {
   capacityTotal += info.getCapacity();
@@ -98,6 +107,7 @@ public class StorageTypeStats {
   void subtractStorage(final DatanodeStorageInfo info,
   final DatanodeDescriptor node) {
 capacityUsed -= info.getDfsUsed();
+capacityNonDfsUsed -= info.getNonDfsUsed();
 blockPoolUsed -= info.getBlockPoolUsed();
 if (node.isInService()) {
   capacityTotal -= info.getCapacity();
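
A hedged sketch of how a caller might read the new counter once aggregated; it assumes the per-storage-type stats map is still exposed through BlockManager#getStorageTypeStats(), which this diff does not itself show:

    // Assumes BlockManager#getStorageTypeStats() as on other branches.
    Map<StorageType, StorageTypeStats> byType = blockManager.getStorageTypeStats();
    StorageTypeStats diskStats = byType.get(StorageType.DISK);
    long nonDfsBytes = diskStats.getCapacityNonDfsUsed(); // newly exposed counter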





[46/50] [abbrv] hadoop git commit: HDFS-10998. Add unit tests for HDFS command 'dfsadmin -fetchImage' in HA. Contributed by Xiaobing Zhou

2016-10-21 Thread aengineer
HDFS-10998. Add unit tests for HDFS command 'dfsadmin -fetchImage' in HA. Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7d87dee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7d87dee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7d87dee

Branch: refs/heads/HDFS-7240
Commit: d7d87deece66333c188e9b7c10b4b56ddb529ce9
Parents: 262827c
Author: Mingliang Liu 
Authored: Thu Oct 20 19:51:48 2016 -0700
Committer: Mingliang Liu 
Committed: Thu Oct 20 19:51:48 2016 -0700

--
 .../org/apache/hadoop/hdfs/TestFetchImage.java  | 105 ++-
 1 file changed, 79 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7d87dee/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
index d8218b6..7e1e593 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
@@ -17,10 +17,15 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.junit.Assert.assertEquals;
 
 import java.io.File;
+import java.io.IOException;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -29,11 +34,16 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class TestFetchImage {
@@ -43,46 +53,89 @@ public class TestFetchImage {
   // Shamelessly stolen from NNStorage.
   private static final Pattern IMAGE_REGEX = Pattern.compile("fsimage_(\\d+)");
 
+  private MiniDFSCluster cluster;
+  private NameNode nn0 = null;
+  private NameNode nn1 = null;
+  private Configuration conf = null;
+
+  @BeforeClass
+  public static void setupImageDir() {
+FETCHED_IMAGE_FILE.mkdirs();
+  }
+
   @AfterClass
   public static void cleanup() {
 FileUtil.fullyDelete(FETCHED_IMAGE_FILE);
   }
 
+  @Before
+  public void setupCluster() throws IOException, URISyntaxException {
+conf = new Configuration();
+conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
+conf.setInt(DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+conf.setLong(DFS_BLOCK_SIZE_KEY, 1024);
+
+cluster = new MiniDFSCluster.Builder(conf)
+.nnTopology(MiniDFSNNTopology.simpleHATopology())
+.numDataNodes(1)
+.build();
+nn0 = cluster.getNameNode(0);
+nn1 = cluster.getNameNode(1);
+HATestUtil.configureFailoverFs(cluster, conf);
+cluster.waitActive();
+  }
+
   /**
* Download a few fsimages using `hdfs dfsadmin -fetchImage ...' and verify
* the results.
*/
-  @Test
-  public void testFetchImage() throws Exception {
-FETCHED_IMAGE_FILE.mkdirs();
-Configuration conf = new Configuration();
-MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-FileSystem fs = null;
-try {
-  DFSAdmin dfsAdmin = new DFSAdmin();
-  dfsAdmin.setConf(conf);
-  
+  @Test(timeout=30000)
+  public void testFetchImageHA() throws Exception {
+final Path parent = new Path(
+PathUtils.getTestPath(getClass()),
+GenericTestUtils.getMethodName());
+
+int nnIndex = 0;
+/* run on nn0 as active */
+cluster.transitionToActive(nnIndex);
+testFetchImageInternal(
+nnIndex,
+new Path(parent, "dir1"),
+new Path(parent, "dir2"));
+
+/* run on nn1 as active */
+nnIndex = 1;
+HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
+cluster.transitionToActive(nnIndex);
+testFetchImageInternal(
+nnIndex,
+new Path(parent, "dir3"),
+new Path(parent, "dir4"));
+  }
+
+  private void 
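
Based on the replaced test body above, the core fetch step inside the helper presumably reduces to this sketch (directory handling simplified; the exit-code assertion is an assumption):

    // Sketch of the fetch step, derived from the replaced test body above.
    DFSAdmin dfsAdmin = new DFSAdmin();
    dfsAdmin.setConf(conf);
    int exitCode = dfsAdmin.run(
        new String[] {"-fetchImage", FETCHED_IMAGE_FILE.getPath()});
    assertEquals(0, exitCode); // assumed success check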

[32/50] [abbrv] hadoop git commit: HDFS-11009. Add a tool to reconstruct block meta file from CLI.

2016-10-21 Thread aengineer
HDFS-11009. Add a tool to reconstruct block meta file from CLI.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5573e6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5573e6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5573e6a

Branch: refs/heads/HDFS-7240
Commit: c5573e6a7599da17cad733cd274e7a9b75b22bb0
Parents: 4bca385
Author: Xiao Chen 
Authored: Tue Oct 18 18:32:27 2016 -0700
Committer: Xiao Chen 
Committed: Tue Oct 18 22:42:28 2016 -0700

--
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |   4 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   2 +-
 .../datanode/fsdataset/impl/FsDatasetUtil.java  |  32 ++
 .../apache/hadoop/hdfs/tools/DebugAdmin.java| 107 +--
 .../src/site/markdown/HDFSCommands.md   |  22 +++-
 .../hadoop/hdfs/tools/TestDebugAdmin.java   |  56 +-
 6 files changed, 204 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5573e6a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index 4f4c7b2..b2fd487 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -284,7 +284,7 @@ public class DfsClientConf {
 return classes;
   }
 
-  private DataChecksum.Type getChecksumType(Configuration conf) {
+  private static DataChecksum.Type getChecksumType(Configuration conf) {
 final String checksum = conf.get(
 DFS_CHECKSUM_TYPE_KEY,
 DFS_CHECKSUM_TYPE_DEFAULT);
@@ -299,7 +299,7 @@ public class DfsClientConf {
   }
 
   // Construct a checksum option from conf
-  private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
+  public static ChecksumOpt getChecksumOptFromConf(Configuration conf) {
 DataChecksum.Type type = getChecksumType(conf);
 int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
 DFS_BYTES_PER_CHECKSUM_DEFAULT);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5573e6a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index fd747bd..ba653ac 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1031,7 +1031,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
* @param conf the {@link Configuration}
* @throws IOException
*/
-  private static void computeChecksum(ReplicaInfo srcReplica, File dstMeta,
+  static void computeChecksum(ReplicaInfo srcReplica, File dstMeta,
   int smallBufferSize, final Configuration conf)
   throws IOException {
 File srcMeta = new File(srcReplica.getMetadataURI());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5573e6a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
index a4d433d..563f66a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
@@ -21,13 +21,19 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FilenameFilter;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.RandomAccessFile;
+import java.net.URI;
 import java.util.Arrays;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
+import 
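
A hedged sketch of driving the new reconstruction command in-process; the computeMeta/-block/-out argument names follow this commit's HDFSCommands.md update, and both paths are placeholders, not real block locations:

    // Argument names per the commit's documentation; paths are placeholders.
    int ret = new DebugAdmin(new Configuration()).run(new String[] {
        "computeMeta",
        "-block", "/data/dn/current/BP-1/current/finalized/subdir0/blk_1073741825",
        "-out", "/tmp/blk_1073741825_1001.meta"});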

[48/50] [abbrv] hadoop git commit: YARN-5047. Refactor nodeUpdate across schedulers. (Ray Chiang via kasha)

2016-10-21 Thread aengineer
YARN-5047. Refactor nodeUpdate across schedulers. (Ray Chiang via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/754cb4e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/754cb4e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/754cb4e3

Branch: refs/heads/HDFS-7240
Commit: 754cb4e30fac1c5fe8d44626968c0ddbfe459335
Parents: a064865
Author: Karthik Kambatla 
Authored: Thu Oct 20 21:17:48 2016 -0700
Committer: Karthik Kambatla 
Committed: Thu Oct 20 21:17:48 2016 -0700

--
 .../scheduler/AbstractYarnScheduler.java| 186 ++-
 .../scheduler/capacity/CapacityScheduler.java   | 122 ++--
 .../scheduler/fair/FairScheduler.java   |  80 +---
 .../scheduler/fifo/FifoScheduler.java   |  94 +++---
 ...estProportionalCapacityPreemptionPolicy.java |   4 +-
 5 files changed, 225 insertions(+), 261 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/754cb4e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 645e06d..df59556 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
@@ -73,7 +74,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReco
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
+import org.apache.hadoop.yarn.server.utils.Lock;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.SettableFuture;
@@ -94,10 +100,14 @@ public abstract class AbstractYarnScheduler
   protected Resource minimumAllocation;
 
   protected volatile RMContext rmContext;
-  
+
   private volatile Priority maxClusterLevelAppPriority;
 
   protected ActivitiesManager activitiesManager;
+  protected SchedulerHealth schedulerHealth = new SchedulerHealth();
+  protected volatile long lastNodeUpdateTime;
+
+  private volatile Clock clock;
 
   /*
* All schedulers which are inheriting AbstractYarnScheduler should use
@@ -130,6 +140,7 @@ public abstract class AbstractYarnScheduler
*/
   public AbstractYarnScheduler(String name) {
 super(name);
+clock = SystemClock.getInstance();
 ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
 readLock = lock.readLock();
 writeLock = lock.writeLock();
@@ -228,13 +239,25 @@ public abstract class AbstractYarnScheduler
 nodeTracker.setConfiguredMaxAllocation(maximumAllocation);
   }
 
+  public SchedulerHealth getSchedulerHealth() {
+return this.schedulerHealth;
+  }
+
+  protected void setLastNodeUpdateTime(long time) {
+this.lastNodeUpdateTime = time;
+  }
+
+  public long getLastNodeUpdateTime() {
+return lastNodeUpdateTime;
+  }
+
   protected void containerLaunchedOnNode(
   ContainerId containerId, SchedulerNode node) {
 try {
   readLock.lock();
   // Get 
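
The shape of this refactoring is a classic template method: per-heartbeat bookkeeping that was duplicated across the Capacity, Fair, and Fifo schedulers moves into the base class, and each scheduler keeps only its scheduling hook. A standalone illustration of the pattern (these are deliberately not the YARN classes):

    // Illustration of the pattern only; names simplified, not YARN code.
    abstract class BaseScheduler {
      private volatile long lastNodeUpdateTime;

      final void nodeUpdate(String nodeId) {
        lastNodeUpdateTime = System.currentTimeMillis(); // shared bookkeeping
        attemptScheduling(nodeId);                       // scheduler-specific hook
      }

      long getLastNodeUpdateTime() {
        return lastNodeUpdateTime;
      }

      abstract void attemptScheduling(String nodeId);
    }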

[26/50] [abbrv] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-21 Thread aengineer
HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c348c56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c348c56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c348c56

Branch: refs/heads/HDFS-7240
Commit: 6c348c56918973fd988b110e79231324a8befe12
Parents: b733a6f
Author: Steve Loughran 
Authored: Tue Oct 18 19:33:38 2016 +0100
Committer: Steve Loughran 
Committed: Tue Oct 18 21:16:02 2016 +0100

--
 .../src/main/resources/core-default.xml |  74 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |  16 +-
 hadoop-tools/hadoop-aws/pom.xml |  58 +-
 .../s3a/BlockingThreadPoolExecutorService.java  | 168 +---
 .../org/apache/hadoop/fs/s3a/Constants.java |  71 +-
 .../hadoop/fs/s3a/S3ABlockOutputStream.java | 703 
 .../org/apache/hadoop/fs/s3a/S3ADataBlocks.java | 821 +++
 .../hadoop/fs/s3a/S3AFastOutputStream.java  | 410 -
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 408 +++--
 .../hadoop/fs/s3a/S3AInstrumentation.java   | 248 +-
 .../apache/hadoop/fs/s3a/S3AOutputStream.java   |  57 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  39 +
 .../fs/s3a/SemaphoredDelegatingExecutor.java| 230 ++
 .../org/apache/hadoop/fs/s3a/Statistic.java |  32 +-
 .../src/site/markdown/tools/hadoop-aws/index.md | 668 +--
 .../fs/contract/s3a/ITestS3AContractDistCp.java |  10 +-
 .../hadoop/fs/s3a/AbstractS3ATestBase.java  |   1 +
 .../ITestBlockingThreadPoolExecutorService.java |  48 +-
 .../hadoop/fs/s3a/ITestS3ABlockOutputArray.java |  90 ++
 .../fs/s3a/ITestS3ABlockOutputByteBuffer.java   |  30 +
 .../hadoop/fs/s3a/ITestS3ABlockOutputDisk.java  |  30 +
 .../fs/s3a/ITestS3ABlockingThreadPool.java  |   2 +
 .../hadoop/fs/s3a/ITestS3AConfiguration.java|  29 +
 .../ITestS3AEncryptionBlockOutputStream.java|  36 +
 .../s3a/ITestS3AEncryptionFastOutputStream.java |  35 -
 .../hadoop/fs/s3a/ITestS3AFastOutputStream.java |  74 --
 .../apache/hadoop/fs/s3a/ITestS3ATestUtils.java |  98 +++
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  |  75 +-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  | 148 +++-
 .../apache/hadoop/fs/s3a/TestDataBlocks.java| 124 +++
 .../ITestS3AFileContextStatistics.java  |   1 +
 .../fs/s3a/scale/AbstractSTestS3AHugeFiles.java | 412 ++
 .../fs/s3a/scale/ITestS3ADeleteManyFiles.java   |  19 +-
 .../s3a/scale/ITestS3AHugeFilesArrayBlocks.java |  31 +
 .../ITestS3AHugeFilesByteBufferBlocks.java  |  34 +
 .../scale/ITestS3AHugeFilesClassicOutput.java   |  41 +
 .../s3a/scale/ITestS3AHugeFilesDiskBlocks.java  |  31 +
 .../hadoop/fs/s3a/scale/S3AScaleTestBase.java   | 151 ++--
 38 files changed, 4647 insertions(+), 906 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c348c56/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 4882728..daa421c 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -994,8 +994,8 @@
 <property>
   <name>fs.s3a.threads.max</name>
   <value>10</value>
-  <description>Maximum number of concurrent active (part)uploads,
-    which each use a thread from the threadpool.</description>
+  <description>The total number of threads available in the filesystem for data
+    uploads *or any other queued filesystem operation*.</description>
 </property>
 
@@ -1008,8 +1008,7 @@
 <property>
   <name>fs.s3a.max.total.tasks</name>
   <value>5</value>
-  <description>Number of (part)uploads allowed to the queue before
-    blocking additional uploads.</description>
+  <description>The number of operations which can be queued for
+    execution</description>
 </property>
 
@@ -1047,13 +1046,21 @@
   <name>fs.s3a.multipart.purge</name>
   <value>false</value>
   <description>True if you want to purge existing multipart uploads that may not have been
-    completed/aborted correctly</description>
+    completed/aborted correctly. The corresponding purge age is defined in
+    fs.s3a.multipart.purge.age.
+    If set, when the filesystem is instantiated then all outstanding uploads
+    older than the purge age will be terminated - across the entire bucket.
+    This will impact multipart uploads by other applications and users, so should
+    be used sparingly, with an age value chosen to stop failed uploads, without
+    breaking ongoing operations.
+  </description>
 </property>
 
 <property>
   <name>fs.s3a.multipart.purge.age</name>
   <value>86400</value>
-  <description>Minimum age in seconds of multipart uploads to purge</description>
+  <description>Minimum age in seconds of multipart uploads to purge.
+  </description>
 </property>
@@ -1086,10 +1093,50 @@
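
The same knobs can be set programmatically; a small sketch using the default values documented above (values illustrative, not tuning recommendations):

    Configuration conf = new Configuration();
    conf.setInt("fs.s3a.threads.max", 10);             // upload + queued-op pool
    conf.setInt("fs.s3a.max.total.tasks", 5);          // bound on queued work
    conf.setBoolean("fs.s3a.multipart.purge", false);  // purge stale MPUs on init
    conf.setLong("fs.s3a.multipart.purge.age", 86400); // purge age in seconds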
 
   

[44/50] [abbrv] hadoop git commit: HDFS-10976. Report erasure coding policy of EC files in Fsck. Contributed by Wei-Chiu Chuang.

2016-10-21 Thread aengineer
HDFS-10976. Report erasure coding policy of EC files in Fsck. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e83a21c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e83a21c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e83a21c

Branch: refs/heads/HDFS-7240
Commit: 5e83a21cb66c78e89ac5af9a130ab0aee596a9f4
Parents: 3fbf4cd
Author: Wei-Chiu Chuang 
Authored: Thu Oct 20 13:02:16 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Oct 20 13:06:43 2016 -0700

--
 .../hadoop/hdfs/server/namenode/NamenodeFsck.java | 14 --
 .../apache/hadoop/hdfs/server/namenode/TestFsck.java  | 14 +-
 2 files changed, 25 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e83a21c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 8302035..a2e249d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -540,11 +541,20 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
 res.totalFiles++;
 res.totalSize += fileLen;
 res.totalBlocks += blocks.locatedBlockCount();
+String redundancyPolicy;
+ErasureCodingPolicy ecPolicy = file.getErasureCodingPolicy();
+if (ecPolicy == null) { // a replicated file
+  redundancyPolicy = "replicated: replication=" +
+  file.getReplication() + ",";
+} else {
+  redundancyPolicy = "erasure-coded: policy=" + ecPolicy.getName() + ",";
+}
+
 if (showOpenFiles && isOpen) {
-  out.print(path + " " + fileLen + " bytes, " +
+  out.print(path + " " + fileLen + " bytes, " + redundancyPolicy + " " +
 blocks.locatedBlockCount() + " block(s), OPENFORWRITE: ");
 } else if (showFiles) {
-  out.print(path + " " + fileLen + " bytes, " +
+  out.print(path + " " + fileLen + " bytes, " + redundancyPolicy + " " +
 blocks.locatedBlockCount() + " block(s): ");
 } else if (showprogress) {
   out.print('.');

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e83a21c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index aa41e9b..254a86c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -1700,9 +1700,21 @@ public class TestFsck {
 // restart the cluster; bring up namenode but not the data nodes
 cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(0).format(false).build();
-outStr = runFsck(conf, 1, true, "/");
+outStr = runFsck(conf, 1, true, "/", "-files", "-blocks");
 // expect the result is corrupt
 assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+String[] outLines = outStr.split("\\r?\\n");
+for (String line: outLines) {
+  if (line.contains(largeFilePath.toString())) {
+final HdfsFileStatus file = cluster.getNameNode().getRpcServer().
+getFileInfo(largeFilePath.toString());
+assertTrue(line.contains("policy=" +
+file.getErasureCodingPolicy().getName()));
+  } else if (line.contains(replFilePath.toString())) {
+assertTrue(line.contains("replication=" + cluster.getFileSystem().
+getFileStatus(replFilePath).getReplication()));
+  }
+}
 System.out.println(outStr);
   }
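
Reproducing the string-building above with made-up values gives a feel for the new output line; the policy name here is illustrative, not captured from a real run:

    String redundancyPolicy = "erasure-coded: policy=RS-6-3-64k,";
    System.out.println("/user/ec/f" + " " + 786432 + " bytes, "
        + redundancyPolicy + " " + 9 + " block(s): ");
    // prints: /user/ec/f 786432 bytes, erasure-coded: policy=RS-6-3-64k, 9 block(s):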
 



[18/50] [abbrv] hadoop git commit: HADOOP-13061. Refactor erasure coders. Contributed by Kai Sasaki

2016-10-21 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c023c748/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
index afaaf24..6e679c3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
@@ -32,15 +31,11 @@ import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
  * It implements {@link ErasureCoder}.
  */
 @InterfaceAudience.Private
-public class RSErasureDecoder extends AbstractErasureDecoder {
+public class RSErasureDecoder extends ErasureDecoder {
   private RawErasureDecoder rsRawDecoder;
 
-  public RSErasureDecoder(int numDataUnits, int numParityUnits) {
-super(numDataUnits, numParityUnits);
-  }
-
-  public RSErasureDecoder(ECSchema schema) {
-super(schema);
+  public RSErasureDecoder(ErasureCoderOptions options) {
+super(options);
   }
 
   @Override
@@ -56,11 +51,8 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
 
   private RawErasureDecoder checkCreateRSRawDecoder() {
 if (rsRawDecoder == null) {
-  // TODO: we should create the raw coder according to codec.
-  ErasureCoderOptions coderOptions = new ErasureCoderOptions(
-  getNumDataUnits(), getNumParityUnits());
   rsRawDecoder = CodecUtil.createRawDecoder(getConf(),
-  ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
+  ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
 }
 return rsRawDecoder;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c023c748/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
index 2139113..7a09b92 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
@@ -32,15 +31,11 @@ import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
  * It implements {@link ErasureCoder}.
  */
 @InterfaceAudience.Private
-public class RSErasureEncoder extends AbstractErasureEncoder {
+public class RSErasureEncoder extends ErasureEncoder {
   private RawErasureEncoder rawEncoder;
 
-  public RSErasureEncoder(int numDataUnits, int numParityUnits) {
-super(numDataUnits, numParityUnits);
-  }
-
-  public RSErasureEncoder(ECSchema schema) {
-super(schema);
+  public RSErasureEncoder(ErasureCoderOptions options) {
+super(options);
   }
 
   @Override
@@ -57,10 +52,8 @@ public class RSErasureEncoder extends AbstractErasureEncoder {
   private RawErasureEncoder checkCreateRSRawEncoder() {
 if (rawEncoder == null) {
   // TODO: we should create the raw coder according to codec.
-  ErasureCoderOptions coderOptions = new ErasureCoderOptions(
-  getNumDataUnits(), getNumParityUnits());
   rawEncoder = CodecUtil.createRawEncoder(getConf(),
-  ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
+  ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
 }
 return rawEncoder;
   }
@@ -71,4 +64,9 @@ public class RSErasureEncoder extends AbstractErasureEncoder {
   rawEncoder.release();
 }
   }
+
+  @Override
+  
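
The net effect of the constructor change, sketched for a common RS(6,3) layout; previously callers passed the unit counts or an ECSchema directly:

    ErasureCoderOptions options = new ErasureCoderOptions(6, 3); // data, parity
    RSErasureEncoder encoder = new RSErasureEncoder(options);
    RSErasureDecoder decoder = new RSErasureDecoder(options);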

[21/50] [abbrv] hadoop git commit: YARN-5743. [Atsv2] Publish queue name and RMAppMetrics to ATS (Rohith Sharma K S via Varun Saxena)

2016-10-21 Thread aengineer
YARN-5743. [Atsv2] Publish queue name and RMAppMetrics to ATS (Rohith Sharma K S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b154d3ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b154d3ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b154d3ed

Branch: refs/heads/HDFS-7240
Commit: b154d3edcee95254d41c237142870f39e826a519
Parents: d26a1bb
Author: Varun Saxena 
Authored: Tue Oct 18 23:32:52 2016 +0530
Committer: Varun Saxena 
Committed: Tue Oct 18 23:32:52 2016 +0530

--
 .../metrics/ApplicationMetricsConstants.java| 16 ++-
 .../metrics/TimelineServiceV2Publisher.java | 49 ++--
 .../TestSystemMetricsPublisherForV2.java| 18 ---
 3 files changed, 70 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b154d3ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
index 1774208..521e0af 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
@@ -71,10 +71,22 @@ public class ApplicationMetricsConstants {
   "YARN_APPLICATION_STATE";
   
   public static final String APP_CPU_METRICS =
-  "YARN_APPLICATION_CPU_METRIC";
+  "YARN_APPLICATION_CPU";
   
   public static final String APP_MEM_METRICS =
-  "YARN_APPLICATION_MEM_METRIC";
+  "YARN_APPLICATION_MEMORY";
+
+  public static final String APP_RESOURCE_PREEMPTED_CPU =
+  "YARN_APPLICATION_RESOURCE_PREEMPTED_CPU";
+
+  public static final String APP_RESOURCE_PREEMPTED_MEM =
+  "YARN_APPLICATION_RESOURCE_PREEMPTED_MEMORY";
+
+  public static final String APP_NON_AM_CONTAINER_PREEMPTED =
+  "YARN_APPLICATION_NON_AM_CONTAINER_PREEMPTED";
+
+  public static final String APP_AM_CONTAINER_PREEMPTED =
+  "YARN_APPLICATION_AM_CONTAINER_PREEMPTED";
 
   public static final String LATEST_APP_ATTEMPT_EVENT_INFO =
   "YARN_APPLICATION_LATEST_APP_ATTEMPT";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b154d3ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index dbdc1a8..f039ebe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.yarn.server.resourcemanager.metrics;
 
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -40,6 +42,7 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants;
@@ -104,6 +107,8 @@ public class TimelineServiceV2Publisher extends 
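
A hedged sketch of attaching one of the new metric ids to an application's timeline entity; the entity, timestamp, and value are illustrative, not the publisher's actual wiring:

    // entity is assumed to be an application TimelineEntity.
    TimelineMetric preemptedCpu = new TimelineMetric();
    preemptedCpu.setId(ApplicationMetricsConstants.APP_RESOURCE_PREEMPTED_CPU);
    preemptedCpu.addValue(System.currentTimeMillis(), 12L);
    entity.addMetric(preemptedCpu);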

[41/50] [abbrv] hadoop git commit: HADOOP-13236. truncate will fail when we use viewfilesystem. Contributed by Brahma Reddy Battula

2016-10-21 Thread aengineer
HADOOP-13236. truncate will fail when we use viewfilesystem. Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f872c6bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f872c6bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f872c6bc

Branch: refs/heads/HDFS-7240
Commit: f872c6bc0390415f13e95b99749b0b1a690991b7
Parents: 6fb6b65
Author: Brahma Reddy Battula 
Authored: Thu Oct 20 21:47:17 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Oct 20 21:47:17 2016 +0530

--
 .../hadoop/fs/viewfs/ChRootedFileSystem.java|   5 +
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |   2 +-
 .../viewfs/TestViewFileSystemWithTruncate.java  | 123 +++
 3 files changed, 129 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f872c6bc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 747ba20..9f61af6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -355,6 +355,11 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public boolean truncate(Path path, long newLength) throws IOException {
+return super.truncate(fullPath(path), newLength);
+  }
+
+  @Override
   public List listXAttrs(Path path) throws IOException {
 return super.listXAttrs(fullPath(path));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f872c6bc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index f2a91d1..ef224d8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -502,7 +502,7 @@ public class ViewFileSystem extends FileSystem {
   throws IOException {
 InodeTree.ResolveResult res =
 fsState.resolve(getUriPath(f), true);
-return res.targetFileSystem.truncate(f, newLength);
+return res.targetFileSystem.truncate(res.remainingPath, newLength);
   }
   
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f872c6bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java
new file mode 100644
index 000..0b99cfe
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+
+import com.google.common.base.Supplier;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.Path;
+import 
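
A hedged usage sketch of the fixed path resolution: a truncate issued against a viewfs path now lands on the mounted target filesystem instead of failing. The mount table name and sizes are illustrative:

    FileSystem viewFs = FileSystem.get(URI.create("viewfs://cluster/"), conf);
    Path file = new Path("/data/app.log"); // resolved through the mount table
    try (FSDataOutputStream out = viewFs.create(file)) {
      out.write(new byte[1024]);
    }
    // Previously resolved against the unstripped path; now hits the target FS.
    boolean completed = viewFs.truncate(file, 512);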

[19/50] [abbrv] hadoop git commit: HADOOP-13061. Refactor erasure coders. Contributed by Kai Sasaki

2016-10-21 Thread aengineer
HADOOP-13061. Refactor erasure coders. Contributed by Kai Sasaki


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c023c748
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c023c748
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c023c748

Branch: refs/heads/HDFS-7240
Commit: c023c748869063fb67d14ea996569c42578d1cea
Parents: bedfec0
Author: Kai Zheng 
Authored: Tue Oct 18 12:02:53 2016 +0600
Committer: Kai Zheng 
Committed: Tue Oct 18 12:02:53 2016 +0600

--
 .../hadoop/fs/CommonConfigurationKeys.java  |  26 ---
 .../apache/hadoop/io/erasurecode/CodecUtil.java | 168 ++--
 .../io/erasurecode/ErasureCodeConstants.java|   3 +-
 .../io/erasurecode/ErasureCodecOptions.java |  37 
 .../erasurecode/codec/AbstractErasureCodec.java |  53 -
 .../io/erasurecode/codec/DummyErasureCodec.java |  45 +
 .../io/erasurecode/codec/ErasureCodec.java  |  76 +--
 .../io/erasurecode/codec/HHXORErasureCodec.java |  20 +-
 .../io/erasurecode/codec/RSErasureCodec.java|  20 +-
 .../io/erasurecode/codec/XORErasureCodec.java   |  22 ++-
 .../io/erasurecode/codec/package-info.java  |  28 +++
 .../erasurecode/coder/AbstractErasureCoder.java |  64 --
 .../coder/AbstractErasureCodingStep.java|  61 --
 .../coder/AbstractErasureDecoder.java   | 170 
 .../coder/AbstractErasureEncoder.java   |  62 --
 .../coder/AbstractHHErasureCodingStep.java  |  49 -
 .../erasurecode/coder/DummyErasureDecoder.java  |  46 +
 .../erasurecode/coder/DummyErasureEncoder.java  |  45 +
 .../io/erasurecode/coder/ErasureCoder.java  |  25 ++-
 .../io/erasurecode/coder/ErasureCodingStep.java |   8 +-
 .../io/erasurecode/coder/ErasureDecoder.java| 198 +++
 .../erasurecode/coder/ErasureDecodingStep.java  |  21 +-
 .../io/erasurecode/coder/ErasureEncoder.java|  91 +
 .../erasurecode/coder/ErasureEncodingStep.java  |  22 ++-
 .../erasurecode/coder/HHErasureCodingStep.java  |  68 +++
 .../erasurecode/coder/HHXORErasureDecoder.java  |  24 +--
 .../coder/HHXORErasureDecodingStep.java |   2 +-
 .../erasurecode/coder/HHXORErasureEncoder.java  |  19 +-
 .../coder/HHXORErasureEncodingStep.java |   2 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  |  16 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |  20 +-
 .../io/erasurecode/coder/XORErasureDecoder.java |  15 +-
 .../io/erasurecode/coder/XORErasureEncoder.java |  16 +-
 .../io/erasurecode/coder/package-info.java  |  28 +++
 .../io/erasurecode/rawcoder/CoderUtil.java  |   2 +-
 .../conf/TestCommonConfigurationFields.java |   5 +-
 .../erasurecode/TestCodecRawCoderMapping.java   |   3 +-
 .../codec/TestHHXORErasureCodec.java|   6 +-
 .../erasurecode/coder/TestErasureCoderBase.java |  13 +-
 .../coder/TestHHXORErasureCoder.java|   4 +-
 .../erasurecode/coder/TestRSErasureCoder.java   |   4 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |   3 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   6 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |   4 +-
 .../hadoop/hdfs/TestReconstructStripedFile.java |   4 +-
 45 files changed, 964 insertions(+), 660 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c023c748/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 2b530f0..fe522b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -21,9 +21,6 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.http.lib.StaticUserWebFilter;
-import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory;
-import 
org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactoryLegacy;
-import org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory;
 
 /** 
  * This class contains constants for configuration keys used
@@ -160,30 +157,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final boolean IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT =
   false;
 
-  /**
-   * Erasure Coding configuration family
-   */
 
-  /** Supported erasure 

[47/50] [abbrv] hadoop git commit: YARN-4911. Bad placement policy in FairScheduler causes the RM to crash

2016-10-21 Thread aengineer
YARN-4911. Bad placement policy in FairScheduler causes the RM to crash


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a064865a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a064865a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a064865a

Branch: refs/heads/HDFS-7240
Commit: a064865abf7dceee46d3c42eca67a04a25af9d4e
Parents: d7d87de
Author: Karthik Kambatla 
Authored: Thu Oct 20 20:57:04 2016 -0700
Committer: Karthik Kambatla 
Committed: Thu Oct 20 20:57:04 2016 -0700

--
 .../scheduler/fair/FairScheduler.java   |  6 +
 .../scheduler/fair/TestFairScheduler.java   | 28 
 .../fair/TestQueuePlacementPolicy.java  |  9 +--
 3 files changed, 41 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a064865a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 8daf0f3..d33c214 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -774,6 +774,12 @@ public class FairScheduler extends
   appRejectMsg = queueName + " is not a leaf queue";
 }
   }
+} catch (IllegalStateException se) {
+  appRejectMsg = "Unable to match app " + rmApp.getApplicationId() +
+  " to a queue placement policy, and no valid terminal queue " +
+  "placement rule is configured. Please contact an administrator " +
+  "to confirm that the fair scheduler configuration contains a " +
+  "valid terminal queue placement rule.";
 } catch (InvalidQueueNameException qne) {
   appRejectMsg = qne.getMessage();
 } catch (IOException ioe) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a064865a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 98af8b9..7535f69 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -1605,6 +1605,34 @@ public class TestFairScheduler extends FairSchedulerTestBase {
   }
 
   @Test
+  public void testAssignToBadDefaultQueue() throws Exception {
+conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
+
+PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
+out.println("<?xml version=\"1.0\"?>");
+out.println("<allocations>");
+out.println("<queuePlacementPolicy>");
+out.println("<rule name=\"specified\" create=\"false\" />");
+out.println("<rule name=\"default\" create=\"false\" />");
+out.println("</queuePlacementPolicy>");
+out.println("</allocations>");
+out.close();
+scheduler.init(conf);
+scheduler.start();
+scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+RMApp rmApp1 = new MockRMApp(0, 0, RMAppState.NEW);
+
+try {
+  FSLeafQueue queue1 = scheduler.assignToQueue(rmApp1, "default",
+  "asterix");
+} catch (IllegalStateException ise) {
+  fail("Bad queue placement policy terminal rule should not throw " +
+  "exception ");
+}
+  }
+
+  @Test
   public void testAssignToNonLeafQueueReturnsNull() throws Exception {
 conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "true");
 scheduler.init(conf);
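
For contrast, a sketch of a placement policy whose last rule is terminal, so assignToQueue cannot fall through; the allocation syntax follows the FairScheduler documentation and is illustrative:

    out.println("<queuePlacementPolicy>");
    out.println("  <rule name=\"specified\" create=\"false\"/>");
    out.println("  <rule name=\"default\"/>"); // terminal: always matches
    out.println("</queuePlacementPolicy>");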


[25/50] [abbrv] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-21 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c348c56/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
deleted file mode 100644
index c25d0fb..000
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
+++ /dev/null
@@ -1,410 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import com.amazonaws.AmazonClientException;
-import com.amazonaws.event.ProgressEvent;
-import com.amazonaws.event.ProgressListener;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
-import com.amazonaws.services.s3.model.CannedAccessControlList;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
-import com.amazonaws.services.s3.model.ObjectMetadata;
-import com.amazonaws.services.s3.model.PartETag;
-import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.UploadPartRequest;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.util.Progressable;
-import org.slf4j.Logger;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.List;
-
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-
-import static org.apache.hadoop.fs.s3a.S3AUtils.*;
-import static org.apache.hadoop.fs.s3a.Statistic.*;
-
-/**
- * Upload files/parts asap directly from a memory buffer (instead of buffering
- * to a file).
- * 
- * Uploads are managed low-level rather than through the AWS TransferManager.
- * This allows for uploading each part of a multi-part upload as soon as
- * the bytes are in memory, rather than waiting until the file is closed.
- * 
- * Unstable: statistics and error handling might evolve
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class S3AFastOutputStream extends OutputStream {
-
-  private static final Logger LOG = S3AFileSystem.LOG;
-  private final String key;
-  private final String bucket;
-  private final AmazonS3 client;
-  private final int partSize;
-  private final int multiPartThreshold;
-  private final S3AFileSystem fs;
-  private final CannedAccessControlList cannedACL;
-  private final ProgressListener progressListener;
-  private final ListeningExecutorService executorService;
-  private MultiPartUpload multiPartUpload;
-  private boolean closed;
-  private ByteArrayOutputStream buffer;
-  private int bufferLimit;
-
-
-  /**
-   * Creates a fast OutputStream that uploads to S3 from memory.
-   * For MultiPartUploads, as soon as sufficient bytes have been written to
-   * the stream a part is uploaded immediately (by using the low-level
-   * multi-part upload API on the AmazonS3Client).
-   *
-   * @param client AmazonS3Client used for S3 calls
-   * @param fs S3AFilesystem
-   * @param bucket S3 bucket name
-   * @param key S3 key name
-   * @param progress report progress in order to prevent timeouts
-   * @param cannedACL used CannedAccessControlList
-   * @param partSize size of a single part in a multi-part upload (except
-   * last part)
-   * @param multiPartThreshold files at least this size use multi-part upload
-   * @param threadPoolExecutor thread factory
-   * @throws IOException on any problem
-   */
-  public S3AFastOutputStream(AmazonS3 client,
- 
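
The replacement S3ABlockOutputStream is selected via configuration rather than constructed directly; a hedged sketch using the key names from the HADOOP-13560 documentation (values illustrative):

    Configuration conf = new Configuration();
    conf.setBoolean("fs.s3a.fast.upload", true);
    conf.set("fs.s3a.fast.upload.buffer", "disk"); // or "array", "bytebuffer"
    conf.setInt("fs.s3a.fast.upload.active.blocks", 4);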

[36/50] [abbrv] hadoop git commit: HDFS-11025. TestDiskspaceQuotaUpdate fails in trunk due to Bind exception. Contributed by Yiqun Lin

2016-10-21 Thread aengineer
HDFS-11025. TestDiskspaceQuotaUpdate fails in trunk due to Bind exception. Contributed by Yiqun Lin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73504b1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73504b1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73504b1b

Branch: refs/heads/HDFS-7240
Commit: 73504b1bdc4b93c64741de5eb9d022817fdfa22f
Parents: 8650cc8
Author: Brahma Reddy Battula 
Authored: Thu Oct 20 10:41:18 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Oct 20 10:41:18 2016 +0530

--
 .../hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73504b1b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
index deb5208..4b0ac08 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
@@ -427,7 +427,7 @@ public class TestDiskspaceQuotaUpdate {
   testQuotaIssuesWhileCommittingHelper(nnSpy, (short) 1, (short) 1);
 } finally {
   for (MiniDFSCluster.DataNodeProperties dnprop : dnprops) {
-cluster.restartDataNode(dnprop, true);
+cluster.restartDataNode(dnprop);
   }
   cluster.waitActive();
 }
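
The one-argument overload restarts the DataNode on an ephemeral port; the removed keepPort=true call is what risked the BindException when another process had grabbed the old port in the meantime. Side by side:

    cluster.restartDataNode(dnprop, true); // keepPort=true -> BindException risk
    cluster.restartDataNode(dnprop);       // ephemeral port, as in the fix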





[34/50] [abbrv] hadoop git commit: HDFS-10752. Several log refactoring/improvement suggestion in HDFS. Contributed by Hanisha Koneru.

2016-10-21 Thread aengineer
HDFS-10752. Several log refactoring/improvement suggestion in HDFS. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4564103
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4564103
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4564103

Branch: refs/heads/HDFS-7240
Commit: b4564103e4709caa1135f6ccc2864d90e54f2ac9
Parents: e9c4616
Author: Arpit Agarwal 
Authored: Wed Oct 19 17:20:07 2016 -0700
Committer: Arpit Agarwal 
Committed: Wed Oct 19 17:20:07 2016 -0700

--
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java  | 16 
 .../server/blockmanagement/CorruptReplicasMap.java  |  4 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java   | 10 +-
 3 files changed, 15 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4564103/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
index 7bf93ad..e26fac5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
@@ -69,7 +69,7 @@ class OpenFileCtxCache {
     Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
         .iterator();
     if (LOG.isTraceEnabled()) {
-      LOG.trace("openFileMap size:" + openFileMap.size());
+      LOG.trace("openFileMap size:" + size());
     }
 
     Entry<FileHandle, OpenFileCtx> idlest = null;
@@ -117,10 +117,10 @@ class OpenFileCtxCache {
   boolean put(FileHandle h, OpenFileCtx context) {
     OpenFileCtx toEvict = null;
     synchronized (this) {
-      Preconditions.checkState(openFileMap.size() <= this.maxStreams,
-          "stream cache size " + openFileMap.size()
-              + "  is larger than maximum" + this.maxStreams);
-      if (openFileMap.size() == this.maxStreams) {
+      Preconditions.checkState(size() <= this.maxStreams,
+          "stream cache size " + size() + "  is larger than maximum" + this
+              .maxStreams);
+      if (size() == this.maxStreams) {
         Entry<FileHandle, OpenFileCtx> pairs = getEntryToEvict();
         if (pairs ==null) {
           return false;
@@ -149,7 +149,7 @@ class OpenFileCtxCache {
     Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
         .iterator();
     if (LOG.isTraceEnabled()) {
-      LOG.trace("openFileMap size:" + openFileMap.size());
+      LOG.trace("openFileMap size:" + size());
     }
 
     while (it.hasNext()) {
@@ -168,7 +168,7 @@ class OpenFileCtxCache {
         openFileMap.remove(handle);
         if (LOG.isDebugEnabled()) {
           LOG.debug("After remove stream " + handle.getFileId()
-              + ", the stream number:" + openFileMap.size());
+              + ", the stream number:" + size());
         }
         ctxToRemove.add(ctx2);
       }
@@ -201,7 +201,7 @@ class OpenFileCtxCache {
       Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
          .iterator();
       if (LOG.isTraceEnabled()) {
-        LOG.trace("openFileMap size:" + openFileMap.size());
+        LOG.trace("openFileMap size:" + size());
      }
 
      while (it.hasNext()) {
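
A hedged reading of the OpenFileCtxCache portion of this refactor: every direct openFileMap.size() call is funneled through one size() accessor, so the map's representation and synchronization have a single place to change. A minimal sketch of the assumed accessor:

    // Illustrative only; the real accessor may add synchronization.
    int size() {
      return openFileMap.size();
    }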

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4564103/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index 35468da..8a097a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -84,12 +84,12 @@ public class CorruptReplicasMap{
     if (!nodes.keySet().contains(dn)) {
       NameNode.blockStateChangeLog.debug(
           "BLOCK NameSystem.addToCorruptReplicasMap: {} added as corrupt on "
-              + "{} by {} {}", blk.getBlockName(), dn, Server.getRemoteIp(),
+              + "{} by {} {}", blk, dn, Server.getRemoteIp(),
           reasonText);
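
The CorruptReplicasMap tweak shows a general SLF4J idiom: with parameterized logging, passing blk rather than blk.getBlockName() defers the string conversion until the logger knows the message will be emitted (blk's toString() may print marginally different detail than getBlockName()). A before/after sketch with illustrative names:

    // Eager: getBlockName() runs even when DEBUG is off, unless guarded.
    if (LOG.isDebugEnabled()) {
      LOG.debug("block " + blk.getBlockName() + " corrupt on " + dn);
    }

    // Parameterized: blk.toString() runs only if DEBUG is enabled.
    LOG.debug("block {} corrupt on {}", blk, dn);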
  

[29/50] [abbrv] hadoop git commit: HADOOP-7352. FileSystem#listStatus should throw IOE upon access error. Contributed by John Zhuge.

2016-10-21 Thread aengineer
HADOOP-7352. FileSystem#listStatus should throw IOE upon access error. 
Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/efdf810c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/efdf810c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/efdf810c

Branch: refs/heads/HDFS-7240
Commit: efdf810cf9f72d78e97e860576c64a382ece437c
Parents: 29caf6d
Author: Xiao Chen 
Authored: Tue Oct 18 18:18:43 2016 -0700
Committer: Xiao Chen 
Committed: Tue Oct 18 18:18:43 2016 -0700

--
 .../java/org/apache/hadoop/fs/FileSystem.java   | 14 +---
 .../apache/hadoop/fs/RawLocalFileSystem.java|  5 +---
 .../src/site/markdown/filesystem/filesystem.md  |  3 +++
 .../hadoop/fs/FSMainOperationsBaseTest.java | 24 +---
 .../apache/hadoop/fs/shell/TestPathData.java| 19 
 .../apache/hadoop/hdfs/web/TestTokenAspect.java |  6 ++---
 .../apache/hadoop/tools/TestDistCpWithAcls.java |  2 +-
 .../hadoop/tools/TestDistCpWithXAttrs.java  |  2 +-
 8 files changed, 54 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/efdf810c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index cc062c4..39b5b95 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1524,13 +1524,14 @@ public abstract class FileSystem extends Configured implements Closeable {
    * <p>
    * Does not guarantee to return the List of files/directories status in a
    * sorted order.
+   * <p>
+   * Will not return null. Expect IOException upon access error.
    * @param f given path
    * @return the statuses of the files/directories in the given patch
-   * @throws FileNotFoundException when the path does not exist;
-   *         IOException see specific implementation
+   * @throws FileNotFoundException when the path does not exist
+   * @throws IOException see specific implementation
    */
-  public abstract FileStatus[] listStatus(Path f) throws FileNotFoundException,
-                                                         IOException;
+  public abstract FileStatus[] listStatus(Path f) throws IOException;
 
   /**
    * Represents a batch of directory entries when iteratively listing a
@@ -1600,10 +1601,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   private void listStatus(ArrayList<FileStatus> results, Path f,
       PathFilter filter) throws FileNotFoundException, IOException {
     FileStatus listing[] = listStatus(f);
-    if (listing == null) {
-      throw new IOException("Error accessing " + f);
-    }
-
+    Preconditions.checkNotNull(listing, "listStatus should not return NULL");
     for (int i = 0; i < listing.length; i++) {
       if (filter.accept(listing[i].getPath())) {
         results.add(listing[i]);
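
From a caller's point of view the contract change means listStatus never returns null, so null checks give way to exception handling. A hedged sketch (the helper is illustrative, not part of the patch):

    static void printChildren(FileSystem fs, Path dir) throws IOException {
      try {
        // Never null under the new contract.
        for (FileStatus st : fs.listStatus(dir)) {
          System.out.println(st.getPath());
        }
      } catch (FileNotFoundException e) {
        System.err.println("no such path: " + dir);
      }
      // Other IOExceptions (e.g. an unreadable directory) now propagate
      // instead of surfacing as a null return.
    }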

http://git-wip-us.apache.org/repos/asf/hadoop/blob/efdf810c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 0fcddcf..5e6cb05 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -466,10 +466,7 @@ public class RawLocalFileSystem extends FileSystem {
     }
 
     if (localf.isDirectory()) {
-      String[] names = localf.list();
-      if (names == null) {
-        return null;
-      }
+      String[] names = FileUtil.list(localf);
       results = new FileStatus[names.length];
       int j = 0;
       for (int i = 0; i < names.length; i++) {
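
FileUtil.list is assumed here to be a null-hostile wrapper around java.io.File#list, roughly:

    // Hedged sketch; the real helper's exact message text may differ.
    public static String[] list(File dir) throws IOException {
      String[] entries = dir.list();
      if (entries == null) {
        throw new IOException("error listing directory " + dir);
      }
      return entries;
    }

which is what lets RawLocalFileSystem drop its own null check and its return-null path.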

http://git-wip-us.apache.org/repos/asf/hadoop/blob/efdf810c/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index d927b8b..063bd97 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ 

[05/50] [abbrv] hadoop git commit: HDFS-11003. Expose XmitsInProgress through DataNodeMXBean. Contributed By Brahma Reddy Battula

2016-10-21 Thread aengineer
HDFS-11003. Expose XmitsInProgress through DataNodeMXBean. Contributed By 
Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f4ae85b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f4ae85b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f4ae85b

Branch: refs/heads/HDFS-7240
Commit: 5f4ae85bd8a20510948696467873498723b06477
Parents: 5ad037d
Author: Brahma Reddy Battula 
Authored: Sat Oct 15 22:28:33 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Sat Oct 15 22:28:33 2016 +0530

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java  | 5 +++--
 .../org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java | 6 ++
 .../apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java | 6 +-
 3 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4ae85b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index cb8e308..8f65efe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2101,8 +2101,9 @@ public class DataNode extends ReconfigurableBase
       }
     }
   }
-  
-  int getXmitsInProgress() {
+
+  @Override //DataNodeMXBean
+  public int getXmitsInProgress() {
     return xmitsInProgress.get();
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4ae85b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
index 5ec4cda..5d4c218 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
@@ -101,6 +101,12 @@ public interface DataNodeMXBean {
   public int getXceiverCount();
 
   /**
+   * Returns an estimate of the number of data replication/reconstruction tasks
+   * running currently.
+   */
+  public int getXmitsInProgress();
+
+  /**
    * Gets the network error counts on a per-Datanode basis.
    */
   public Map<String, Map<String, Long>> getDatanodeNetworkCounts();
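
Once the getter is on the MXBean interface, JMX derives the attribute name from it (getXmitsInProgress becomes attribute "XmitsInProgress"), which the test below reads back. A hedged sketch of polling it in-process, with the ObjectName assumed from the usual DataNode registration:

    // javax.management.*, java.lang.management.ManagementFactory
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name =
        new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
    int xmits = (Integer) mbs.getAttribute(name, "XmitsInProgress");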

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4ae85b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index 8b0d5cb..a77c943 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -96,7 +96,11 @@ public class TestDataNodeMXBean {
       int xceiverCount = (Integer)mbs.getAttribute(mxbeanName,
           "XceiverCount");
       Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
-
+      // Ensure mxbean's XmitsInProgress is same as the DataNode's
+      // live value.
+      int xmitsInProgress =
+          (Integer) mbs.getAttribute(mxbeanName, "XmitsInProgress");
+      Assert.assertEquals(datanode.getXmitsInProgress(), xmitsInProgress);
       String bpActorInfo = (String)mbs.getAttribute(mxbeanName,
           "BPServiceActorInfo");
       Assert.assertEquals(datanode.getBPServiceActorInfo(), bpActorInfo);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org


