hadoop git commit: HADOOP-13498. The number of multi-part upload parts should not be bigger than 10000. Contributed by Genmao Yu.

2016-08-23 Thread shimingfei
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-12756 787750d1f -> aff1841d0


HADOOP-13498. The number of multi-part upload parts should not be bigger than 10000. Contributed by Genmao Yu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aff1841d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aff1841d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aff1841d

Branch: refs/heads/HADOOP-12756
Commit: aff1841d00d444b5422e633a6c966104ef704da9
Parents: 787750d
Author: Mingfei 
Authored: Wed Aug 24 10:09:37 2016 +0800
Committer: Mingfei 
Committed: Wed Aug 24 10:09:45 2016 +0800

--
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  9 
 .../fs/aliyun/oss/AliyunOSSOutputStream.java| 23 ++--
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java| 15 +
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |  4 +++-
 .../fs/aliyun/oss/TestOSSOutputStream.java  | 19 
 5 files changed, 53 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aff1841d/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 99a60db..afe7242 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -71,7 +71,6 @@ import org.slf4j.LoggerFactory;
  * Aliyun OSS, used to access OSS blob system in a filesystem style.
  */
 public class AliyunOSSFileSystem extends FileSystem {
-
   private static final Logger LOG =
   LoggerFactory.getLogger(AliyunOSSFileSystem.class);
   private URI uri;
@@ -560,18 +559,18 @@ public class AliyunOSSFileSystem extends FileSystem {
* Used to create an empty file that represents an empty directory.
*
* @param bucket the bucket this directory belongs to
-   * @param objectName directory path
+   * @param key directory path
* @return true if directory successfully created
* @throws IOException
*/
-  private boolean mkdir(final String bucket, final String objectName)
+  private boolean mkdir(final String bucket, final String key)
   throws IOException {
-String dirName = objectName;
+String dirName = key;
 ObjectMetadata dirMeta = new ObjectMetadata();
 byte[] buffer = new byte[0];
 ByteArrayInputStream in = new ByteArrayInputStream(buffer);
 dirMeta.setContentLength(0);
-if (!objectName.endsWith("/")) {
+if (!key.endsWith("/")) {
   dirName += "/";
 }
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aff1841d/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSOutputStream.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSOutputStream.java
index 654b81d..1e16df9 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSOutputStream.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSOutputStream.java
@@ -84,6 +84,9 @@ public class AliyunOSSOutputStream extends OutputStream {
 
 partSize = conf.getLong(MULTIPART_UPLOAD_SIZE_KEY,
 MULTIPART_UPLOAD_SIZE_DEFAULT);
+if (partSize < MIN_MULTIPART_UPLOAD_PART_SIZE) {
+  partSize = MIN_MULTIPART_UPLOAD_PART_SIZE;
+}
 partSizeThreshold = conf.getLong(MIN_MULTIPART_UPLOAD_THRESHOLD_KEY,
 MIN_MULTIPART_UPLOAD_THRESHOLD_DEFAULT);
 
@@ -151,6 +154,12 @@ public class AliyunOSSOutputStream extends OutputStream {
   private void multipartUploadObject() throws IOException {
 File object = tmpFile.getAbsoluteFile();
 long dataLen = object.length();
+long realPartSize = AliyunOSSUtils.calculatePartSize(dataLen, partSize);
+int partNum = (int)(dataLen / realPartSize);
+if (dataLen % realPartSize != 0) {
+  partNum += 1;
+}
+
 InitiateMultipartUploadRequest initiateMultipartUploadRequest =
 new InitiateMultipartUploadRequest(bucketName, key);
 ObjectMetadata meta = new ObjectMetadata();
@@ -161,14 +170,6 @@ public class AliyunOSSOutputStream extends OutputStream {
 initiateMultipartUploadRequest.setObjectMetadata(meta);
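For context, AliyunOSSUtils.calculatePartSize is not shown in this excerpt; below is a minimal hedged sketch of what it plausibly does under this patch (the 10000-part cap comes from the commit subject; the exact body and constant names are assumptions):

  // Hedged sketch, not the committed method body: grow the part size just
  // enough that contentLength never needs more than 10000 parts.
  public static long calculatePartSize(long contentLength, long minPartSize) {
    final long partLimit = 10000;  // OSS caps one multipart upload at 10000 parts
    long partSize = minPartSize;
    if (contentLength > partSize * partLimit) {
      partSize = (contentLength + partLimit - 1) / partLimit;  // ceil division
    }
    return partSize;
  }

With a part size chosen this way, the partNum computed above stays at or below the service limit.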
 

[31/50] [abbrv] hadoop git commit: HDFS-10692. Update JDiff report's base version for HDFS from 2.6.0 to 2.7.2. Contributed by Wangda Tan.

2016-08-23 Thread cdouglas
HDFS-10692. Update JDiff report's base version for HDFS from 2.6.0 to 2.7.2. 
Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc7a1c54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc7a1c54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc7a1c54

Branch: refs/heads/HDFS-9806
Commit: dc7a1c54f4447fb01980ae61d5d67e90c4f52f00
Parents: 115ecb5
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Authored: Mon Aug 22 11:35:18 2016 -0700
Committer: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Committed: Mon Aug 22 11:35:18 2016 -0700

--
 .../jdiff/Apache_Hadoop_HDFS_2.7.2.xml  | 21505 +
 1 file changed, 21505 insertions(+)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[28/50] [abbrv] hadoop git commit: MAPREDUCE-6762. ControlledJob#toString failed with NPE when job status is not successfully updated (Weiwei Yang via Varun Saxena)

2016-08-23 Thread cdouglas
MAPREDUCE-6762. ControlledJob#toString failed with NPE when job status is not 
successfully updated (Weiwei Yang via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d37b45d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d37b45d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d37b45d6

Branch: refs/heads/HDFS-9806
Commit: d37b45d613b768950d1cbe342961cd71776816ae
Parents: 0faee62
Author: Varun Saxena 
Authored: Sun Aug 21 21:46:17 2016 +0530
Committer: Varun Saxena 
Committed: Sun Aug 21 21:46:17 2016 +0530

--
 .../java/org/apache/hadoop/mapreduce/Job.java   |  2 +-
 .../org/apache/hadoop/mapreduce/TestJob.java| 36 
 2 files changed, 37 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d37b45d6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index 33e820b..45c065d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -422,7 +422,7 @@ public class Job extends JobContextImpl implements 
JobContext {
* The user-specified job name.
*/
   public String getJobName() {
-if (state == JobState.DEFINE) {
+if (state == JobState.DEFINE || status == null) {
   return super.getJobName();
 }
 ensureState(JobState.RUNNING);
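The NPE chain being cut here: ControlledJob#toString goes through getters such as getJobName(); after a failed status refresh the cached status is null, and the old code fell through to ensureState(JobState.RUNNING) and then a dereference of the null status. The added status == null check makes the getter fall back to the locally configured name. A minimal standalone rendering of the guard (illustrative names, not the MapReduce classes):

  class JobInfo {
    enum State { DEFINE, RUNNING }
    static class Status {
      final String name;
      Status(String name) { this.name = name; }
    }

    private State state = State.RUNNING;
    private final String configuredName = "my-job";
    private Status status;  // may become null after a failed status refresh

    String getJobName() {
      if (state == State.DEFINE || status == null) {
        return configuredName;  // safe fallback instead of an NPE
      }
      return status.name;
    }
  }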

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d37b45d6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java
index 71bacf7..60f390f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java
@@ -26,6 +26,7 @@ import java.io.IOException;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.JobStatus.State;
+import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -55,6 +56,41 @@ public class TestJob {
   }
 
   @Test
+  public void testUnexpectedJobStatus() throws Exception {
+Cluster cluster = mock(Cluster.class);
+JobID jobid = new JobID("1014873536921", 6);
+ClientProtocol clientProtocol = mock(ClientProtocol.class);
+when(cluster.getClient()).thenReturn(clientProtocol);
+JobStatus status = new JobStatus(jobid, 0f, 0f, 0f, 0f,
+State.RUNNING, JobPriority.DEFAULT, "root",
+"testUnexpectedJobStatus", "job file", "tracking URL");
+when(clientProtocol.getJobStatus(jobid)).thenReturn(status);
+Job job = Job.getInstance(cluster, status, new JobConf());
+
+// ensure job status is RUNNING
+Assert.assertNotNull(job.getStatus());
+Assert.assertTrue(job.getStatus().getState() == State.RUNNING);
+
+// when updating job status, job client could not retrieve
+// job status, and status reset to null
+when(clientProtocol.getJobStatus(jobid)).thenReturn(null);
+
+try {
+  job.updateStatus();
+} catch (IOException e) {
+  Assert.assertTrue(e != null
+  && e.getMessage().contains("Job status not available"));
+}
+
+try {
+  ControlledJob cj = new ControlledJob(job, null);
+  Assert.assertNotNull(cj.toString());
+} catch (NullPointerException e) {
+  Assert.fail("job API fails with NPE");
+}
+  }
+
+  @Test
   public void testUGICredentialsPropogation() throws Exception {
 Credentials creds = new Credentials();
 Token token = mock(Token.class);



[43/50] [abbrv] hadoop git commit: HADOOP-13446. Support running isolated unit tests separate from AWS integration tests. Contributed by Chris Nauroth.

2016-08-23 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContext.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContext.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContext.java
deleted file mode 100644
index 34d78a5..000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContext.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3a.fileContext;
-
-import org.apache.hadoop.fs.TestFileContext;
-
-/**
- * Implementation of TestFileContext for S3a
- */
-public class TestS3AFileContext extends TestFileContext{
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextCreateMkdir.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextCreateMkdir.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextCreateMkdir.java
deleted file mode 100644
index b0c4d84..000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextCreateMkdir.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3a.fileContext;
-
-import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContextCreateMkdirBaseTest;
-import org.apache.hadoop.fs.s3a.S3ATestUtils;
-import org.junit.Before;
-
-/**
- * Extends FileContextCreateMkdirBaseTest for a S3a FileContext
- */
-public class TestS3AFileContextCreateMkdir
-extends FileContextCreateMkdirBaseTest {
-
-  @Before
-  public void setUp() throws IOException, Exception {
-Configuration conf = new Configuration();
-fc = S3ATestUtils.createTestFileContext(conf);
-super.setUp();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextMainOperations.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextMainOperations.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextMainOperations.java
deleted file mode 100644
index 4d200d1..000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextMainOperations.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3a.fileContext;
-
-import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContextMainOperationsBaseTest;
-import org.apache.hadoop.fs.s3a.S3ATestUtils;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-
-/**
- * S3A implementation of FileContextMainOperationsBaseTest
- */
-public class TestS3AFileContextMainOperations

[21/50] [abbrv] hadoop git commit: MAPREDUCE-6310. Add jdiff support to MapReduce. (Li Lu/vinodkv via wangda)

2016-08-23 Thread cdouglas
MAPREDUCE-6310. Add jdiff support to MapReduce. (Li Lu/vinodkv via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d937457
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d937457
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d937457

Branch: refs/heads/HDFS-9806
Commit: 3d937457ee60d617d71b36bb6a29344e771670b4
Parents: 723facf
Author: Wangda Tan 
Authored: Fri Aug 19 16:26:29 2016 -0700
Committer: Wangda Tan 
Committed: Fri Aug 19 16:26:29 2016 -0700

--
 .../Apache_Hadoop_MapReduce_Common_2.6.0.xml|   281 +
 .../Apache_Hadoop_MapReduce_Common_2.7.2.xml|   836 +
 .../Apache_Hadoop_MapReduce_Core_2.6.0.xml  | 30767 
 .../Apache_Hadoop_MapReduce_Core_2.7.2.xml  | 31129 +
 .../Apache_Hadoop_MapReduce_JobClient_2.6.0.xml |   975 +
 .../Apache_Hadoop_MapReduce_JobClient_2.7.2.xml |   990 +
 .../dev-support/jdiff/Null.java |20 +
 .../hadoop-mapreduce-client-common/pom.xml  | 2 +
 .../hadoop-mapreduce-client-core/pom.xml| 2 +
 .../hadoop-mapreduce-client-jobclient/pom.xml   | 2 +
 .../hadoop-mapreduce-client/pom.xml |   127 +
 11 files changed, 65131 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d937457/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.6.0.xml
--
diff --git 
a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.6.0.xml
 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.6.0.xml
new file mode 100644
index 000..66e206b
--- /dev/null
+++ 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.6.0.xml
@@ -0,0 +1,281 @@
[281 added lines of JDiff XML elided: the list archive stripped all XML markup from this hunk, leaving nothing readable.]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d937457/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.7.2.xml
--
diff --git 
a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.7.2.xml
 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.7.2.xml
new file mode 100644
index 000..c20349b
--- /dev/null
+++ 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.7.2.xml
@@ -0,0 +1,836 @@
[836 added lines of JDiff XML elided: the list archive stripped all XML markup from this hunk, leaving nothing readable.]

[01/50] [abbrv] hadoop git commit: YARN-5526. DrainDispatcher#serviceStop blocked if setDrainEventsOnStop invoked (sandflee via Varun Saxena)

2016-08-23 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-9806 c2bcffb34 -> c37346d0e


YARN-5526. DrainDispatcher#serviceStop blocked if setDrainEventsOnStop invoked (sandflee via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/913a8951
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/913a8951
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/913a8951

Branch: refs/heads/HDFS-9806
Commit: 913a8951366a5d816bbae253b230250e0b74849e
Parents: 20f0eb8
Author: Varun Saxena 
Authored: Thu Aug 18 11:38:13 2016 +0530
Committer: Varun Saxena 
Committed: Thu Aug 18 11:38:13 2016 +0530

--
 .../java/org/apache/hadoop/yarn/event/AsyncDispatcher.java | 6 +-
 .../java/org/apache/hadoop/yarn/event/DrainDispatcher.java | 5 +
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/913a8951/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index 5dea1c8..89b5861 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -148,7 +148,7 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
   YarnConfiguration.DEFAULT_DISPATCHER_DRAIN_EVENTS_TIMEOUT);
 
   synchronized (waitForDrained) {
-while (!drained && eventHandlingThread != null
+while (!isDrained() && eventHandlingThread != null
 && eventHandlingThread.isAlive()
 && System.currentTimeMillis() < endTime) {
   waitForDrained.wait(1000);
@@ -303,4 +303,8 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
   protected boolean isEventThreadWaiting() {
 return eventHandlingThread.getState() == Thread.State.WAITING;
   }
+
+  protected boolean isDrained() {
+return drained;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/913a8951/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
index cf4b1b5..f769492 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
@@ -95,6 +95,11 @@ public class DrainDispatcher extends AsyncDispatcher {
   }
 
   @Override
+  protected boolean isDrained() {
+return drained;
+  }
+
+  @Override
   protected void serviceStop() throws Exception {
 stopped = true;
 super.serviceStop();
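The underlying pitfall: DrainDispatcher declares its own drained flag, which shadows the field of the same name in AsyncDispatcher, so the parent's stop loop reading the field directly never observed the subclass's updates; reading through the overridable isDrained() fixes that. A minimal, self-contained illustration of Java field shadowing (illustrative names, not the YARN classes):

  public class ShadowingDemo {
    static class Parent {
      protected boolean drained = false;
      boolean isDrained() { return drained; }   // resolves to Parent.drained
    }
    static class Child extends Parent {
      protected boolean drained = true;         // shadows Parent.drained
      @Override
      boolean isDrained() { return drained; }   // resolves to Child.drained
    }
    public static void main(String[] args) {
      Parent p = new Child();
      System.out.println(p.drained);      // false: field access binds statically
      System.out.println(p.isDrained());  // true: method dispatch is dynamic
    }
  }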





[10/50] [abbrv] hadoop git commit: YARN-5533. JMX AM Used metrics for queue wrong when app submitted to nodelabel partition (Bibin A Chundatt via Varun Saxena)

2016-08-23 Thread cdouglas
YARN-5533. JMX AM Used metrics for queue wrong when app submitted to nodelabel partition (Bibin A Chundatt via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59557e85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59557e85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59557e85

Branch: refs/heads/HDFS-9806
Commit: 59557e85a40fb91c7106e8ff3bfe958ffa244b29
Parents: 8179f9a
Author: Varun Saxena 
Authored: Fri Aug 19 15:01:48 2016 +0530
Committer: Varun Saxena 
Committed: Fri Aug 19 15:01:48 2016 +0530

--
 .../resourcemanager/scheduler/capacity/LeafQueue.java   | 3 ++-
 .../capacity/TestNodeLabelContainerAllocation.java  | 9 +
 2 files changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59557e85/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 6bbe85e..636762f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -774,7 +774,8 @@ public class LeafQueue extends AbstractCSQueue {
   application.getAMResource(partitionName));
   user.getResourceUsage().decAMUsed(partitionName,
   application.getAMResource(partitionName));
-  metrics.decAMUsed(application.getUser(), application.getAMResource());
+  metrics.decAMUsed(application.getUser(),
+  application.getAMResource(partitionName));
 }
 applicationAttemptMap.remove(application.getApplicationAttemptId());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59557e85/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
index 9070577..251e4dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
@@ -41,6 +41,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
@@ -1959,6 +1960,14 @@ public class TestNodeLabelContainerAllocation {
 LeafQueue leafQueue = (LeafQueue) cs.getQueue("a");
 assertEquals(0 * GB, leafQueue.getMetrics().getAvailableMB());
 assertEquals(5 * GB, leafQueue.getMetrics().getAllocatedMB());
+
+// Kill all apps in queue a
+cs.killAllAppsInQueue("a");
+rm1.waitForState(app1.getApplicationId(), RMAppState.KILLED);
+rm1.waitForAppRemovedFromScheduler(app1.getApplicationId());
+
+assertEquals(0 * GB, leafQueue.getMetrics().getUsedAMResourceMB());

[30/50] [abbrv] hadoop git commit: HDFS-10692. Update JDiff report's base version for HDFS from 2.6.0 to 2.7.2. Contributed by Wangda Tan.

2016-08-23 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc7a1c54/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
new file mode 100644
index 000..028ba2d
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
@@ -0,0 +1,21505 @@
[21505 added lines of JDiff XML elided: the list archive stripped all XML markup from this hunk, leaving nothing readable.]
+

[11/50] [abbrv] hadoop git commit: Revert "YARN-5533. JMX AM Used metrics for queue wrong when app submitted to nodelabel partition (Bibin A Chundatt via Varun Saxena)"

2016-08-23 Thread cdouglas
Revert "YARN-5533. JMX AM Used metrics for queue wrong when app submited to 
nodelabel partition (Bibin A Chundatt via Varun Saxena)"

This reverts commit 59557e85a40fb91c7106e8ff3bfe958ffa244b29.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8aed3741
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8aed3741
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8aed3741

Branch: refs/heads/HDFS-9806
Commit: 8aed374182f22c31e886a9a602fb907bbccc4309
Parents: 59557e8
Author: Varun Saxena 
Authored: Fri Aug 19 16:14:16 2016 +0530
Committer: Varun Saxena 
Committed: Fri Aug 19 16:14:16 2016 +0530

--
 .../resourcemanager/scheduler/capacity/LeafQueue.java   | 3 +--
 .../capacity/TestNodeLabelContainerAllocation.java  | 9 -
 2 files changed, 1 insertion(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aed3741/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 636762f..6bbe85e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -774,8 +774,7 @@ public class LeafQueue extends AbstractCSQueue {
   application.getAMResource(partitionName));
   user.getResourceUsage().decAMUsed(partitionName,
   application.getAMResource(partitionName));
-  metrics.decAMUsed(application.getUser(),
-  application.getAMResource(partitionName));
+  metrics.decAMUsed(application.getUser(), application.getAMResource());
 }
 applicationAttemptMap.remove(application.getApplicationAttemptId());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aed3741/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
index 251e4dc..9070577 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
@@ -41,7 +41,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
@@ -1960,14 +1959,6 @@ public class TestNodeLabelContainerAllocation {
 LeafQueue leafQueue = (LeafQueue) cs.getQueue("a");
 assertEquals(0 * GB, leafQueue.getMetrics().getAvailableMB());
 assertEquals(5 * GB, leafQueue.getMetrics().getAllocatedMB());
-
-// Kill all apps in queue a
-cs.killAllAppsInQueue("a");
-rm1.waitForState(app1.getApplicationId(), RMAppState.KILLED);
-rm1.waitForAppRemovedFromScheduler(app1.getApplicationId());
-
- 

[29/50] [abbrv] hadoop git commit: HADOOP-13527. Add Spark to CallerContext LimitedPrivate scope. (Contributed by Weiqing Yang)

2016-08-23 Thread cdouglas
HADOOP-13527. Add Spark to CallerContext LimitedPrivate scope. (Contributed by 
Weiqing Yang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/115ecb52
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/115ecb52
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/115ecb52

Branch: refs/heads/HDFS-9806
Commit: 115ecb52a86b49aad3d058a6b4c1c7926b8b0a40
Parents: d37b45d
Author: Mingliang Liu 
Authored: Sun Aug 21 09:40:29 2016 -0700
Committer: Mingliang Liu 
Committed: Sun Aug 21 09:40:29 2016 -0700

--
 .../src/main/java/org/apache/hadoop/ipc/CallerContext.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/115ecb52/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
index 3d21bfe..bdfa471 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
@@ -33,7 +33,7 @@ import java.util.Arrays;
  * This class is immutable.
  */
 @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "Hive", "MapReduce",
-"Pig", "YARN"})
+"Pig", "Spark", "YARN"})
 @InterfaceStability.Evolving
 public final class CallerContext {
   public static final Charset SIGNATURE_ENCODING = StandardCharsets.UTF_8;
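For a client such as Spark, usage is unchanged by this patch; only the permitted audience grows. A hedged sketch of how a caller typically tags its RPCs with this class (the context string is illustrative):

  import org.apache.hadoop.ipc.CallerContext;

  // Tag subsequent RPCs from this thread; the NameNode can record the
  // context string in its audit log for attribution.
  CallerContext ctx = new CallerContext.Builder("SPARK_app-20160823-0001").build();
  CallerContext.setCurrent(ctx);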





[41/50] [abbrv] hadoop git commit: HDFS-8986. Add option to -du to calculate directory space usage excluding snapshots. Contributed by Xiao Chen.

2016-08-23 Thread cdouglas
HDFS-8986. Add option to -du to calculate directory space usage excluding 
snapshots. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0efea49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0efea49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0efea49

Branch: refs/heads/HDFS-9806
Commit: f0efea490e5aa9dd629d2199aae9c5b1290a17ee
Parents: dd76238
Author: Wei-Chiu Chuang 
Authored: Tue Aug 23 04:13:48 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Aug 23 04:14:24 2016 -0700

--
 .../org/apache/hadoop/fs/ContentSummary.java| 127 --
 .../java/org/apache/hadoop/fs/shell/Count.java  |  20 +-
 .../org/apache/hadoop/fs/shell/FsUsage.java |  31 ++-
 .../src/site/markdown/FileSystemShell.md|  11 +-
 .../org/apache/hadoop/fs/shell/TestCount.java   |   5 +-
 .../src/test/resources/testConf.xml |  12 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |   8 +
 .../src/main/proto/hdfs.proto   |   4 +
 .../ContentSummaryComputationContext.java   |   6 +
 .../hadoop/hdfs/server/namenode/INode.java  |   9 +-
 .../hdfs/server/namenode/INodeDirectory.java|   4 +
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 235 ++-
 12 files changed, 432 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0efea49/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index 3dedbcc..3e75951 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -34,6 +34,11 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
   private long length;
   private long fileCount;
   private long directoryCount;
+  // These fields are to track the snapshot-related portion of the values.
+  private long snapshotLength;
+  private long snapshotFileCount;
+  private long snapshotDirectoryCount;
+  private long snapshotSpaceConsumed;
 
   /** We don't use generics. Instead override spaceConsumed and other methods
   in order to keep backward compatibility. */
@@ -56,6 +61,26 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
   return this;
 }
 
+public Builder snapshotLength(long snapshotLength) {
+  this.snapshotLength = snapshotLength;
+  return this;
+}
+
+public Builder snapshotFileCount(long snapshotFileCount) {
+  this.snapshotFileCount = snapshotFileCount;
+  return this;
+}
+
+public Builder snapshotDirectoryCount(long snapshotDirectoryCount) {
+  this.snapshotDirectoryCount = snapshotDirectoryCount;
+  return this;
+}
+
+public Builder snapshotSpaceConsumed(long snapshotSpaceConsumed) {
+  this.snapshotSpaceConsumed = snapshotSpaceConsumed;
+  return this;
+}
+
 @Override
 public Builder quota(long quota){
   super.quota(quota);
@@ -107,6 +132,10 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
 private long length;
 private long fileCount;
 private long directoryCount;
+private long snapshotLength;
+private long snapshotFileCount;
+private long snapshotDirectoryCount;
+private long snapshotSpaceConsumed;
   }
 
   /** Constructor deprecated by ContentSummary.Builder*/
@@ -142,17 +171,37 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
 this.length = builder.length;
 this.fileCount = builder.fileCount;
 this.directoryCount = builder.directoryCount;
+this.snapshotLength = builder.snapshotLength;
+this.snapshotFileCount = builder.snapshotFileCount;
+this.snapshotDirectoryCount = builder.snapshotDirectoryCount;
+this.snapshotSpaceConsumed = builder.snapshotSpaceConsumed;
   }
 
   /** @return the length */
   public long getLength() {return length;}
 
+  public long getSnapshotLength() {
+return snapshotLength;
+  }
+
   /** @return the directory count */
   public long getDirectoryCount() {return directoryCount;}
 
+  public long getSnapshotDirectoryCount() {
+return snapshotDirectoryCount;
+  }
+
   /** @return the file count */
   public long getFileCount() {return fileCount;}
 
+  public long getSnapshotFileCount() {
+return snapshotFileCount;
+  }
+
+  public long getSnapshotSpaceConsumed() {
+return snapshotSpaceConsumed;
+  }
+
   @Override
   
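Given the new builder fields, a caller can split live usage from snapshot-only usage roughly as follows (a hedged sketch assuming the pre-existing Builder setters length/fileCount/directoryCount and build(); the figures are illustrative):

  import org.apache.hadoop.fs.ContentSummary;

  ContentSummary summary = new ContentSummary.Builder()
      .length(4096).fileCount(3).directoryCount(1)
      .snapshotLength(1024).snapshotFileCount(1)
      .snapshotDirectoryCount(0).snapshotSpaceConsumed(3072)
      .build();
  // Usage excluding snapshots, as the new -du option reports it:
  long liveLength = summary.getLength() - summary.getSnapshotLength();
  long liveFiles = summary.getFileCount() - summary.getSnapshotFileCount();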

[16/50] [abbrv] hadoop git commit: MAPREDUCE-6763. Shuffle server listen queue is too small (Jason Lowe via Varun Saxena)

2016-08-23 Thread cdouglas
MAPREDUCE-6763. Shuffle server listen queue is too small (Jason Lowe via Varun 
Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/723facfa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/723facfa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/723facfa

Branch: refs/heads/HDFS-9806
Commit: 723facfa408f469891e67084bdafa18841a0cf64
Parents: 763f049
Author: Varun Saxena 
Authored: Sat Aug 20 03:58:01 2016 +0530
Committer: Varun Saxena 
Committed: Sat Aug 20 03:58:16 2016 +0530

--
 .../src/main/resources/mapred-default.xml  | 6 ++
 .../src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java | 6 ++
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/723facfa/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 33eece3..73aaa7a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -211,6 +211,12 @@
 </property>
 
 <property>
+  <name>mapreduce.shuffle.listen.queue.size</name>
+  <value>128</value>
+  <description>The length of the shuffle server listen queue.</description>
+</property>
+
+<property>
   <name>mapreduce.shuffle.connection-keep-alive.enable</name>
   <value>false</value>
   <description>set to true to support keep-alive connections.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/723facfa/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 8721ef5..7818c81 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -195,6 +195,10 @@ public class ShuffleHandler extends AuxiliaryService {
   public static final String SHUFFLE_PORT_CONFIG_KEY = 
"mapreduce.shuffle.port";
   public static final int DEFAULT_SHUFFLE_PORT = 13562;
 
+  public static final String SHUFFLE_LISTEN_QUEUE_SIZE =
+  "mapreduce.shuffle.listen.queue.size";
+  public static final int DEFAULT_SHUFFLE_LISTEN_QUEUE_SIZE = 128;
+
   public static final String SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED =
   "mapreduce.shuffle.connection-keep-alive.enable";
   public static final boolean DEFAULT_SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED = 
false;
@@ -504,6 +508,8 @@ public class ShuffleHandler extends AuxiliaryService {
 } catch (Exception ex) {
   throw new RuntimeException(ex);
 }
+bootstrap.setOption("backlog", conf.getInt(SHUFFLE_LISTEN_QUEUE_SIZE,
+DEFAULT_SHUFFLE_LISTEN_QUEUE_SIZE));
 bootstrap.setOption("child.keepAlive", true);
 bootstrap.setPipelineFactory(pipelineFact);
 port = conf.getInt(SHUFFLE_PORT_CONFIG_KEY, DEFAULT_SHUFFLE_PORT);
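Deployments that need a deeper backlog than the new default of 128 can override the property; a small hedged example (the value 511 is illustrative):

  import org.apache.hadoop.conf.Configuration;

  Configuration conf = new Configuration();
  conf.setInt("mapreduce.shuffle.listen.queue.size", 511);
  // The effective backlog is also capped by the kernel,
  // e.g. net.core.somaxconn on Linux.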





[47/50] [abbrv] hadoop git commit: HDFS-9745. TestSecureNNWithQJM#testSecureMode sometimes fails with timeouts. Contributed by Xiao Chen

2016-08-23 Thread cdouglas
HDFS-9745. TestSecureNNWithQJM#testSecureMode sometimes fails with timeouts. 
Contributed by Xiao Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/126d165e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/126d165e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/126d165e

Branch: refs/heads/HDFS-9806
Commit: 126d165efd80e266a8309241f3cf059e358f5019
Parents: 6f9c346
Author: Jason Lowe 
Authored: Tue Aug 23 14:46:58 2016 +
Committer: Jason Lowe 
Committed: Tue Aug 23 14:46:58 2016 +

--
 .../java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/126d165e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
index 9abfb9c..46b016f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
@@ -79,7 +79,7 @@ public class TestSecureNNWithQJM {
   private MiniJournalCluster mjc;
 
   @Rule
-  public Timeout timeout = new Timeout(30000);
+  public Timeout timeout = new Timeout(180000);
 
   @BeforeClass
   public static void init() throws Exception {





[22/50] [abbrv] hadoop git commit: YARN-3388. Allocation in LeafQueue could get stuck because DRF calculator isn't well supported when computing user-limit. (Nathan Roberts via wangda)

2016-08-23 Thread cdouglas
YARN-3388. Allocation in LeafQueue could get stuck because DRF calculator isn't 
well supported when computing user-limit. (Nathan Roberts via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/444b2ea7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/444b2ea7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/444b2ea7

Branch: refs/heads/HDFS-9806
Commit: 444b2ea7afebf9f6c3d356154b71abfd0ea95b23
Parents: 3d93745
Author: Wangda Tan 
Authored: Fri Aug 19 16:28:32 2016 -0700
Committer: Wangda Tan 
Committed: Fri Aug 19 16:28:32 2016 -0700

--
 .../scheduler/capacity/LeafQueue.java   | 206 +--
 .../scheduler/capacity/TestLeafQueue.java   | 198 +-
 .../scheduler/capacity/TestUtils.java   |  24 ++-
 3 files changed, 396 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/444b2ea7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 636762f..1ca69be 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -20,6 +20,9 @@ package 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import java.io.IOException;
 import java.util.*;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
@@ -70,9 +73,11 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPo
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.server.utils.Lock.NoLock;
 import org.apache.hadoop.yarn.util.SystemClock;
+import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Sets;
 
 @Private
 @Unstable
@@ -111,7 +116,7 @@ public class LeafQueue extends AbstractCSQueue {
 
   // cache last cluster resource to compute actual capacity
   private Resource lastClusterResource = Resources.none();
-  
+
   private final QueueResourceLimitsInfo queueResourceLimitsInfo =
   new QueueResourceLimitsInfo();
 
@@ -119,6 +124,10 @@ public class LeafQueue extends AbstractCSQueue {
 
   private OrderingPolicy orderingPolicy = null;
 
+  // Summation of consumed ratios for all users in queue
+  private float totalUserConsumedRatio = 0;
+  private UsageRatios qUsageRatios;
+
   // record all ignore partition exclusivityRMContainer, this will be used to 
do
   // preemption, key is the partition of the RMContainer allocated on
  private Map<String, TreeSet<RMContainer>>
ignorePartitionExclusivityRMContainers =
@@ -135,6 +144,8 @@ public class LeafQueue extends AbstractCSQueue {
 // One time initialization is enough since it is static ordering policy
 this.pendingOrderingPolicy = new FifoOrderingPolicyForPendingApps();
 
+qUsageRatios = new UsageRatios();
+
 if(LOG.isDebugEnabled()) {
   LOG.debug("LeafQueue:" + " name=" + queueName
 + ", fullname=" + getQueuePath());
@@ -159,7 +170,7 @@ public class LeafQueue extends AbstractCSQueue {
 setQueueResourceLimitsInfo(clusterResource);
 
 CapacitySchedulerConfiguration conf = csContext.getConfiguration();
-
+
 
setOrderingPolicy(conf.getOrderingPolicy(getQueuePath()));
 
 userLimit = conf.getUserLimit(getQueuePath());
@@ -1149,6 +1160,9 @@ public class LeafQueue extends AbstractCSQueue {
   private Resource computeUserLimit(FiCaSchedulerApp application,
   Resource clusterResource, User user,
   String nodePartition, SchedulingMode schedulingMode) {
+Resource partitionResource = labelManager.getResourceByLabel(nodePartition,
+clusterResource);
+
 // What is our current 

[45/50] [abbrv] hadoop git commit: HADOOP-13446. Support running isolated unit tests separate from AWS integration tests. Contributed by Chris Nauroth.

2016-08-23 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java
new file mode 100644
index 000..b0b8a65
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.hadoop.util.StopWatch;
+import org.junit.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+/**
+ * Basic unit test for S3A's blocking executor service.
+ */
+public class ITestBlockingThreadPoolExecutorService {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+  BlockingThreadPoolExecutorService.class);
+
+  private static final int NUM_ACTIVE_TASKS = 4;
+  private static final int NUM_WAITING_TASKS = 2;
+  private static final int TASK_SLEEP_MSEC = 100;
+  private static final int SHUTDOWN_WAIT_MSEC = 200;
+  private static final int SHUTDOWN_WAIT_TRIES = 5;
+  private static final int BLOCKING_THRESHOLD_MSEC = 50;
+
+  private static final Integer SOME_VALUE = 1337;
+
+  private static BlockingThreadPoolExecutorService tpe = null;
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+ensureDestroyed();
+  }
+
+  /**
+   * Basic test of running one trivial task.
+   */
+  @Test
+  public void testSubmitCallable() throws Exception {
+ensureCreated();
+ListenableFuture<Integer> f = tpe.submit(callableSleeper);
+Integer v = f.get();
+assertEquals(SOME_VALUE, v);
+  }
+
+  /**
+   * More involved test, including detecting blocking when at capacity.
+   */
+  @Test
+  public void testSubmitRunnable() throws Exception {
+ensureCreated();
+int totalTasks = NUM_ACTIVE_TASKS + NUM_WAITING_TASKS;
+StopWatch stopWatch = new StopWatch().start();
+for (int i = 0; i < totalTasks; i++) {
+  tpe.submit(sleeper);
+  assertDidntBlock(stopWatch);
+}
+tpe.submit(sleeper);
+assertDidBlock(stopWatch);
+  }
+
+  @Test
+  public void testShutdown() throws Exception {
+// Cover create / destroy, regardless of when this test case runs
+ensureCreated();
+ensureDestroyed();
+
+// Cover create, execute, destroy, regardless of when test case runs
+ensureCreated();
+testSubmitRunnable();
+ensureDestroyed();
+  }
+
+  // Helper functions, etc.
+
+  private void assertDidntBlock(StopWatch sw) {
+try {
+  assertFalse("Non-blocking call took too long.",
+  sw.now(TimeUnit.MILLISECONDS) > BLOCKING_THRESHOLD_MSEC);
+} finally {
+  sw.reset().start();
+}
+  }
+
+  private void assertDidBlock(StopWatch sw) {
+try {
+  if (sw.now(TimeUnit.MILLISECONDS) < BLOCKING_THRESHOLD_MSEC) {
+throw new RuntimeException("Blocking call returned too fast.");
+  }
+} finally {
+  sw.reset().start();
+}
+  }
+
+  private Runnable sleeper = new Runnable() {
+@Override
+public void run() {
+  String name = Thread.currentThread().getName();
+  try {
+Thread.sleep(TASK_SLEEP_MSEC);
+  } catch (InterruptedException e) {
+LOG.info("Thread {} interrupted.", name);
+Thread.currentThread().interrupt();
+  }
+}
+  };
+
+  private Callable<Integer> callableSleeper = new Callable<Integer>() {
+@Override
+public Integer call() throws Exception {
+  sleeper.run();
+  return SOME_VALUE;
+}
+  };
+
+  /**
+   * Helper function to create thread pool under test.
+   */
+  private static void ensureCreated() throws Exception {
+if (tpe == null) {
+  LOG.debug("Creating thread pool");
+  tpe 

[27/50] [abbrv] hadoop git commit: HDFS-10764. Fix INodeFile#getBlocks to not return null. Contributed by Arpit Agarwal.

2016-08-23 Thread cdouglas
HDFS-10764. Fix INodeFile#getBlocks to not return null. Contributed by Arpit 
Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0faee62a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0faee62a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0faee62a

Branch: refs/heads/HDFS-9806
Commit: 0faee62a0c8c1b8fd83227babfd00fbc2b26bddf
Parents: 99603e9
Author: Jing Zhao 
Authored: Fri Aug 19 22:13:36 2016 -0700
Committer: Jing Zhao 
Committed: Fri Aug 19 22:13:36 2016 -0700

--
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 29 
 1 file changed, 11 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0faee62a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 63945a4..12ead7f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -200,8 +200,8 @@ public class INodeFile extends INodeWithAdditionalFields
   public INodeFile(INodeFile that) {
 super(that);
 this.header = that.header;
-this.blocks = that.blocks;
 this.features = that.features;
+setBlocks(that.blocks);
   }
   
   public INodeFile(INodeFile that, FileDiffList diffs) {
@@ -271,9 +271,6 @@ public class INodeFile extends INodeWithAdditionalFields
   /** Assert all blocks are complete. */
   private void assertAllBlocksComplete(int numCommittedAllowed,
   short minReplication) {
-if (blocks == null) {
-  return;
-}
 for (int i = 0; i < blocks.length; i++) {
   final String err = checkBlockComplete(blocks, i, numCommittedAllowed,
   minReplication);
@@ -342,7 +339,7 @@ public class INodeFile extends INodeWithAdditionalFields
   BlockInfo removeLastBlock(Block oldblock) {
 Preconditions.checkState(isUnderConstruction(),
 "file is no longer under construction");
-if (blocks == null || blocks.length == 0) {
+if (blocks.length == 0) {
   return null;
 }
 int size_1 = blocks.length - 1;
@@ -618,7 +615,7 @@ public class INodeFile extends INodeWithAdditionalFields
*/
   void addBlock(BlockInfo newblock) {
 Preconditions.checkArgument(newblock.isStriped() == this.isStriped());
-if (this.blocks == null) {
+if (this.blocks.length == 0) {
   this.setBlocks(new BlockInfo[]{newblock});
 } else {
   int size = this.blocks.length;
@@ -631,12 +628,12 @@ public class INodeFile extends INodeWithAdditionalFields
 
   /** Set the blocks. */
   private void setBlocks(BlockInfo[] blocks) {
-this.blocks = blocks;
+this.blocks = (blocks != null ? blocks : BlockInfo.EMPTY_ARRAY);
   }
 
   /** Clear all blocks of the file. */
   public void clearBlocks() {
-setBlocks(BlockInfo.EMPTY_ARRAY);
+this.blocks = BlockInfo.EMPTY_ARRAY;
   }
 
   @Override
@@ -836,7 +833,7 @@ public class INodeFile extends INodeWithAdditionalFields
*/
   public final long computeFileSize(boolean includesLastUcBlock,
   boolean usePreferredBlockSize4LastUcBlock) {
-if (blocks == null || blocks.length == 0) {
+if (blocks.length == 0) {
   return 0;
 }
 final int last = blocks.length - 1;
@@ -876,10 +873,6 @@ public class INodeFile extends INodeWithAdditionalFields
   // TODO: support EC with heterogeneous storage
   public final QuotaCounts storagespaceConsumedStriped() {
 QuotaCounts counts = new QuotaCounts.Builder().build();
-if (blocks == null || blocks.length == 0) {
-  return counts;
-}
-
 for (BlockInfo b : blocks) {
   Preconditions.checkState(b.isStriped());
   long blockSize = b.isComplete() ?
@@ -931,7 +924,7 @@ public class INodeFile extends INodeWithAdditionalFields
* Return the penultimate allocated block for this file.
*/
   BlockInfo getPenultimateBlock() {
-if (blocks == null || blocks.length <= 1) {
+if (blocks.length <= 1) {
   return null;
 }
 return blocks[blocks.length - 2];
@@ -939,12 +932,12 @@ public class INodeFile extends INodeWithAdditionalFields
 
   @Override
   public BlockInfo getLastBlock() {
-return blocks == null || blocks.length == 0? null: blocks[blocks.length-1];
+return blocks.length == 0 ? null: blocks[blocks.length-1];
   }
 
   @Override
   public int numBlocks() {
-return blocks == null ? 0 : blocks.length;
+return blocks.length;
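
The pattern running through this patch is the null-object idiom: normalize
blocks to a shared empty array once, at the setter boundary, so every
"blocks == null" guard elsewhere can be deleted. A minimal sketch of the
idiom, with illustrative names rather than the INodeFile internals:

    class FileNode {
      private static final int[] EMPTY = {}; // shared immutable sentinel
      private int[] blocks = EMPTY;

      void setBlocks(int[] b) {
        this.blocks = (b != null) ? b : EMPTY; // normalize at the boundary
      }

      int numBlocks() {
        return blocks.length; // safe: blocks is never null
      }
    }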

[25/50] [abbrv] hadoop git commit: HADOOP-13428. Fixed hadoop-common build files to generate jdiff by working around a jdiff-bug. Contributed by Wangda Tan.

2016-08-23 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99603e90/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml
 
b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml
new file mode 100644
index 000..5ef99b2
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml
@@ -0,0 +1,46648 @@

[12/50] [abbrv] hadoop git commit: YARN-5533. JMX AM Used metrics for queue wrong when app submitted to nodelabel partition (Bibin A Chundatt via Varun Saxena)

2016-08-23 Thread cdouglas
YARN-5533. JMX AM Used metrics for queue wrong when app submitted to nodelabel 
partition (Bibin A Chundatt via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/091dd19e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/091dd19e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/091dd19e

Branch: refs/heads/HDFS-9806
Commit: 091dd19e8636b8bd5668dbb3bcd8bbce7c952274
Parents: 8aed3741
Author: Varun Saxena 
Authored: Fri Aug 19 17:30:17 2016 +0530
Committer: Varun Saxena 
Committed: Fri Aug 19 17:30:17 2016 +0530

--
 .../resourcemanager/scheduler/capacity/LeafQueue.java   | 3 ++-
 .../capacity/TestNodeLabelContainerAllocation.java  | 9 +
 2 files changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/091dd19e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 6bbe85e..636762f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -774,7 +774,8 @@ public class LeafQueue extends AbstractCSQueue {
   application.getAMResource(partitionName));
   user.getResourceUsage().decAMUsed(partitionName,
   application.getAMResource(partitionName));
-  metrics.decAMUsed(application.getUser(), application.getAMResource());
+  metrics.decAMUsed(application.getUser(),
+  application.getAMResource(partitionName));
 }
 applicationAttemptMap.remove(application.getApplicationAttemptId());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/091dd19e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
index 9070577..5ccb6f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
@@ -41,6 +41,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
@@ -1959,6 +1960,14 @@ public class TestNodeLabelContainerAllocation {
 LeafQueue leafQueue = (LeafQueue) cs.getQueue("a");
 assertEquals(0 * GB, leafQueue.getMetrics().getAvailableMB());
 assertEquals(5 * GB, leafQueue.getMetrics().getAllocatedMB());
+
+// Kill all apps in queue a
+cs.killAllAppsInQueue("a");
+rm1.waitForState(app1.getApplicationId(), RMAppState.KILLED);
+rm1.waitForAppRemovedFromScheduler(app1.getApplicationId());
+
+assertEquals(0 * GB, leafQueue.getMetrics().getUsedAMResourceMB());
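
The essence of the fix is visible in the LeafQueue hunk above: the decrement
must use the AM resource recorded for the same partition that was charged at
admission, or the queue's JMX "AM Used" gauge drifts after apps are removed.
A small sketch of that invariant, with illustrative names rather than the
real QueueMetrics API:

    import java.util.HashMap;
    import java.util.Map;

    class AmUsage {
      private final Map<String, Long> amUsedMb = new HashMap<String, Long>();

      void incAMUsed(String partition, long mb) {
        Long cur = amUsedMb.get(partition);
        amUsedMb.put(partition, (cur == null ? 0 : cur) + mb);
      }

      void decAMUsed(String partition, long mb) {
        Long cur = amUsedMb.get(partition);
        // must key on the same partition as the matching incAMUsed call
        amUsedMb.put(partition, (cur == null ? 0 : cur) - mb);
      }
    }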

[03/50] [abbrv] hadoop git commit: YARN-4676. Automatic and Asynchronous Decommissioning Nodes Status Tracking. Contributed by Daniel Zhi. (cherry picked from commit d464483bf7f0b3e3be3ba32cd6c3eee546

2016-08-23 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da69c32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
index 22aa0ee..5a89e54 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
@@ -272,6 +272,11 @@ public class MockNodes {
 @Override
 public void setUntrackedTimeStamp(long timeStamp) {
 }
+
+@Override
+public Integer getDecommissioningTimeout() {
+  return null;
+}
   };
 
   private static RMNode buildRMNode(int rack, final Resource perNode,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da69c32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 5856e59..f843261 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -709,6 +709,9 @@ public class MockRM extends ResourceManager {
   public void waitForState(NodeId nodeId, NodeState finalState)
   throws InterruptedException {
 RMNode node = getRMContext().getRMNodes().get(nodeId);
+if (node == null) {
+  node = getRMContext().getInactiveRMNodes().get(nodeId);
+}
 Assert.assertNotNull("node shouldn't be null", node);
 int timeWaiting = 0;
 while (!finalState.equals(node.getState())) {
@@ -722,11 +725,17 @@ public class MockRM extends ResourceManager {
   timeWaiting += WAIT_MS_PER_LOOP;
 }
 
-System.out.println("Node State is : " + node.getState());
+System.out.println("Node " + nodeId + " State is : " + node.getState());
 Assert.assertEquals("Node state is not correct (timedout)", finalState,
 node.getState());
   }
 
+  public void sendNodeEvent(MockNM nm, RMNodeEventType event) throws Exception 
{
+RMNodeImpl node = (RMNodeImpl)
+getRMContext().getRMNodes().get(nm.getNodeId());
+node.handle(new RMNodeEvent(nm.getNodeId(), event));
+  }
+
   public KillApplicationResponse killApp(ApplicationId appId) throws Exception 
{
 ApplicationClientProtocol client = getClientRMService();
 KillApplicationRequest req = KillApplicationRequest.newInstance(appId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da69c32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestDecommissioningNodesWatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestDecommissioningNodesWatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestDecommissioningNodesWatcher.java
new file mode 100644
index 000..690de30
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestDecommissioningNodesWatcher.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * 
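
Taken together, the MockRM changes let decommissioning tests drive a node
through its lifecycle and then wait on it even after it leaves the active
set. A plausible usage in a test, assuming a registered MockNM nm1 and the
pre-existing RMNodeEventType.DECOMMISSION event:

    rm.sendNodeEvent(nm1, RMNodeEventType.DECOMMISSION);
    // waitForState now also searches getInactiveRMNodes(), so this works:
    rm.waitForState(nm1.getNodeId(), NodeState.DECOMMISSIONED);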

[38/50] [abbrv] hadoop git commit: YARN-5544. TestNodeBlacklistingOnAMFailures fails on trunk. Contributed by Sunil G.

2016-08-23 Thread cdouglas
YARN-5544. TestNodeBlacklistingOnAMFailures fails on trunk. Contributed by 
Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d5997d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d5997d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d5997d2

Branch: refs/heads/HDFS-9806
Commit: 0d5997d2b98eb89e72828dfcd78f02aa4e7e1e67
Parents: 8cc4a67
Author: Rohith Sharma K S 
Authored: Tue Aug 23 10:33:28 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Aug 23 14:37:39 2016 +0530

--
 .../TestNodeBlacklistingOnAMFailures.java   | 16 
 1 file changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d5997d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
index ef6d43b..7a24b7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
@@ -68,6 +68,9 @@ public class TestNodeBlacklistingOnAMFailures {
 MockRM rm = startRM(conf, dispatcher);
 CapacityScheduler scheduler = (CapacityScheduler) 
rm.getResourceScheduler();
 
+// Register 5 nodes, so that we can blacklist at least one if the AM
+// container fails. The calculation works out to 5 nodes * 0.2 (default) = 1.
+// First register 2 nodes, and after the AM is launched register 3 more.
 MockNM nm1 =
 new MockNM("127.0.0.1:1234", 8000, rm.getResourceTrackerService());
 nm1.registerNode();
@@ -93,6 +96,19 @@ public class TestNodeBlacklistingOnAMFailures {
   otherNode = nm1;
 }
 
+// register 3 nodes now
+MockNM nm3 =
+new MockNM("127.0.0.3:2345", 8000, rm.getResourceTrackerService());
+nm3.registerNode();
+
+MockNM nm4 =
+new MockNM("127.0.0.4:2345", 8000, rm.getResourceTrackerService());
+nm4.registerNode();
+
+MockNM nm5 =
+new MockNM("127.0.0.5:2345", 8000, rm.getResourceTrackerService());
+nm5.registerNode();
+
 // Set the exit status to INVALID so that we can verify that the system
 // automatically blacklists the node
 makeAMContainerExit(rm, amContainerId, currentNode,
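
The arithmetic the earlier comment relies on is worth spelling out: with an
assumed default failure threshold of 0.2, the number of nodes the AM launcher
may blacklist is capped at a fraction of the cluster, so five nodes are
needed before even one can be excluded. A sketch of that calculation (the
exact config key and rounding live in YARN's blacklist manager):

    int numNodes = 5;
    double disableThreshold = 0.2;                              // assumed default
    int maxBlacklistable = (int) (numNodes * disableThreshold); // = 1 node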





[39/50] [abbrv] hadoop git commit: HDFS-10783. The option '-maxSize' and '-step' fail in OfflineImageViewer. Contributed by Yiqun Lin.

2016-08-23 Thread cdouglas
HDFS-10783. The option '-maxSize' and '-step' fail in OfflineImageViewer. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e90f3359
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e90f3359
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e90f3359

Branch: refs/heads/HDFS-9806
Commit: e90f3359de299ef5e3a54ca71070e3dfe1dbb98c
Parents: 0d5997d
Author: Akira Ajisaka 
Authored: Tue Aug 23 19:56:27 2016 +0900
Committer: Akira Ajisaka 
Committed: Tue Aug 23 19:57:23 2016 +0900

--
 .../offlineImageViewer/OfflineImageViewer.java   |  2 ++
 .../TestOfflineImageViewer.java  | 19 +++
 2 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e90f3359/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
index 7f81ba8..770cde1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
@@ -186,6 +186,8 @@ public class OfflineImageViewer {
 
 options.addOption("p", "processor", true, "");
 options.addOption("h", "help", false, "");
+options.addOption("maxSize", true, "");
+options.addOption("step", true, "");
 options.addOption("skipBlocks", false, "");
 options.addOption("printToScreen", false, "");
 options.addOption("delimiter", true, "");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e90f3359/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index b9aa7f3..a7c30ec 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -601,4 +601,23 @@ public class TestOfflineImageViewer {
 "FileDistribution", "-maxSize", "23", "-step", "4"});
 assertEquals(0, status);
   }
+
+  @Test
+  public void testOfflineImageViewerMaxSizeAndStepOptions() throws Exception {
+final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+final PrintStream out = new PrintStream(bytes);
+final PrintStream oldOut = System.out;
+try {
+  System.setOut(out);
+  // Add the -h option so that the test exercises only option parsing
+  // and skips the subsequent image-processing operations.
+  OfflineImageViewer.main(new String[] {"-i", "-", "-o", "-", "-p",
+  "FileDistribution", "-maxSize", "512", "-step", "8", "-h"});
+  Assert.assertFalse(bytes.toString().contains(
+  "Error parsing command-line options: "));
+} finally {
+  System.setOut(oldOut);
+  IOUtils.closeStream(out);
+}
+  }
 }
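
With the two options registered, a FileDistribution run can pass them through
the standard tool entry point; for example (the image and output paths are
illustrative):

    hdfs oiv -p FileDistribution -maxSize 512 -step 8 \
        -i /tmp/fsimage_0000000000000000024 -o /tmp/fsimage.dist.txt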





[14/50] [abbrv] hadoop git commit: HADOOP-12765. HttpServer2 should switch to using the non-blocking SslSelectChannelConnector to prevent performance degradation when handling SSL connections. Contrib

2016-08-23 Thread cdouglas
HADOOP-12765. HttpServer2 should switch to using the non-blocking 
SslSelectChannelConnector to prevent performance degradation when handling SSL 
connections. Contributed by Min Shen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03a9343d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03a9343d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03a9343d

Branch: refs/heads/HDFS-9806
Commit: 03a9343d5798384b66fbd21e1e028acaf55b00e9
Parents: 2550371
Author: Wei-Chiu Chuang 
Authored: Fri Aug 19 09:22:49 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Aug 19 09:22:49 2016 -0700

--
 hadoop-common-project/hadoop-common/pom.xml |  5 ++
 .../org/apache/hadoop/http/HttpServer2.java | 76 +++-
 .../ssl/SslSelectChannelConnectorSecure.java| 58 +++
 .../security/ssl/SslSocketConnectorSecure.java  | 58 ---
 .../hadoop/crypto/key/kms/server/MiniKMS.java   |  9 +--
 .../org/apache/hadoop/test/TestJettyHelper.java |  6 +-
 hadoop-project/pom.xml  |  5 ++
 7 files changed, 118 insertions(+), 99 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03a9343d/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index ef2fdf5..c28a05c 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -106,6 +106,11 @@
   compile
 
 
+  org.mortbay.jetty
+  jetty-sslengine
+  compile
+
+
   javax.servlet.jsp
   jsp-api
   runtime

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03a9343d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 8199c9b..a2bb18f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -56,7 +56,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
-import org.apache.hadoop.security.ssl.SslSocketConnectorSecure;
+import org.apache.hadoop.security.ssl.SslSelectChannelConnectorSecure;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
 import org.apache.hadoop.security.SecurityUtil;
@@ -77,7 +77,7 @@ import org.mortbay.jetty.handler.ContextHandlerCollection;
 import org.mortbay.jetty.handler.HandlerCollection;
 import org.mortbay.jetty.handler.RequestLogHandler;
 import org.mortbay.jetty.nio.SelectChannelConnector;
-import org.mortbay.jetty.security.SslSocketConnector;
+import org.mortbay.jetty.security.SslSelectChannelConnector;
 import org.mortbay.jetty.servlet.AbstractSessionManager;
 import org.mortbay.jetty.servlet.Context;
 import org.mortbay.jetty.servlet.DefaultServlet;
@@ -332,29 +332,7 @@ public final class HttpServer2 implements FilterContainer {
 if ("http".equals(scheme)) {
   listener = HttpServer2.createDefaultChannelConnector();
 } else if ("https".equals(scheme)) {
-  SslSocketConnector c = new SslSocketConnectorSecure();
-  c.setHeaderBufferSize(1024*64);
-  c.setNeedClientAuth(needsClientAuth);
-  c.setKeyPassword(keyPassword);
-
-  if (keyStore != null) {
-c.setKeystore(keyStore);
-c.setKeystoreType(keyStoreType);
-c.setPassword(keyStorePassword);
-  }
-
-  if (trustStore != null) {
-c.setTruststore(trustStore);
-c.setTruststoreType(trustStoreType);
-c.setTrustPassword(trustStorePassword);
-  }
-
-  if(null != excludeCiphers && !excludeCiphers.isEmpty()) {
-c.setExcludeCipherSuites(excludeCiphers.split(","));
-LOG.info("Excluded Cipher List:" + excludeCiphers);
-  }
-
-  listener = c;
+  listener = createHttpsChannelConnector();
 
 } else {
   throw new HadoopIllegalArgumentException(
@@ -367,6 +345,32 @@ public final class HttpServer2 implements FilterContainer {
   server.loadListeners();
   return server;
 }
+
+private Connector 
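
The truncated factory above presumably carries the same body as the inline
block it replaces; a reconstruction under that assumption, with every setter
taken from the deleted code (treat it as an approximation, not the committed
method):

    private Connector createHttpsChannelConnector() {
      SslSelectChannelConnector c = new SslSelectChannelConnectorSecure();
      c.setHeaderBufferSize(1024 * 64);
      c.setNeedClientAuth(needsClientAuth);
      c.setKeyPassword(keyPassword);
      if (keyStore != null) {
        c.setKeystore(keyStore);
        c.setKeystoreType(keyStoreType);
        c.setPassword(keyStorePassword);
      }
      if (trustStore != null) {
        c.setTruststore(trustStore);
        c.setTruststoreType(trustStoreType);
        c.setTrustPassword(trustStorePassword);
      }
      if (null != excludeCiphers && !excludeCiphers.isEmpty()) {
        c.setExcludeCipherSuites(excludeCiphers.split(","));
        LOG.info("Excluded Cipher List:" + excludeCiphers);
      }
      return c;
    }

The switch matters because the old SslSocketConnector dedicates a thread per
connection, while the NIO SslSelectChannelConnector multiplexes them.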

[40/50] [abbrv] hadoop git commit: HADOOP-13524. mvn eclipse:eclipse generates .gitignore'able files. Contributed by Vinod Kumar Vavilapalli

2016-08-23 Thread cdouglas
HADOOP-13524. mvn eclipse:eclipse generates .gitignore'able files. Contributed 
by Vinod Kumar Vavilapalli


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd76238a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd76238a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd76238a

Branch: refs/heads/HDFS-9806
Commit: dd76238a3bafd58faa6f38f075505bef1012f150
Parents: e90f335
Author: Jian He 
Authored: Tue Aug 23 19:13:14 2016 +0800
Committer: Jian He 
Committed: Tue Aug 23 19:13:14 2016 +0800

--
 .gitignore | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd76238a/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 998287d..a5d69d0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,11 @@
 .settings
 target
 build
+
+# External tool builders
+*/.externalToolBuilders
+*/maven-eclipse.xml
+
 hadoop-common-project/hadoop-kms/downloads/
 hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads





[48/50] [abbrv] hadoop git commit: HADOOP-13497. fix wrong command in CredentialProviderAPI.md. Contributed by Yuanbo Liu.

2016-08-23 Thread cdouglas
HADOOP-13497. fix wrong command in CredentialProviderAPI.md. Contributed by 
Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8aae8d6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8aae8d6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8aae8d6b

Branch: refs/heads/HDFS-9806
Commit: 8aae8d6bf03ade0607547ed461dc99a336a7e9d4
Parents: 126d165
Author: Masatake Iwasaki 
Authored: Wed Aug 24 00:03:27 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Aug 24 00:03:27 2016 +0900

--
 .../hadoop-common/src/site/markdown/CredentialProviderAPI.md   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aae8d6b/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
index 1142372..a40bf2b 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
@@ -119,7 +119,7 @@ See the command options detail in the [Commands 
Manual](CommandsManual.html#cred
 
 Utilizing the credential command will often be for provisioning a password or 
secret to a particular credential store provider. In order to explicitly 
indicate which provider store to use the `-provider` option should be used.
 
-Example: `hadoop credential create ssl.server.keystore.password 
jceks://file/tmp/test.jceks`
+Example: `hadoop credential create ssl.server.keystore.password -provider 
jceks://file/tmp/test.jceks`
 
 In order to indicate a particular provider type and location, the user must 
provide the `hadoop.security.credential.provider.path` configuration element in 
core-site.xml or use the command line option `-provider` on each of the 
credential management commands. This provider path is a comma-separated list of 
URLs that indicates the type and location of a list of providers that should be 
consulted. For example, the following path: 
`user:///,jceks://file/tmp/test.jceks,jceks://h...@nn1.example.com/my/path/test.jceks`
 indicates that the current user's credentials file should be consulted through 
the User Provider, that the local file located at `/tmp/test.jceks` is a Java 
Keystore Provider and that the file located within HDFS at 
`nn1.example.com/my/path/test.jceks` is also a store for a Java Keystore 
Provider.
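
Once provisioned, the stored entry can be checked with the same provider
path; for example:

    hadoop credential list -provider jceks://file/tmp/test.jceks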
 





[19/50] [abbrv] hadoop git commit: MAPREDUCE-6310. Add jdiff support to MapReduce. (Li Lu/vinodkv via wangda)

2016-08-23 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d937457/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.7.2.xml
--
diff --git 
a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.7.2.xml
 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.7.2.xml
new file mode 100644
index 000..77074d3
--- /dev/null
+++ 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.7.2.xml
@@ -0,0 +1,31129 @@

[46/50] [abbrv] hadoop git commit: HADOOP-13446. Support running isolated unit tests separate from AWS integration tests. Contributed by Chris Nauroth.

2016-08-23 Thread cdouglas
HADOOP-13446. Support running isolated unit tests separate from AWS integration 
tests. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f9c346e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f9c346e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f9c346e

Branch: refs/heads/HDFS-9806
Commit: 6f9c346e577325ec2059d83d5636b5ff7fa6cdce
Parents: f0efea4
Author: Chris Nauroth 
Authored: Tue Aug 23 07:18:49 2016 -0700
Committer: Chris Nauroth 
Committed: Tue Aug 23 07:18:49 2016 -0700

--
 hadoop-project/pom.xml  |   5 +
 hadoop-tools/hadoop-aws/pom.xml |  95 +++-
 .../src/site/markdown/tools/hadoop-aws/index.md |  67 ++-
 .../fs/contract/s3a/ITestS3AContractCreate.java |  35 ++
 .../fs/contract/s3a/ITestS3AContractDelete.java |  34 ++
 .../fs/contract/s3a/ITestS3AContractDistCp.java |  46 ++
 .../s3a/ITestS3AContractGetFileStatus.java  |  51 ++
 .../fs/contract/s3a/ITestS3AContractMkdir.java  |  34 ++
 .../fs/contract/s3a/ITestS3AContractOpen.java   |  34 ++
 .../fs/contract/s3a/ITestS3AContractRename.java |  62 +++
 .../contract/s3a/ITestS3AContractRootDir.java   |  72 +++
 .../fs/contract/s3a/ITestS3AContractSeek.java   |  34 ++
 .../fs/contract/s3a/TestS3AContractCreate.java  |  33 --
 .../fs/contract/s3a/TestS3AContractDelete.java  |  31 --
 .../fs/contract/s3a/TestS3AContractDistCp.java  |  46 --
 .../s3a/TestS3AContractGetFileStatus.java   |  47 --
 .../fs/contract/s3a/TestS3AContractMkdir.java   |  34 --
 .../fs/contract/s3a/TestS3AContractOpen.java|  31 --
 .../fs/contract/s3a/TestS3AContractRename.java  |  61 ---
 .../fs/contract/s3a/TestS3AContractRootDir.java |  72 ---
 .../fs/contract/s3a/TestS3AContractSeek.java|  31 --
 .../fs/contract/s3n/ITestS3NContractCreate.java |  41 ++
 .../fs/contract/s3n/ITestS3NContractDelete.java |  34 ++
 .../fs/contract/s3n/ITestS3NContractMkdir.java  |  34 ++
 .../fs/contract/s3n/ITestS3NContractOpen.java   |  34 ++
 .../fs/contract/s3n/ITestS3NContractRename.java |  35 ++
 .../contract/s3n/ITestS3NContractRootDir.java   |  35 ++
 .../fs/contract/s3n/ITestS3NContractSeek.java   |  34 ++
 .../fs/contract/s3n/TestS3NContractCreate.java  |  38 --
 .../fs/contract/s3n/TestS3NContractDelete.java  |  31 --
 .../fs/contract/s3n/TestS3NContractMkdir.java   |  34 --
 .../fs/contract/s3n/TestS3NContractOpen.java|  31 --
 .../fs/contract/s3n/TestS3NContractRename.java  |  32 --
 .../fs/contract/s3n/TestS3NContractRootDir.java |  35 --
 .../fs/contract/s3n/TestS3NContractSeek.java|  31 --
 .../ITestBlockingThreadPoolExecutorService.java | 182 +++
 .../fs/s3a/ITestS3AAWSCredentialsProvider.java  | 250 +
 .../fs/s3a/ITestS3ABlockingThreadPool.java  |  80 +++
 .../apache/hadoop/fs/s3a/ITestS3ABlocksize.java |  96 
 .../hadoop/fs/s3a/ITestS3AConfiguration.java| 435 +++
 .../hadoop/fs/s3a/ITestS3ACredentialsInURL.java | 155 ++
 .../hadoop/fs/s3a/ITestS3AEncryption.java   | 104 
 .../ITestS3AEncryptionAlgorithmPropagation.java |  83 +++
 .../s3a/ITestS3AEncryptionFastOutputStream.java |  35 ++
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  | 192 +++
 .../hadoop/fs/s3a/ITestS3AFastOutputStream.java |  74 +++
 .../fs/s3a/ITestS3AFileOperationCost.java   | 191 +++
 .../fs/s3a/ITestS3AFileSystemContract.java  | 106 
 .../fs/s3a/ITestS3ATemporaryCredentials.java| 148 +
 .../TestBlockingThreadPoolExecutorService.java  | 182 ---
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   | 250 -
 .../fs/s3a/TestS3ABlockingThreadPool.java   |  80 ---
 .../apache/hadoop/fs/s3a/TestS3ABlocksize.java  |  93 
 .../hadoop/fs/s3a/TestS3AConfiguration.java | 429 ---
 .../hadoop/fs/s3a/TestS3ACredentialsInURL.java  | 155 --
 .../apache/hadoop/fs/s3a/TestS3AEncryption.java | 104 
 .../TestS3AEncryptionAlgorithmPropagation.java  |  82 ---
 .../s3a/TestS3AEncryptionFastOutputStream.java  |  35 --
 .../hadoop/fs/s3a/TestS3AFailureHandling.java   | 194 ---
 .../hadoop/fs/s3a/TestS3AFastOutputStream.java  |  74 ---
 .../hadoop/fs/s3a/TestS3AFileOperationCost.java | 191 ---
 .../fs/s3a/TestS3AFileSystemContract.java   | 104 
 .../fs/s3a/TestS3ATemporaryCredentials.java | 148 -
 .../fs/s3a/fileContext/ITestS3AFileContext.java |  23 +
 .../ITestS3AFileContextCreateMkdir.java |  35 ++
 .../ITestS3AFileContextMainOperations.java  |  60 +++
 .../ITestS3AFileContextStatistics.java  |  61 +++
 .../s3a/fileContext/ITestS3AFileContextURI.java |  44 ++
 .../fileContext/ITestS3AFileContextUtil.java|  34 ++
 .../fs/s3a/fileContext/TestS3AFileContext.java  |  23 -
 .../TestS3AFileContextCreateMkdir.java  |  35 --
 .../TestS3AFileContextMainOperations.java   
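
The Test-to-ITest rename is what makes the split work: Surefire keeps running
the Test* classes as plain unit tests, while the ITest* classes are picked up
by Failsafe and need AWS credentials configured. Assuming the module follows
the stock plugin bindings, that gives two distinct invocations:

    mvn -pl hadoop-tools/hadoop-aws test     # unit tests, no AWS account
    mvn -pl hadoop-tools/hadoop-aws verify   # also runs ITest* against S3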

[44/50] [abbrv] hadoop git commit: HADOOP-13446. Support running isolated unit tests separate from AWS integration tests. Contributed by Chris Nauroth.

2016-08-23 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
deleted file mode 100644
index 5ba1871..000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import java.io.IOException;
-import java.net.URI;
-import java.nio.file.AccessDeniedException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
-
-import com.amazonaws.auth.AWSCredentials;
-import com.amazonaws.auth.AWSCredentialsProvider;
-import com.amazonaws.auth.AWSCredentialsProviderChain;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.auth.InstanceProfileCredentialsProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.fs.s3a.Constants.*;
-import static org.apache.hadoop.fs.s3a.S3ATestConstants.*;
-import static org.apache.hadoop.fs.s3a.S3AUtils.*;
-import static org.junit.Assert.*;
-
-/**
- * Tests for {@link Constants#AWS_CREDENTIALS_PROVIDER} logic.
- *
- */
-public class TestS3AAWSCredentialsProvider {
-  private static final Logger LOG =
-  LoggerFactory.getLogger(TestS3AAWSCredentialsProvider.class);
-
-  @Rule
-  public Timeout testTimeout = new Timeout(1 * 60 * 1000);
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Declare what exception to raise, and the text which must be found
-   * in it.
-   * @param exceptionClass class of exception
-   * @param text text in exception
-   */
-  private void expectException(Class<? extends Throwable> exceptionClass,
-  String text) {
-exception.expect(exceptionClass);
-exception.expectMessage(text);
-  }
-
-  @Test
-  public void testBadConfiguration() throws IOException {
-Configuration conf = new Configuration();
-conf.set(AWS_CREDENTIALS_PROVIDER, "no.such.class");
-try {
-  createFailingFS(conf);
-} catch (IOException e) {
-  if (!(e.getCause() instanceof ClassNotFoundException)) {
-LOG.error("Unexpected nested cause: {} in {}", e.getCause(), e, e);
-throw e;
-  }
-}
-  }
-
-  /**
-   * Create a filesystem, expect it to fail by raising an IOException.
-   * Raises an assertion exception if in fact the FS does get instantiated.
-   * @param conf configuration
-   * @throws IOException an expected exception.
-   */
-  private void createFailingFS(Configuration conf) throws IOException {
-S3AFileSystem fs = S3ATestUtils.createTestFileSystem(conf);
-fs.listStatus(new Path("/"));
-fail("Expected exception - got " + fs);
-  }
-
-  static class BadCredentialsProvider implements AWSCredentialsProvider {
-
-@SuppressWarnings("unused")
-public BadCredentialsProvider(URI name, Configuration conf) {
-}
-
-@Override
-public AWSCredentials getCredentials() {
-  return new BasicAWSCredentials("bad_key", "bad_secret");
-}
-
-@Override
-public void refresh() {
-}
-  }
-
-  @Test
-  public void testBadCredentials() throws Exception {
-Configuration conf = new Configuration();
-conf.set(AWS_CREDENTIALS_PROVIDER, BadCredentialsProvider.class.getName());
-try {
-  createFailingFS(conf);
-} catch (AccessDeniedException e) {
-  // expected
-}
-  }
-
-  static class GoodCredentialsProvider extends AWSCredentialsProviderChain {
-
-@SuppressWarnings("unused")
-public GoodCredentialsProvider(URI name, Configuration conf) {
-  super(new BasicAWSCredentialsProvider(conf.get(ACCESS_KEY),
-  conf.get(SECRET_KEY)), new 
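
Providers like the two inner test classes above are wired in through the key
referenced as Constants#AWS_CREDENTIALS_PROVIDER; a minimal core-site.xml
fragment, with an illustrative class name:

    <property>
      <name>fs.s3a.aws.credentials.provider</name>
      <value>org.example.MyCredentialsProvider</value>
    </property>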

[05/50] [abbrv] hadoop git commit: HADOOP-13513. Java 1.7 support for org.apache.hadoop.fs.azure testcases. Contributed by Tibor Kiss.

2016-08-23 Thread cdouglas
HADOOP-13513. Java 1.7 support for org.apache.hadoop.fs.azure testcases. 
Contributed by Tibor Kiss.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae4db254
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae4db254
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae4db254

Branch: refs/heads/HDFS-9806
Commit: ae4db2544346370404826d5b55b2678f5f92fe1f
Parents: 0da69c3
Author: Chris Nauroth 
Authored: Thu Aug 18 10:44:36 2016 -0700
Committer: Chris Nauroth 
Committed: Thu Aug 18 10:44:36 2016 -0700

--
 .../test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae4db254/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
index f0df38b..58d278a 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
@@ -35,7 +35,7 @@ import org.slf4j.LoggerFactory;
  * to run those tests.  The returned account might integrate with Azure Storage
  * directly or it might be a mock implementation.
  */
-abstract class AbstractWasbTestBase {
+public abstract class AbstractWasbTestBase {
 
   protected static final Logger LOG =
   LoggerFactory.getLogger(AbstractWasbTestBase.class);





[02/50] [abbrv] hadoop git commit: HADOOP-13405 doc for fs.s3a.acl.default indicates incorrect values. Contributed by Shen Yinjie

2016-08-23 Thread cdouglas
HADOOP-13405 doc for fs.s3a.acl.default indicates incorrect values. Contributed 
by Shen Yinjie


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/040c185d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/040c185d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/040c185d

Branch: refs/heads/HDFS-9806
Commit: 040c185d624a18627d23cedb12bf91a950ada2fc
Parents: 913a895
Author: Steve Loughran 
Authored: Thu Aug 18 14:35:26 2016 +0100
Committer: Steve Loughran 
Committed: Thu Aug 18 14:36:55 2016 +0100

--
 .../hadoop-common/src/main/resources/core-default.xml  | 6 +++---
 .../src/main/java/org/apache/hadoop/fs/s3a/Constants.java  | 4 ++--
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md | 6 +++---
 3 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/040c185d/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index e78795c..dd8f9af 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1039,9 +1039,9 @@
 
 
   fs.s3a.acl.default
-  Set a canned ACL for newly created and copied objects. Value 
may be private, 
- public-read, public-read-write, authenticated-read, log-delivery-write, 
- bucket-owner-read, or bucket-owner-full-control.
+  Set a canned ACL for newly created and copied objects. Value 
may be Private,
+  PublicRead, PublicReadWrite, AuthenticatedRead, LogDeliveryWrite, 
BucketOwnerRead,
+  or BucketOwnerFullControl.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/040c185d/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index 612b648..6b8f9f5 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -128,8 +128,8 @@ public final class Constants {
   public static final String FAST_BUFFER_SIZE = "fs.s3a.fast.buffer.size";
   public static final int DEFAULT_FAST_BUFFER_SIZE = 1048576; //1MB
 
-  // private | public-read | public-read-write | authenticated-read | 
-  // log-delivery-write | bucket-owner-read | bucket-owner-full-control
+  // Private | PublicRead | PublicReadWrite | AuthenticatedRead |
+  // LogDeliveryWrite | BucketOwnerRead | BucketOwnerFullControl
   public static final String CANNED_ACL = "fs.s3a.acl.default";
   public static final String DEFAULT_CANNED_ACL = "";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/040c185d/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index cb1df83..d459fb5 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -554,9 +554,9 @@ this capability.
 
 
   fs.s3a.acl.default
-  Set a canned ACL for newly created and copied objects. 
Value may be private,
- public-read, public-read-write, authenticated-read, 
log-delivery-write,
- bucket-owner-read, or bucket-owner-full-control.
+  Set a canned ACL for newly created and copied objects. 
Value may be Private,
+PublicRead, PublicReadWrite, AuthenticatedRead, LogDeliveryWrite, 
BucketOwnerRead,
+or BucketOwnerFullControl.
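
For example, to grant the bucket owner full control of objects S3A creates,
using one of the corrected values above:

    <property>
      <name>fs.s3a.acl.default</name>
      <value>BucketOwnerFullControl</value>
    </property>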
 
 
 





[26/50] [abbrv] hadoop git commit: HADOOP-13428. Fixed hadoop-common build files to generate jdiff by working around a jdiff-bug. Contributed by Wangda Tan.

2016-08-23 Thread cdouglas
HADOOP-13428. Fixed hadoop-common build files to generate jdiff by working 
around a jdiff-bug. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99603e90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99603e90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99603e90

Branch: refs/heads/HDFS-9806
Commit: 99603e902244f17b04cfd55122f47355d070b588
Parents: 2da32a6
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Authored: Fri Aug 19 19:08:53 2016 -0700
Committer: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Committed: Fri Aug 19 19:08:53 2016 -0700

--
 .../dev-support/jdiff-workaround.patch  |98 +
 .../jdiff/Apache_Hadoop_Common_2.7.2.xml| 46648 +
 hadoop-common-project/hadoop-common/pom.xml | 2 +
 hadoop-project-dist/pom.xml |44 +-
 4 files changed, 46789 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99603e90/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch
--
diff --git 
a/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch 
b/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch
new file mode 100644
index 000..8f87d40
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch
@@ -0,0 +1,98 @@
+diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
+index a277abd..ed7c709 100644
+--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
+@@ -43,18 +43,6 @@
+   public abstract MetricsSystem init(String prefix);
+
+   /**
+-   * Register a metrics source
+-   * @param <T> the actual type of the source object
+-   * @param source object to register
+-   * @param name  of the source. Must be unique or null (then extracted from
+-   *  the annotations of the source object.)
+-   * @param desc  the description of the source (or null. See above.)
+-   * @return the source object
+-   * @exception MetricsException
+-   */
+-  public abstract <T extends MetricsSource> T register(String name, String desc, T source);
+-
+-  /**
+* Unregister a metrics source
+* @param name of the source. This is the name you use to call register()
+*/
+@@ -77,18 +65,19 @@
+*/
+   @InterfaceAudience.Private
+   public abstract MetricsSource getSource(String name);
++
+
+   /**
+-   * Register a metrics sink
+-   * @param <T> the type of the sink
+-   * @param sink  to register
+-   * @param name  of the sink. Must be unique.
+-   * @param desc  the description of the sink
+-   * @return the sink
++   * Register a metrics source
++   * @param <T> the actual type of the source object
++   * @param source object to register
++   * @param name  of the source. Must be unique or null (then extracted from
++   *  the annotations of the source object.)
++   * @param desc  the description of the source (or null. See above.)
++   * @return the source object
+* @exception MetricsException
+*/
+-  public abstract 
+-  T register(String name, String desc, T sink);
++  public abstract  T register(String name, String desc, T source);
+
+   /**
+* Register a callback interface for JMX events
+diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+index 6986edb..eeea81f 100644
+--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+@@ -270,27 +270,6 @@ void registerSource(String name, String desc, 
MetricsSource source) {
+ LOG.debug("Registered source "+ name);
+   }
+
+-  @Override public synchronized 
+-  T register(final String name, final String description, final T sink) {
+-LOG.debug(name +", "+ description);
+-if (allSinks.containsKey(name)) {
+-  LOG.warn("Sink "+ name +" already exists!");
+-  return sink;
+-}
+-allSinks.put(name, sink);
+-if (config != null) {
+-  registerSink(name, description, sink);
+-}
+-// We want to re-register the sink to pick up new config
+-// when the metrics system restarts.
+-register(name, new AbstractCallback() {

[36/50] [abbrv] hadoop git commit: HDFS-8312. Added permission check for moving file to Trash. (Weiwei Yang via Eric Yang)

2016-08-23 Thread cdouglas
HDFS-8312. Added permission check for moving file to Trash. (Weiwei Yang via 
Eric Yang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c49333be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c49333be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c49333be

Branch: refs/heads/HDFS-9806
Commit: c49333becfa7652460976a61eb86522010bcfeed
Parents: 4070caa
Author: Eric Yang 
Authored: Mon Aug 22 18:29:56 2016 -0700
Committer: Eric Yang 
Committed: Mon Aug 22 18:29:56 2016 -0700

--
 .../main/java/org/apache/hadoop/fs/Options.java |  3 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java| 10 ++-
 .../ClientNamenodeProtocolTranslatorPB.java |  7 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 14 +++-
 .../hdfs/server/namenode/FSDirRenameOp.java | 28 +--
 .../apache/hadoop/hdfs/TestDFSPermission.java   | 81 
 7 files changed, 132 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c49333be/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
index da75d1c..dc50286 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
@@ -213,7 +213,8 @@ public final class Options {
*/
   public static enum Rename {
 NONE((byte) 0), // No options
-OVERWRITE((byte) 1); // Overwrite the rename destination
+OVERWRITE((byte) 1), // Overwrite the rename destination
+TO_TRASH ((byte) 2); // Rename to trash
 
 private final byte code;
 
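Since rename options travel as a single byte in the RPC, the enum codes are distinct bits; a minimal sketch of combining and decoding them, assuming the enum's value() accessor that returns the code byte:

import org.apache.hadoop.fs.Options.Rename;

class RenameFlagsSketch {
  public static void main(String[] args) {
    // OVERWRITE (0x01) and TO_TRASH (0x02) occupy different bits, so both
    // can be set in one byte and tested independently on the server side.
    byte flags = (byte) (Rename.OVERWRITE.value() | Rename.TO_TRASH.value());
    boolean overwrite = (flags & Rename.OVERWRITE.value()) != 0; // true
    boolean toTrash = (flags & Rename.TO_TRASH.value()) != 0;    // true
    System.out.println(overwrite + " " + toTrash);
  }
}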

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c49333be/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 14f4c0c..66ef890 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -106,6 +106,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 return deletionInterval != 0;
   }
 
+  @SuppressWarnings("deprecation")
   @Override
   public boolean moveToTrash(Path path) throws IOException {
 if (!isEnabled())
@@ -156,10 +157,11 @@ public class TrashPolicyDefault extends TrashPolicy {
   trashPath = new Path(orig + Time.now());
 }
 
-if (fs.rename(path, trashPath)) {   // move to current trash
-  LOG.info("Moved: '" + path + "' to trash at: " + trashPath);
-  return true;
-}
+// move to current trash
+fs.rename(path, trashPath,
+Rename.TO_TRASH);
+LOG.info("Moved: '" + path + "' to trash at: " + trashPath);
+return true;
   } catch (IOException e) {
 cause = e;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c49333be/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index bcf5269..57f8fd6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -523,16 +523,21 @@ public class ClientNamenodeProtocolTranslatorPB implements
   public void rename2(String src, String dst, Rename... options)
   throws IOException {
 boolean overwrite = false;
+boolean toTrash = false;
 if (options != null) {
   for (Rename option : options) {
 if (option == Rename.OVERWRITE) {
   overwrite = true;
+} else if (option == Rename.TO_TRASH) {
+  toTrash = true;
 }
   }
 }
 

[35/50] [abbrv] hadoop git commit: HADOOP-13526. Add detailed logging in KMS for the authentication failure of proxy user. Contributed by Suraj Acharya.

2016-08-23 Thread cdouglas
HADOOP-13526. Add detailed logging in KMS for the authentication failure of 
proxy user. Contributed by Suraj Acharya.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4070caad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4070caad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4070caad

Branch: refs/heads/HDFS-9806
Commit: 4070caad70db49b50554088d29ac2fbc7ba62a0a
Parents: 3ca4d6d
Author: Xiao Chen 
Authored: Mon Aug 22 18:06:53 2016 -0700
Committer: Xiao Chen 
Committed: Mon Aug 22 18:09:35 2016 -0700

--
 .../web/DelegationTokenAuthenticationFilter.java  | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4070caad/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
index fb6817e..112c952 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
@@ -39,6 +39,8 @@ import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.http.NameValuePair;
 import org.apache.http.client.utils.URLEncodedUtils;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
@@ -81,6 +83,9 @@ public class DelegationTokenAuthenticationFilter
   private static final String ERROR_EXCEPTION_JSON = "exception";
   private static final String ERROR_MESSAGE_JSON = "message";
 
+  private static final Logger LOG = LoggerFactory.getLogger(
+  DelegationTokenAuthenticationFilter.class);
+
   /**
* Sets an external DelegationTokenSecretManager instance to
* manage creation and verification of Delegation Tokens.
@@ -261,6 +266,11 @@ public class DelegationTokenAuthenticationFilter
 HttpExceptionUtils.createServletExceptionResponse(response,
 HttpServletResponse.SC_FORBIDDEN, ex);
 requestCompleted = true;
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Authentication exception: " + ex.getMessage(), ex);
+} else {
+  LOG.warn("Authentication exception: " + ex.getMessage());
+}
   }
 }
   }
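The idiom added above, isolated as a generic slf4j sketch (class and method names are illustrative): full stack trace at DEBUG for troubleshooting runs, a one-line WARN otherwise, so production logs record the failure without stack-trace noise.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class AuthFailureLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(AuthFailureLoggingSketch.class);

  void onAuthFailure(Exception ex) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Authentication exception: " + ex.getMessage(), ex); // with stack
    } else {
      LOG.warn("Authentication exception: " + ex.getMessage());      // one line
    }
  }
}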





[04/50] [abbrv] hadoop git commit: YARN-4676. Automatic and Asynchronous Decommissioning Nodes Status Tracking. Contributed by Daniel Zhi. (cherry picked from commit d464483bf7f0b3e3be3ba32cd6c3eee546

2016-08-23 Thread cdouglas
YARN-4676. Automatic and Asynchronous Decommissioning Nodes Status Tracking. 
Contributed by Daniel Zhi.
(cherry picked from commit d464483bf7f0b3e3be3ba32cd6c3eee546747ab5)

Conflicts:


hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0da69c32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0da69c32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0da69c32

Branch: refs/heads/HDFS-9806
Commit: 0da69c324dee9baab0f0b9700db1cc5b623f8421
Parents: 040c185
Author: Junping Du 
Authored: Thu Aug 18 07:23:29 2016 -0700
Committer: Junping Du 
Committed: Thu Aug 18 07:27:23 2016 -0700

--
 .../org/apache/hadoop/util/HostsFileReader.java | 111 -
 .../apache/hadoop/util/TestHostsFileReader.java |  64 ++-
 hadoop-project/src/site/site.xml|   1 +
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   5 +
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   5 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  14 +
 .../protocolrecords/RefreshNodesRequest.java|  26 +-
 ..._server_resourcemanager_service_protos.proto |   1 +
 .../hadoop/yarn/client/cli/RMAdminCLI.java  | 166 ---
 .../hadoop/yarn/client/cli/TestRMAdminCLI.java  |  24 +-
 .../impl/pb/RefreshNodesRequestPBImpl.java  |  17 +-
 .../src/main/resources/yarn-default.xml |  18 +
 .../server/resourcemanager/AdminService.java|   3 +-
 .../DecommissioningNodesWatcher.java| 439 +++
 .../resourcemanager/NodesListManager.java   | 166 +--
 .../server/resourcemanager/RMServerUtils.java   |   2 +-
 .../resourcemanager/ResourceTrackerService.java |  19 +
 .../server/resourcemanager/rmnode/RMNode.java   |   6 +
 .../rmnode/RMNodeDecommissioningEvent.java  |  41 ++
 .../resourcemanager/rmnode/RMNodeImpl.java  |  54 ++-
 .../webapp/dao/ClusterMetricsInfo.java  |   2 +-
 .../yarn/server/resourcemanager/MockNodes.java  |   5 +
 .../yarn/server/resourcemanager/MockRM.java |  11 +-
 .../TestDecommissioningNodesWatcher.java| 131 ++
 .../resourcemanager/TestRMNodeTransitions.java  |  11 -
 .../TestResourceTrackerService.java | 199 +++--
 .../resourcetracker/TestNMReconnect.java|   2 -
 .../src/site/markdown/YarnCommands.md   |   2 +-
 28 files changed, 1326 insertions(+), 219 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da69c32/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
index 1cba426..2ef1ead 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
@@ -21,16 +21,27 @@ package org.apache.hadoop.util;
 import java.io.*;
 import java.nio.charset.StandardCharsets;
 import java.util.Set;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+import java.util.Map;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
 
-import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.xml.sax.SAXException;
 
 // Keeps track of which datanodes/tasktrackers are allowed to connect to the 
 // namenode/jobtracker.
@@ -38,7 +49,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 
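The new javax.xml.parsers and org.w3c.dom imports above suggest HostsFileReader now reads an XML flavor of the include file (hosts plus a decommission timeout). A hedged sketch of that kind of parse with the JDK DOM API; the "host", "name" and "timeout" element/attribute names are assumptions for illustration, not the patch's exact schema.

import java.io.File;
import java.util.HashMap;
import java.util.Map;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

class XmlHostsSketch {
  static Map<String, Integer> readHostsXml(File file) throws Exception {
    DocumentBuilder db =
        DocumentBuilderFactory.newInstance().newDocumentBuilder();
    Document doc = db.parse(file);
    Map<String, Integer> hosts = new HashMap<String, Integer>();
    NodeList nodes = doc.getDocumentElement().getElementsByTagName("host");
    for (int i = 0; i < nodes.getLength(); i++) {
      Node n = nodes.item(i);
      if (n.getNodeType() == Node.ELEMENT_NODE) {
        Element e = (Element) n;
        String timeout = e.getAttribute("timeout");
        // a missing timeout is read here as "no deadline"
        hosts.put(e.getAttribute("name"),
            timeout.isEmpty() ? null : Integer.valueOf(timeout));
      }
    }
    return hosts;
  }
}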

[15/50] [abbrv] hadoop git commit: HADOOP-13252. Tune S3A provider plugin mechanism. Contributed by Steve Loughran.

2016-08-23 Thread cdouglas
HADOOP-13252. Tune S3A provider plugin mechanism. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/763f0497
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/763f0497
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/763f0497

Branch: refs/heads/HDFS-9806
Commit: 763f0497bb996e331e40caed9ca0af966f5b3fac
Parents: 03a9343
Author: Chris Nauroth 
Authored: Fri Aug 19 10:48:10 2016 -0700
Committer: Chris Nauroth 
Committed: Fri Aug 19 10:48:10 2016 -0700

--
 .../src/main/resources/core-default.xml |  19 +-
 .../fs/FileContextMainOperationsBaseTest.java   |   8 +-
 .../apache/hadoop/fs/FileContextUtilBase.java   |   4 +-
 .../fs/s3a/AWSCredentialProviderList.java   | 188 +
 .../fs/s3a/AnonymousAWSCredentialsProvider.java |   4 +
 .../fs/s3a/BasicAWSCredentialsProvider.java |   6 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  56 +--
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 139 ++-
 .../fs/s3a/SimpleAWSCredentialsProvider.java|  86 +
 .../fs/s3a/TemporaryAWSCredentialsProvider.java |  32 +-
 .../src/site/markdown/tools/hadoop-aws/index.md | 381 +++
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   |  98 -
 .../fs/s3a/TestS3ATemporaryCredentials.java |   4 +-
 .../org/apache/hadoop/fs/s3a/yarn/TestS3A.java  |   4 +-
 14 files changed, 843 insertions(+), 186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/763f0497/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index dd8f9af..569d894 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -877,22 +877,27 @@
 <property>
   <name>fs.s3a.aws.credentials.provider</name>
   <description>
-    Class name of a credentials provider that implements
-    com.amazonaws.auth.AWSCredentialsProvider.  Omit if using access/secret keys
-    or another authentication mechanism.  The specified class must provide an
-    accessible constructor accepting java.net.URI and
-    org.apache.hadoop.conf.Configuration, or an accessible default constructor.
+    Comma-separated class names of credential provider classes which implement
+    com.amazonaws.auth.AWSCredentialsProvider.
+
+    These are loaded and queried in sequence for a valid set of credentials.
+    Each listed class must provide either an accessible constructor accepting
+    java.net.URI and org.apache.hadoop.conf.Configuration, or an accessible
+    default constructor.
+
     Specifying org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider allows
     anonymous access to a publicly accessible S3 bucket without any credentials.
     Please note that allowing anonymous access to an S3 bucket compromises
-    security and therefore is unsuitable for most use cases.  It can be useful
+    security and therefore is unsuitable for most use cases. It can be useful
     for accessing public data sets without requiring AWS credentials.
   </description>
 </property>

 <property>
   <name>fs.s3a.session.token</name>
-  <description>The session token used with temporary credentials. Used only with provider org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider.</description>
+  <description>Session token, when using org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider
+    as one of the providers.
+  </description>
 
 
 
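A stand-alone sketch of the sequencing behavior the description above specifies; the patch's real implementation is AWSCredentialProviderList, and this simplified loop is illustration only (AWS SDK v1 types).

import com.amazonaws.AmazonClientException;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import java.util.List;

class CredentialChainSketch {
  static AWSCredentials firstValid(List<AWSCredentialsProvider> providers) {
    AmazonClientException last = null;
    for (AWSCredentialsProvider p : providers) {
      try {
        AWSCredentials c = p.getCredentials();
        if (c != null) {
          return c;   // first provider that yields credentials wins
        }
      } catch (AmazonClientException e) {
        last = e;     // remember the failure, fall through to the next one
      }
    }
    throw last != null ? last
        : new AmazonClientException("No AWS credentials provided");
  }
}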

http://git-wip-us.apache.org/repos/asf/hadoop/blob/763f0497/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index 78b40b5..5f9151a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -109,9 +109,11 @@ public abstract class FileContextMainOperationsBaseTest  {
   
   @After
   public void tearDown() throws Exception {
-    boolean del = fc.delete(new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc), new Path("test")), true);
-    assertTrue(del);
-    fc.delete(localFsRootPath, true);
+    if (fc != null) {
+      boolean del = fc.delete(new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc), new Path("test")), true);
+

[08/50] [abbrv] hadoop git commit: HADOOP-13504. Refactor jni_common to conform to C89 restrictions imposed by Visual Studio 2010. Contributed by Sammi Chen

2016-08-23 Thread cdouglas
HADOOP-13504. Refactor jni_common to conform to C89 restrictions imposed by 
Visual Studio 2010. Contributed by Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dbcaf999
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dbcaf999
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dbcaf999

Branch: refs/heads/HDFS-9806
Commit: dbcaf999d9ea7a7c6c090903d1982e5b61200c8b
Parents: c5c3e81
Author: Kai Zheng 
Authored: Fri Aug 19 11:18:15 2016 +0800
Committer: Kai Zheng 
Committed: Fri Aug 19 11:18:15 2016 +0800

--
 .../org/apache/hadoop/io/erasurecode/jni_common.c| 15 ++-
 1 file changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbcaf999/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_common.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_common.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_common.c
index 17e05db..2b1d9ea 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_common.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_common.c
@@ -43,21 +43,26 @@ void setCoder(JNIEnv* env, jobject thiz, IsalCoder* pCoder) 
{
 }
 
 IsalCoder* getCoder(JNIEnv* env, jobject thiz) {
-  jclass clazz = (*env)->GetObjectClass(env, thiz);
+  jclass clazz;
+  jmethodID mid;
+  jboolean verbose;
+  jfieldID fid;
+  IsalCoder* pCoder;
 
-  jmethodID mid = (*env)->GetMethodID(env, clazz, "allowVerboseDump", "()Z");
+  clazz = (*env)->GetObjectClass(env, thiz);
+  mid = (*env)->GetMethodID(env, clazz, "allowVerboseDump", "()Z");
   if (mid == NULL) {
 THROW(env, "java/lang/UnsatisfiedLinkError",
  "Method allowVerboseDump not found");
   }
-  jboolean verbose = (*env)->CallBooleanMethod(env, thiz, mid);
+  verbose = (*env)->CallBooleanMethod(env, thiz, mid);
 
-  jfieldID fid = (*env)->GetFieldID(env, clazz, "nativeCoder", "J");
+  fid = (*env)->GetFieldID(env, clazz, "nativeCoder", "J");
   if (fid == NULL) {
 THROW(env, "java/lang/UnsatisfiedLinkError",
 "Field nativeCoder not found");
   }
-  IsalCoder* pCoder = (IsalCoder*)(*env)->GetLongField(env, thiz, fid);
+  pCoder = (IsalCoder*)(*env)->GetLongField(env, thiz, fid);
   pCoder->verbose = (verbose == JNI_TRUE) ? 1 : 0;
 
   return pCoder;





[24/50] [abbrv] hadoop git commit: HADOOP-13428. Fixed hadoop-common build files to generate jdiff by working around a jdiff-bug. Contributed by Wangda Tan.

2016-08-23 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99603e90/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index c28a05c..54d1cdd 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -34,6 +34,7 @@
 src/test/resources/kdc
 common
 true
+true
 ../etc/hadoop
 wsce-site.xml
   
@@ -512,6 +513,7 @@
             <exclude>src/test/resources/test.har/_masterindex</exclude>
             <exclude>src/test/resources/test.har/part-0</exclude>
             <exclude>src/test/resources/javakeystoreprovider.password</exclude>
+            <exclude>dev-support/jdiff-workaround.patch</exclude>
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99603e90/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 0357269..0ee9895 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -160,7 +160,7 @@
 false
   
   
-    <jdiff.stable.api>2.6.0</jdiff.stable.api>
+    <jdiff.stable.api>2.7.2</jdiff.stable.api>
     <jdiff.stability>-unstable</jdiff.stability>
 
 
@@ -173,7 +173,7 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-javadoc-plugin</artifactId>
 
-  
+ 
 
   javadoc
 
@@ -241,6 +241,26 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-antrun-plugin</artifactId>
 
+            [maven-antrun-plugin execution markup stripped by the archive:
+             a new "pre-site" execution bound to the prepare-package phase
+             running the "run" goal; its ant target body was lost]
   
 site
 prepare-package
@@ -249,7 +269,6 @@
 
 
   
-
 
 
 
@@ -302,6 +321,25 @@
   
 
   
+            [maven-antrun-plugin execution markup stripped by the archive:
+             a new "post-site" execution bound to the prepare-package phase
+             running the "run" goal; its ant target body was lost]
 
   
 





[07/50] [abbrv] hadoop git commit: HADOOP-13503. Improve SaslRpcClient failure logging. Contributed by Xiaobing Zhou.

2016-08-23 Thread cdouglas
HADOOP-13503. Improve SaslRpcClient failure logging. Contributed by Xiaobing 
Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5c3e81b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5c3e81b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5c3e81b

Branch: refs/heads/HDFS-9806
Commit: c5c3e81b49ae6ef0cf9022f90f3709166aa4488d
Parents: 0f51eae
Author: Jing Zhao 
Authored: Thu Aug 18 14:55:26 2016 -0700
Committer: Jing Zhao 
Committed: Thu Aug 18 14:55:26 2016 -0700

--
 .../apache/hadoop/security/SaslRpcClient.java| 19 +++
 1 file changed, 11 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5c3e81b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
index 850f27c..c360937 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
@@ -305,13 +305,16 @@ public class SaslRpcClient {
 authType.getProtocol() + "/" + authType.getServerId(),
 KerberosPrincipal.KRB_NT_SRV_HST).getName();
 
-boolean isPrincipalValid = false;
-
 // use the pattern if defined
 String serverKeyPattern = conf.get(serverKey + ".pattern");
 if (serverKeyPattern != null && !serverKeyPattern.isEmpty()) {
   Pattern pattern = GlobPattern.compile(serverKeyPattern);
-  isPrincipalValid = pattern.matcher(serverPrincipal).matches();
+  if (!pattern.matcher(serverPrincipal).matches()) {
+throw new IllegalArgumentException(String.format(
+"Server has invalid Kerberos principal: %s,"
++ " doesn't match the pattern: %s",
+serverPrincipal, serverKeyPattern));
+  }
 } else {
   // check that the server advertised principal matches our conf
   String confPrincipal = SecurityUtil.getServerPrincipal(
@@ -330,11 +333,11 @@ public class SaslRpcClient {
 "Kerberos principal name does NOT have the expected hostname part: 
"
 + confPrincipal);
   }
-  isPrincipalValid = serverPrincipal.equals(confPrincipal);
-}
-if (!isPrincipalValid) {
-  throw new IllegalArgumentException(
-  "Server has invalid Kerberos principal: " + serverPrincipal);
+  if (!serverPrincipal.equals(confPrincipal)) {
+throw new IllegalArgumentException(String.format(
+"Server has invalid Kerberos principal: %s, expecting: %s",
+serverPrincipal, confPrincipal));
+  }
 }
 return serverPrincipal;
   }
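For reference, what the "<serverKey>.pattern" branch above amounts to, using the same GlobPattern helper; dfs.namenode.kerberos.principal.pattern is the HDFS client's instance of this key, and the glob value below is an example.

import java.util.regex.Pattern;
import org.apache.hadoop.fs.GlobPattern;

class PrincipalPatternSketch {
  public static void main(String[] args) {
    Pattern p = GlobPattern.compile("nn/*@EXAMPLE.COM");
    // matches any host in the realm for the nn service principal
    System.out.println(
        p.matcher("nn/host1.example.com@EXAMPLE.COM").matches()); // true
    System.out.println(
        p.matcher("dn/host1.example.com@EXAMPLE.COM").matches()); // false
  }
}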





[33/50] [abbrv] hadoop git commit: HADOOP-13487. Hadoop KMS should load old delegation tokens from Zookeeper on startup. Contributed by Xiao Chen.

2016-08-23 Thread cdouglas
HADOOP-13487. Hadoop KMS should load old delegation tokens from Zookeeper on 
startup. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4d4d347
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4d4d347
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4d4d347

Branch: refs/heads/HDFS-9806
Commit: f4d4d3474cfd2d1f2d243f5ae5cec17af38270b1
Parents: 22fc46d
Author: Xiao Chen 
Authored: Mon Aug 22 14:31:13 2016 -0700
Committer: Xiao Chen 
Committed: Mon Aug 22 14:42:13 2016 -0700

--
 .../ZKDelegationTokenSecretManager.java | 44 +
 .../TestZKDelegationTokenSecretManager.java | 93 +++-
 2 files changed, 136 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d4d347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
index c3ad9f3..6c66e98 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
@@ -361,6 +361,7 @@ public abstract class ZKDelegationTokenSecretManager
+  private void loadFromZKCache(final boolean isTokenCache) {
+    final String cacheName = isTokenCache ? "token" : "key";
+    final List<ChildData> children;
+if (isTokenCache) {
+  children = tokenCache.getCurrentData();
+} else {
+  children = keyCache.getCurrentData();
+}
+
+int count = 0;
+for (ChildData child : children) {
+  try {
+if (isTokenCache) {
+  processTokenAddOrUpdate(child);
+} else {
+  processKeyAddOrUpdate(child.getData());
+}
+  } catch (Exception e) {
+LOG.info("Ignoring node {} because it failed to load.",
+child.getPath());
+LOG.debug("Failure exception:", e);
+++count;
+  }
+}
+if (count > 0) {
+  LOG.warn("Ignored {} nodes while loading {} cache.", count, cacheName);
+}
+LOG.info("Loaded {} cache.", cacheName);
+  }
+
   private void processKeyAddOrUpdate(byte[] data) throws IOException {
 ByteArrayInputStream bin = new ByteArrayInputStream(data);
 DataInputStream din = new DataInputStream(bin);
@@ -890,4 +929,9 @@ public abstract class ZKDelegationTokenSecretManager

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d4d347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
index 185a994..c9571ff2 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 
+import com.google.common.base.Supplier;
 import org.apache.curator.RetryPolicy;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
@@ -37,6 +38,7 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.zookeeper.ZooDefs;
 import org.apache.zookeeper.data.ACL;
 import org.apache.zookeeper.data.Id;
@@ -44,12 +46,18 @@ import 
org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.junit.Assert.fail;
 
-import org.junit.Test;
 
 public class TestZKDelegationTokenSecretManager {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestZKDelegationTokenSecretManager.class);

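A hedged sketch of the startup walk this patch adds: iterate the znodes a Curator PathChildrenCache already holds and decode each, counting rather than propagating per-node failures. decode() below stands in for processTokenAddOrUpdate()/processKeyAddOrUpdate().

import java.util.List;
import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;

class ZkCacheLoadSketch {
  static int loadAll(PathChildrenCache cache) {
    List<ChildData> children = cache.getCurrentData(); // snapshot already held
    int failed = 0;
    for (ChildData child : children) {
      try {
        decode(child.getPath(), child.getData());
      } catch (Exception e) {
        failed++; // one corrupt znode must not abort startup
      }
    }
    return failed;
  }

  static void decode(String path, byte[] data) throws Exception {
    // placeholder: deserialize a delegation token or master key from the payload
  }
}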
[42/50] [abbrv] hadoop git commit: HADOOP-13446. Support running isolated unit tests separate from AWS integration tests. Contributed by Chris Nauroth.

2016-08-23 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3A.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3A.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3A.java
deleted file mode 100644
index a22dd28..0000000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3A.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3a.yarn;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.FsStatus;
-import org.apache.hadoop.fs.Path;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.util.EnumSet;
-import org.apache.hadoop.fs.s3a.S3ATestUtils;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-public class TestS3A {
-  private FileContext fc;
-
-  @Rule
-  public final Timeout testTimeout = new Timeout(9);
-
-  @Before
-  public void setUp() throws Exception {
-Configuration conf = new Configuration();
-fc = S3ATestUtils.createTestFileContext(conf);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-if (fc != null) {
-  fc.delete(getTestPath(), true);
-}
-  }
-
-  protected Path getTestPath() {
-return new Path("/tests3afc");
-  }
-
-  @Test
-  public void testS3AStatus() throws Exception {
-FsStatus fsStatus = fc.getFsStatus(null);
-assertNotNull(fsStatus);
-assertTrue("Used capacity should be positive: " + fsStatus.getUsed(),
-fsStatus.getUsed() >= 0);
-assertTrue("Remaining capacity should be positive: " + fsStatus
-.getRemaining(),
-fsStatus.getRemaining() >= 0);
-assertTrue("Capacity should be positive: " + fsStatus.getCapacity(),
-fsStatus.getCapacity() >= 0);
-  }
-
-  @Test
-  public void testS3ACreateFileInSubDir() throws Exception {
-Path dirPath = getTestPath();
-fc.mkdir(dirPath,FileContext.DIR_DEFAULT_PERM,true);
-Path filePath = new Path(dirPath, "file");
-try (FSDataOutputStream file = fc.create(filePath, EnumSet.of(CreateFlag
-.CREATE))) {
-  file.write(666);
-}
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3AMiniYarnCluster.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3AMiniYarnCluster.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3AMiniYarnCluster.java
deleted file mode 100644
index 990d79f..0000000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3AMiniYarnCluster.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-package org.apache.hadoop.fs.s3a.yarn;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import 

[37/50] [abbrv] hadoop git commit: MAPREDUCE-6587. Remove unused params in connection-related methods of Fetcher. Contributed by Yiqun Lin.

2016-08-23 Thread cdouglas
MAPREDUCE-6587. Remove unused params in connection-related methods of Fetcher. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cc4a670
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cc4a670
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cc4a670

Branch: refs/heads/HDFS-9806
Commit: 8cc4a67059e37b2083cd5468b35a64a403a3e3ae
Parents: c49333b
Author: Akira Ajisaka 
Authored: Tue Aug 23 17:04:55 2016 +0900
Committer: Akira Ajisaka 
Committed: Tue Aug 23 17:04:55 2016 +0900

--
 .../org/apache/hadoop/mapreduce/task/reduce/Fetcher.java  | 10 --
 1 file changed, 4 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cc4a670/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
index d8dd7b5..be2f84f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
@@ -263,7 +263,7 @@ class Fetcher extends Thread {
 DataInputStream input = null;
 
 try {
-  setupConnectionsWithRetry(host, remaining, url);
+  setupConnectionsWithRetry(url);
   if (stopped) {
 abortConnect(host, remaining);
   } else {
@@ -374,9 +374,8 @@ class Fetcher extends Thread {
 }
   }
 
-  private void setupConnectionsWithRetry(MapHost host,
-      Set<TaskAttemptID> remaining, URL url) throws IOException {
-openConnectionWithRetry(host, remaining, url);
+  private void setupConnectionsWithRetry(URL url) throws IOException {
+openConnectionWithRetry(url);
 if (stopped) {
   return;
 }
@@ -396,8 +395,7 @@ class Fetcher extends Thread {
 verifyConnection(url, msgToEncode, encHash);
   }
 
-  private void openConnectionWithRetry(MapHost host,
-      Set<TaskAttemptID> remaining, URL url) throws IOException {
+  private void openConnectionWithRetry(URL url) throws IOException {
 long startTime = Time.monotonicNow();
 boolean shouldWait = true;
 while (shouldWait) {





[13/50] [abbrv] hadoop git commit: HDFS-10711. Optimize FSPermissionChecker group membership check. Contributed by Daryn Sharp.

2016-08-23 Thread cdouglas
HDFS-10711. Optimize FSPermissionChecker group membership check. Contributed by 
Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2550371f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2550371f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2550371f

Branch: refs/heads/HDFS-9806
Commit: 2550371f66c49fe0e40aadaa68744311270084ce
Parents: 091dd19
Author: Kihwal Lee 
Authored: Fri Aug 19 09:12:17 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Aug 19 09:12:17 2016 -0500

--
 .../hdfs/server/namenode/FSDirAttrOp.java   |  2 +-
 .../server/namenode/FSPermissionChecker.java| 23 ++--
 2 files changed, 8 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2550371f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index e6d36b8..e19341c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -88,7 +88,7 @@ public class FSDirAttrOp {
 if (username != null && !pc.getUser().equals(username)) {
   throw new AccessControlException("Non-super user cannot change 
owner");
 }
-if (group != null && !pc.containsGroup(group)) {
+if (group != null && !pc.isMemberOfGroup(group)) {
   throw new AccessControlException("User does not belong to " + group);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2550371f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 726319f..c9b1c76 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
+import java.util.Collection;
 import java.util.Stack;
 
 import org.apache.commons.logging.Log;
@@ -81,7 +78,7 @@ class FSPermissionChecker implements AccessControlEnforcer {
   private final UserGroupInformation callerUgi;
 
   private final String user;
-  private final Set<String> groups;
+  private final Collection<String> groups;
   private final boolean isSuper;
   private final INodeAttributeProvider attributeProvider;
 
@@ -92,15 +89,13 @@ class FSPermissionChecker implements AccessControlEnforcer {
 this.fsOwner = fsOwner;
 this.supergroup = supergroup;
 this.callerUgi = callerUgi;
-    HashSet<String> s =
-        new HashSet<String>(Arrays.asList(callerUgi.getGroupNames()));
-    groups = Collections.unmodifiableSet(s);
+this.groups = callerUgi.getGroups();
 user = callerUgi.getShortUserName();
 isSuper = user.equals(fsOwner) || groups.contains(supergroup);
 this.attributeProvider = attributeProvider;
   }
 
-  public boolean containsGroup(String group) {
+  public boolean isMemberOfGroup(String group) {
 return groups.contains(group);
   }
 
@@ -108,10 +103,6 @@ class FSPermissionChecker implements AccessControlEnforcer 
{
 return user;
   }
 
-  public Set<String> getGroups() {
-return groups;
-  }
-
   public boolean isSuperUser() {
 return isSuper;
   }
@@ -337,7 +328,7 @@ class FSPermissionChecker implements AccessControlEnforcer {
 final FsAction checkAction;
 if (getUser().equals(inode.getUserName())) { //user class
   checkAction = mode.getUserAction();
-} else if (getGroups().contains(inode.getGroupName())) { //group class
+} else if (isMemberOfGroup(inode.getGroupName())) { //group class
   checkAction = mode.getGroupAction();
 } else { //other class
   checkAction = mode.getOtherAction();
@@ -407,7 +398,7 @@ class FSPermissionChecker implements AccessControlEnforcer {
   // member of multiple groups that have entries that grant access, then
   // it doesn't matter which is chosen, so exit 

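The heart of the optimization, isolated (illustrative names; assumes UserGroupInformation#getGroups, added to UGI shortly before this change): the old code copied the caller's groups into a fresh HashSet per permission checker, the new code reuses the collection UGI already caches.

import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.security.UserGroupInformation;

class GroupLookupSketch {
  static boolean isMember(String group) throws java.io.IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    // old shape, shown for contrast: an O(#groups) copy per checker
    Set<String> copied =
        new HashSet<String>(Arrays.asList(ugi.getGroupNames()));

    // new shape: no copy; contains() on the cached group list
    Collection<String> cached = ugi.getGroups();
    return cached.contains(group);
  }
}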
[06/50] [abbrv] hadoop git commit: HADOOP-13512. ReloadingX509TrustManager should keep reloading in case of exception. (Contributed by Mingliang Liu)

2016-08-23 Thread cdouglas
HADOOP-13512. ReloadingX509TrustManager should keep reloading in case of 
exception. (Contributed by Mingliang Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f51eae0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f51eae0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f51eae0

Branch: refs/heads/HDFS-9806
Commit: 0f51eae0c085ded38216824377acf8122638c3a5
Parents: ae4db25
Author: Mingliang Liu 
Authored: Thu Aug 18 00:04:58 2016 -0700
Committer: Mingliang Liu 
Committed: Thu Aug 18 14:10:37 2016 -0700

--
 .../org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f51eae0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
index bb90a61..597f8d7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
@@ -165,10 +165,10 @@ public final class ReloadingX509TrustManager
   throws IOException, GeneralSecurityException {
 X509TrustManager trustManager = null;
 KeyStore ks = KeyStore.getInstance(type);
-lastLoaded = file.lastModified();
 FileInputStream in = new FileInputStream(file);
 try {
   ks.load(in, password.toCharArray());
+  lastLoaded = file.lastModified();
   LOG.debug("Loaded truststore '" + file + "'");
 } finally {
   in.close();

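The invariant behind this one-line move, as a sketch (loadTrustStore() is a placeholder for the KeyStore.load sequence above): the timestamp is committed only after a successful load, so a read that fails on a truncated or mid-write file is retried on the next poll instead of being skipped until the file changes again.

import java.io.File;

class ReloadOnSuccessSketch {
  private long lastLoaded = 0;

  void maybeReload(File file) {
    if (file.lastModified() <= lastLoaded) {
      return; // nothing newer than the last *successful* load
    }
    try {
      loadTrustStore(file);             // may throw on a partial write
      lastLoaded = file.lastModified(); // commit only on success
    } catch (Exception e) {
      // lastLoaded unchanged: the next poll retries the load
    }
  }

  void loadTrustStore(File file) throws Exception {
    // placeholder for KeyStore.getInstance(...) / ks.load(...)
  }
}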




[20/50] [abbrv] hadoop git commit: MAPREDUCE-6310. Add jdiff support to MapReduce. (Li Lu/vinodkv via wangda)

2016-08-23 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d937457/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.6.0.xml
--
diff --git 
a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.6.0.xml
 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.6.0.xml
new file mode 100644
index 000..14f8307
--- /dev/null
+++ 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.6.0.xml
@@ -0,0 +1,30767 @@
+[30,767 lines of jdiff-generated API descriptor XML for Apache Hadoop MapReduce Core 2.6.0; the element markup was stripped by the archive and is not reproduced here]

[09/50] [abbrv] hadoop git commit: HDFS-10645. Make block report size as a metric and add this metric to datanode web ui. Contributed by Yuanbo Liu.

2016-08-23 Thread cdouglas
HDFS-10645. Make block report size as a metric and add this metric to datanode 
web ui. Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8179f9a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8179f9a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8179f9a4

Branch: refs/heads/HDFS-9806
Commit: 8179f9a493c1b26deb6b1bffacd6a829586b7f98
Parents: dbcaf99
Author: Akira Ajisaka 
Authored: Fri Aug 19 16:15:00 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Aug 19 16:15:00 2016 +0900

--
 .../hadoop-common/src/site/markdown/Metrics.md  |  1 +
 .../hdfs/server/datanode/BPServiceActor.java| 43 +++-
 .../hadoop/hdfs/server/datanode/DNConf.java |  7 +++
 .../src/main/webapps/datanode/datanode.html |  2 +
 .../server/datanode/TestDataNodeMXBean.java | 52 
 5 files changed, 104 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8179f9a4/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index e4e2443..0666b3f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -315,6 +315,7 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
 | `TotalReadTime` | Total number of milliseconds spent on read operation |
 | `RemoteBytesRead` | Number of bytes read by remote clients |
 | `RemoteBytesWritten` | Number of bytes written by remote clients |
+| `BPServiceActorInfo` | The information about a block pool service actor |
 
 yarn context
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8179f9a4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 69989fb..4bde758 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -26,10 +26,13 @@ import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.SortedSet;
+import java.util.TreeSet;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -101,6 +104,9 @@ class BPServiceActor implements Runnable {
   private final DataNode dn;
   private final DNConf dnConf;
   private long prevBlockReportId;
+  private final SortedSet<Integer> blockReportSizes =
+  Collections.synchronizedSortedSet(new TreeSet<>());
+  private final int maxDataLength;
 
   private final IncrementalBlockReportManager ibrManager;
 
@@ -122,6 +128,8 @@ class BPServiceActor implements Runnable {
 prevBlockReportId = ThreadLocalRandom.current().nextLong();
 scheduler = new Scheduler(dnConf.heartBeatInterval,
 dnConf.getLifelineIntervalMs(), dnConf.blockReportInterval);
+// get the value of maxDataLength.
+this.maxDataLength = dnConf.getMaxDataLength();
   }
 
   public DatanodeRegistration getBpRegistration() {
@@ -166,6 +174,8 @@ class BPServiceActor implements Runnable {
 String.valueOf(getScheduler().getLastHearbeatTime()));
 info.put("LastBlockReport",
 String.valueOf(getScheduler().getLastBlockReportTime()));
+info.put("maxBlockReportSize", String.valueOf(getMaxBlockReportSize()));
+info.put("maxDataLength", String.valueOf(maxDataLength));
 return info;
   }
 
@@ -305,6 +315,14 @@ class BPServiceActor implements Runnable {
 }
   }
 
+  private int getMaxBlockReportSize() {
+int maxBlockReportSize = 0;
+if (!blockReportSizes.isEmpty()) {
+  maxBlockReportSize = blockReportSizes.last();
+}
+return maxBlockReportSize;
+  }
+
   private long generateUniqueBlockReportId() {
 // Initialize the block report ID the first time through.
 // Note that 0 is used on the NN to indicate "uninitialized", so we should
@@ -353,12 +371,18 @@ class BPServiceActor 

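The tracking pattern above, in isolation: a synchronized TreeSet records every observed report size and last() yields the maximum for the MXBean snapshot. A single running-max AtomicInteger would also work; the set keeps the whole distribution at the cost of memory. Sketch only:

import java.util.Collections;
import java.util.SortedSet;
import java.util.TreeSet;

class MaxReportSizeSketch {
  private final SortedSet<Integer> sizes =
      Collections.synchronizedSortedSet(new TreeSet<Integer>());

  void record(int reportSizeBytes) {
    sizes.add(reportSizeBytes);
  }

  int max() {
    return sizes.isEmpty() ? 0 : sizes.last(); // mirrors getMaxBlockReportSize()
  }
}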
[34/50] [abbrv] hadoop git commit: Revert "HDFS-10762. Pass IIP for file status related methods. Contributed by Daryn Sharp."

2016-08-23 Thread cdouglas
Revert "HDFS-10762. Pass IIP for file status related methods. Contributed by 
Daryn Sharp."

This reverts commit 22fc46d7659972ff016ccf1c6f781f0c160be26f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ca4d6dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ca4d6dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ca4d6dd

Branch: refs/heads/HDFS-9806
Commit: 3ca4d6ddfd199c95677721ff3bcb95d1da45bd88
Parents: f4d4d34
Author: Kihwal Lee 
Authored: Mon Aug 22 16:57:45 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 22 16:57:45 2016 -0500

--
 .../hdfs/server/namenode/FSDirAppendOp.java |  6 +-
 .../server/namenode/FSDirStatAndListingOp.java  | 80 +++-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  3 +-
 .../hdfs/server/namenode/FSDirectory.java   | 14 ++--
 .../hdfs/server/namenode/INodesInPath.java  | 42 ++
 .../hadoop/hdfs/TestReservedRawPaths.java   | 21 -
 6 files changed, 64 insertions(+), 102 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ca4d6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
index 5192352..3a5d7dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
@@ -85,10 +85,9 @@ final class FSDirAppendOp {
 final LocatedBlock lb;
 final FSDirectory fsd = fsn.getFSDirectory();
 final String src;
-final INodesInPath iip;
 fsd.writeLock();
 try {
-  iip = fsd.resolvePathForWrite(pc, srcArg);
+  final INodesInPath iip = fsd.resolvePathForWrite(pc, srcArg);
   src = iip.getPath();
   // Verify that the destination does not exist as a directory already
   final INode inode = iip.getLastINode();
@@ -149,7 +148,8 @@ final class FSDirAppendOp {
   fsd.writeUnlock();
 }
 
-HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip);
+HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, src, false,
+FSDirectory.isReservedRawName(srcArg));
 if (lb != null) {
   NameNode.stateChangeLog.debug(
   "DIR* NameSystem.appendFile: file {} for {} at {} block {} block"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ca4d6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 88be510..c9eedf5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -108,16 +108,16 @@ class FSDirStatAndListingOp {
 if (!DFSUtil.isValidName(src)) {
   throw new InvalidPathException("Invalid file name: " + src);
 }
-final INodesInPath iip;
 if (fsd.isPermissionEnabled()) {
   FSPermissionChecker pc = fsd.getPermissionChecker();
-  iip = fsd.resolvePath(pc, srcArg, resolveLink);
+  final INodesInPath iip = fsd.resolvePath(pc, srcArg, resolveLink);
+  src = iip.getPath();
   fsd.checkPermission(pc, iip, false, null, null, null, null, false);
 } else {
   src = FSDirectory.resolvePath(srcArg, fsd);
-  iip = fsd.getINodesInPath(src, resolveLink);
 }
-return getFileInfo(fsd, iip);
+return getFileInfo(fsd, src, FSDirectory.isReservedRawName(srcArg),
+   resolveLink);
   }
 
   /**
@@ -230,6 +230,7 @@ class FSDirStatAndListingOp {
   String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
   throws IOException {
 String srcs = FSDirectory.normalizePath(src);
+final boolean isRawPath = FSDirectory.isReservedRawName(src);
 if (FSDirectory.isExactReservedName(srcs)) {
   return getReservedListing(fsd);
 }
@@ -256,7 +257,7 @@ class FSDirStatAndListingOp {
 return new DirectoryListing(
 new HdfsFileStatus[]{ createFileStatus(
 fsd, HdfsFileStatus.EMPTY_NAME, 

[49/50] [abbrv] hadoop git commit: YARN-4491. yarn list command to support filtering by tags. Contributed by Varun Saxena

2016-08-23 Thread cdouglas
YARN-4491. yarn list command to support filtering by tags. Contributed by Varun 
Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/143c59e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/143c59e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/143c59e4

Branch: refs/heads/HDFS-9806
Commit: 143c59e4c5a811eb2c12cf6626d558f9b8796e03
Parents: 8aae8d6
Author: Naganarasimha 
Authored: Wed Aug 24 01:53:02 2016 +0530
Committer: Naganarasimha 
Committed: Wed Aug 24 01:53:02 2016 +0530

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |  10 +
 .../hadoop/yarn/client/api/YarnClient.java  |  25 ++
 .../yarn/client/api/impl/YarnClientImpl.java|  11 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  42 +++-
 .../hadoop/yarn/client/cli/TestYarnCLI.java | 241 ---
 5 files changed, 282 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/143c59e4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index cc164fd..159b518 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -349,6 +349,16 @@ public class ResourceMgrDelegate extends YarnClient {
   }
 
   @Override
+  public List<ApplicationReport> getApplications(
+      Set<String> applicationTypes,
+      EnumSet<YarnApplicationState> applicationStates,
+      Set<String> applicationTags)
+      throws YarnException, IOException {
+return client.getApplications(
+applicationTypes, applicationStates, applicationTags);
+  }
+
+  @Override
   public List<ApplicationReport> getApplications(Set<String> queues,
       Set<String> users, Set<String> applicationTypes,
       EnumSet<YarnApplicationState> applicationStates) throws YarnException,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/143c59e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
index 218bb34..619ea0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
@@ -314,6 +314,31 @@ public abstract class YarnClient extends AbstractService {
 
   /**
+   * <p>
+   * Get a report (ApplicationReport) of Applications matching the given
+   * application types, application states and application tags in the cluster.
+   * </p>
+   *
+   * <p>
+   * If the user does not have <code>VIEW_APP</code> access for an application
+   * then the corresponding report will be filtered as described in
+   * {@link #getApplicationReport(ApplicationId)}.
+   * </p>
+   *
+   * @param applicationTypes set of application types you are interested in
+   * @param applicationStates set of application states you are interested in
+   * @param applicationTags set of application tags you are interested in
+   * @return a list of reports of applications
+   * @throws YarnException
+   * @throws IOException
+   */
+  public abstract List<ApplicationReport> getApplications(
+      Set<String> applicationTypes,
+      EnumSet<YarnApplicationState> applicationStates,
+      Set<String> applicationTags) throws YarnException,
+      IOException;
+
+  /**
+   * <p>
   * Get a report (ApplicationReport) of Applications matching the given users,
   * queues, application types and application states in the cluster. If any of
   * the params is set to null, it is not used when filtering.
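
A minimal usage sketch of the new three-argument filter (not part of the patch
itself; client setup is abbreviated and the tag value is hypothetical):

    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();

    List<ApplicationReport> reports = client.getApplications(
        Collections.singleton("MAPREDUCE"),            // applicationTypes
        EnumSet.of(YarnApplicationState.RUNNING),      // applicationStates
        Collections.singleton("nightly-etl"));         // applicationTags

On the CLI side, the ApplicationCLI change in this patch exposes the tag filter
on "yarn application -list" (an -appTags option alongside the existing
-appTypes and -appStates filters).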

http://git-wip-us.apache.org/repos/asf/hadoop/blob/143c59e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
 

[17/50] [abbrv] hadoop git commit: MAPREDUCE-6310. Add jdiff support to MapReduce. (Li Lu/vinodkv via wangda)

2016-08-23 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d937457/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.7.2.xml
--
diff --git 
a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.7.2.xml
 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.7.2.xml
new file mode 100644
index 000..dec8dc4
--- /dev/null
+++ 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.7.2.xml
@@ -0,0 +1,990 @@
[990 added lines of generated jdiff XML (the Apache Hadoop MapReduce JobClient
2.7.2 API definitions) follow here in the real commit; the mail archive
stripped all of the XML element markup, leaving only blank "+" lines, so the
hunk body is elided.]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d937457/hadoop-mapreduce-project/dev-support/jdiff/Null.java
--
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Null.java 
b/hadoop-mapreduce-project/dev-support/jdiff/Null.java
new file mode 100644
index 000..7b00145
--- /dev/null
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Null.java
@@ -0,0 +1,20 @@
+/**
+ * 

[32/50] [abbrv] hadoop git commit: HDFS-10762. Pass IIP for file status related methods. Contributed by Daryn Sharp.

2016-08-23 Thread cdouglas
HDFS-10762. Pass IIP for file status related methods. Contributed by Daryn 
Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22fc46d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22fc46d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22fc46d7

Branch: refs/heads/HDFS-9806
Commit: 22fc46d7659972ff016ccf1c6f781f0c160be26f
Parents: dc7a1c5
Author: Kihwal Lee 
Authored: Mon Aug 22 15:37:02 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 22 15:37:02 2016 -0500

--
 .../hdfs/server/namenode/FSDirAppendOp.java |  6 +-
 .../server/namenode/FSDirStatAndListingOp.java  | 80 +---
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  3 +-
 .../hdfs/server/namenode/FSDirectory.java   | 14 ++--
 .../hdfs/server/namenode/INodesInPath.java  | 42 --
 .../hadoop/hdfs/TestReservedRawPaths.java   | 21 +
 6 files changed, 102 insertions(+), 64 deletions(-)
--
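
The recurring pattern in the hunks below: resolve the caller's path to an
INodesInPath once, under the appropriate FSDirectory lock, and hand that iip to
the status helpers instead of a raw path string plus isReservedRawName and
resolveLink flags. The status lookup then operates on exactly the inodes the
permission check saw, and the path is never re-resolved.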


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22fc46d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
index 3a5d7dc..5192352 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
@@ -85,9 +85,10 @@ final class FSDirAppendOp {
 final LocatedBlock lb;
 final FSDirectory fsd = fsn.getFSDirectory();
 final String src;
+final INodesInPath iip;
 fsd.writeLock();
 try {
-  final INodesInPath iip = fsd.resolvePathForWrite(pc, srcArg);
+  iip = fsd.resolvePathForWrite(pc, srcArg);
   src = iip.getPath();
   // Verify that the destination does not exist as a directory already
   final INode inode = iip.getLastINode();
@@ -148,8 +149,7 @@ final class FSDirAppendOp {
   fsd.writeUnlock();
 }
 
-HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, src, false,
-FSDirectory.isReservedRawName(srcArg));
+HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip);
 if (lb != null) {
   NameNode.stateChangeLog.debug(
   "DIR* NameSystem.appendFile: file {} for {} at {} block {} block"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22fc46d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index c9eedf5..88be510 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -108,16 +108,16 @@ class FSDirStatAndListingOp {
 if (!DFSUtil.isValidName(src)) {
   throw new InvalidPathException("Invalid file name: " + src);
 }
+final INodesInPath iip;
 if (fsd.isPermissionEnabled()) {
   FSPermissionChecker pc = fsd.getPermissionChecker();
-  final INodesInPath iip = fsd.resolvePath(pc, srcArg, resolveLink);
-  src = iip.getPath();
+  iip = fsd.resolvePath(pc, srcArg, resolveLink);
   fsd.checkPermission(pc, iip, false, null, null, null, null, false);
 } else {
   src = FSDirectory.resolvePath(srcArg, fsd);
+  iip = fsd.getINodesInPath(src, resolveLink);
 }
-return getFileInfo(fsd, src, FSDirectory.isReservedRawName(srcArg),
-   resolveLink);
+return getFileInfo(fsd, iip);
   }
 
   /**
@@ -230,7 +230,6 @@ class FSDirStatAndListingOp {
   String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
   throws IOException {
 String srcs = FSDirectory.normalizePath(src);
-final boolean isRawPath = FSDirectory.isReservedRawName(src);
 if (FSDirectory.isExactReservedName(srcs)) {
   return getReservedListing(fsd);
 }
@@ -257,7 +256,7 @@ class FSDirStatAndListingOp {
 return new DirectoryListing(
 new HdfsFileStatus[]{ createFileStatus(
 fsd, HdfsFileStatus.EMPTY_NAME, nodeAttrs,
-needLocation, parentStoragePolicy, snapshot, 

hadoop git commit: HADOOP-12668. Support excluding weak Ciphers in HttpServer2 through ssl-server.conf. Contributed by Vijay Singh.

2016-08-23 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 cec608305 -> 5fe29062e


HADOOP-12668. Support excluding weak Ciphers in HttpServer2 through 
ssl-server.conf. Contributed by Vijay Singh.

Change-Id: Ie46a5427d29188935427f67e55203c19fcd83335
(cherry picked from commit 6d4a4e785b254a0761a6373ff1376fe64ecf6398)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5fe29062
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5fe29062
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5fe29062

Branch: refs/heads/branch-2.7
Commit: 5fe29062eba852f8eeca435ab3c17bb42d8fde73
Parents: cec6083
Author: Zhe Zhang 
Authored: Mon Feb 22 14:12:33 2016 -0800
Committer: Zhe Zhang 
Committed: Tue Aug 23 15:09:05 2016 -0700

--
 .../src/main/conf/ssl-server.xml.example|  10 +
 .../org/apache/hadoop/http/HttpServer2.java |  12 ++
 .../security/ssl/FileBasedKeyStoresFactory.java |   2 +
 .../apache/hadoop/http/TestHttpCookieFlag.java  |   5 +-
 .../apache/hadoop/http/TestSSLHttpServer.java   | 198 ++-
 .../hadoop/security/ssl/KeyStoreTestUtil.java   |  81 +++-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|   4 +-
 .../hadoop/yarn/webapp/util/WebAppUtils.java|   4 +-
 8 files changed, 298 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fe29062/hadoop-common-project/hadoop-common/src/main/conf/ssl-server.xml.example
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/conf/ssl-server.xml.example 
b/hadoop-common-project/hadoop-common/src/main/conf/ssl-server.xml.example
index 02d300c..a6820e9 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/ssl-server.xml.example
+++ b/hadoop-common-project/hadoop-common/src/main/conf/ssl-server.xml.example
@@ -75,4 +75,14 @@
   </property>
 
 
+<property>
+  <name>ssl.server.exclude.cipher.list</name>
+  <value>TLS_ECDHE_RSA_WITH_RC4_128_SHA,SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
+  SSL_RSA_WITH_DES_CBC_SHA,SSL_DHE_RSA_WITH_DES_CBC_SHA,
+  SSL_RSA_EXPORT_WITH_RC4_40_MD5,SSL_RSA_EXPORT_WITH_DES40_CBC_SHA,
+  SSL_RSA_WITH_RC4_128_MD5</value>
+  <description>Optional. The weak security cipher suites that you want excluded
+  from SSL communication.</description>
+</property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fe29062/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 11ab23b..6575fc8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -171,6 +171,7 @@ public final class HttpServer2 implements FilterContainer {
 private String hostName;
 private boolean disallowFallbackToRandomSignerSecretProvider;
     private String authFilterConfigurationPrefix = "hadoop.http.authentication.";
+    private String excludeCiphers;
 
 public Builder setName(String name){
   this.name = name;
@@ -275,6 +276,11 @@ public final class HttpServer2 implements FilterContainer {
   return this;
 }
 
+public Builder excludeCiphers(String pExcludeCiphers) {
+  this.excludeCiphers = pExcludeCiphers;
+  return this;
+}
+
 public HttpServer2 build() throws IOException {
   Preconditions.checkNotNull(name, "name is not set");
   Preconditions.checkState(!endpoints.isEmpty(), "No endpoints specified");
@@ -315,6 +321,12 @@ public final class HttpServer2 implements FilterContainer {
 c.setTruststoreType(trustStoreType);
 c.setTrustPassword(trustStorePassword);
   }
+
+  if(null != excludeCiphers && !excludeCiphers.isEmpty()) {
+c.setExcludeCipherSuites(excludeCiphers.split(","));
+LOG.info("Excluded Cipher List:" + excludeCiphers);
+  }
+
   listener = c;
 
 } else {
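
A minimal sketch of how a server wires this up (not part of the patch; the
configuration loading and the endpoint are assumptions for illustration):

    Configuration sslConf = new Configuration(false);
    sslConf.addResource("ssl-server.xml");

    HttpServer2 server = new HttpServer2.Builder()
        .setName("example")
        .addEndpoint(URI.create("https://localhost:0"))
        .excludeCiphers(sslConf.get("ssl.server.exclude.cipher.list"))
        .build();
    server.start();

The actual wiring for the HDFS and YARN daemons lands in DFSUtil and
WebAppUtils, per the file list above.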

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fe29062/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
index 609c71f..f81fbdc 100644
--- 

hadoop git commit: HADOOP-12765. HttpServer2 should switch to using the non-blocking SslSelectChannelConnector to prevent performance degradation when handling SSL connections. Contributed by Min Shen

2016-08-23 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 80d8e79e2 -> 8bc33bf34


HADOOP-12765. HttpServer2 should switch to using the non-blocking 
SslSelectChannelConnector to prevent performance degradation when handling SSL 
connections. Contributed by Min Shen. Branch-2 patch contributed by Wei-Chiu 
Chuang.

(cherry picked from commit dfcbc12026b591745a7d7279f2b840152cb53a91)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bc33bf3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bc33bf3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bc33bf3

Branch: refs/heads/branch-2.8
Commit: 8bc33bf343b7e9005e04f0dc6078bfb06fb22815
Parents: 80d8e79
Author: Zhe Zhang 
Authored: Tue Aug 23 14:46:08 2016 -0700
Committer: Zhe Zhang 
Committed: Tue Aug 23 14:46:47 2016 -0700

--
 hadoop-common-project/hadoop-common/pom.xml |  5 ++
 .../org/apache/hadoop/http/HttpServer2.java | 76 +++-
 .../ssl/SslSelectChannelConnectorSecure.java| 58 +++
 .../security/ssl/SslSocketConnectorSecure.java  | 58 ---
 .../hadoop/crypto/key/kms/server/MiniKMS.java   |  9 +--
 .../org/apache/hadoop/test/TestJettyHelper.java |  6 +-
 hadoop-project/pom.xml  |  5 ++
 7 files changed, 118 insertions(+), 99 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bc33bf3/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 9fc4afc..42a2f21 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -106,6 +106,11 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty-sslengine</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
       <groupId>javax.servlet.jsp</groupId>
       <artifactId>jsp-api</artifactId>
       <scope>runtime</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bc33bf3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 2a46836..4b1e6ab 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -56,7 +56,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
-import org.apache.hadoop.security.ssl.SslSocketConnectorSecure;
+import org.apache.hadoop.security.ssl.SslSelectChannelConnectorSecure;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
 import org.apache.hadoop.metrics.MetricsServlet;
@@ -78,7 +78,7 @@ import org.mortbay.jetty.handler.ContextHandlerCollection;
 import org.mortbay.jetty.handler.HandlerCollection;
 import org.mortbay.jetty.handler.RequestLogHandler;
 import org.mortbay.jetty.nio.SelectChannelConnector;
-import org.mortbay.jetty.security.SslSocketConnector;
+import org.mortbay.jetty.security.SslSelectChannelConnector;
 import org.mortbay.jetty.servlet.AbstractSessionManager;
 import org.mortbay.jetty.servlet.Context;
 import org.mortbay.jetty.servlet.DefaultServlet;
@@ -333,29 +333,7 @@ public final class HttpServer2 implements FilterContainer {
 if ("http".equals(scheme)) {
   listener = HttpServer2.createDefaultChannelConnector();
 } else if ("https".equals(scheme)) {
-  SslSocketConnector c = new SslSocketConnectorSecure();
-  c.setHeaderBufferSize(1024*64);
-  c.setNeedClientAuth(needsClientAuth);
-  c.setKeyPassword(keyPassword);
-
-  if (keyStore != null) {
-c.setKeystore(keyStore);
-c.setKeystoreType(keyStoreType);
-c.setPassword(keyStorePassword);
-  }
-
-  if (trustStore != null) {
-c.setTruststore(trustStore);
-c.setTruststoreType(trustStoreType);
-c.setTrustPassword(trustStorePassword);
-  }
-
-  if(null != excludeCiphers && !excludeCiphers.isEmpty()) {
-c.setExcludeCipherSuites(excludeCiphers.split(","));
-LOG.info("Excluded Cipher List:" + excludeCiphers);
-  }
-
-  listener = c;
+  listener = createHttpsChannelConnector();
 
 } else {
   throw new HadoopIllegalArgumentException(
@@ -368,6 
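
The new SslSelectChannelConnectorSecure mirrors, for the non-blocking
connector, what SslSocketConnectorSecure did for the blocking one: disable
SSLv3 on every engine the connector creates. A sketch of the approach, assuming
Jetty 6's protected createSSLEngine() hook (the committed class may differ in
detail):

    public class SslSelectChannelConnectorSecure
        extends SslSelectChannelConnector {
      @Override
      protected SSLEngine createSSLEngine() throws IOException {
        SSLEngine engine = super.createSSLEngine();
        // Drop SSLv3 (POODLE) from the protocols this engine will negotiate.
        List<String> enabled = new ArrayList<>();
        for (String protocol : engine.getEnabledProtocols()) {
          if (!"SSLv3".equals(protocol)) {
            enabled.add(protocol);
          }
        }
        engine.setEnabledProtocols(enabled.toArray(new String[0]));
        return engine;
      }
    }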

hadoop git commit: HADOOP-12765. HttpServer2 should switch to using the non-blocking SslSelectChannelConnector to prevent performance degradation when handling SSL connections. Contributed by Min Shen

2016-08-23 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5fa241daa -> dfcbc1202


HADOOP-12765. HttpServer2 should switch to using the non-blocking 
SslSelectChannelConnector to prevent performance degradation when handling SSL 
connections. Contributed by Min Shen. Branch-2 patch contributed by Wei-Chiu 
Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dfcbc120
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dfcbc120
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dfcbc120

Branch: refs/heads/branch-2
Commit: dfcbc12026b591745a7d7279f2b840152cb53a91
Parents: 5fa241d
Author: Zhe Zhang 
Authored: Tue Aug 23 14:46:08 2016 -0700
Committer: Zhe Zhang 
Committed: Tue Aug 23 14:46:08 2016 -0700

--
 hadoop-common-project/hadoop-common/pom.xml |  5 ++
 .../org/apache/hadoop/http/HttpServer2.java | 76 +++-
 .../ssl/SslSelectChannelConnectorSecure.java| 58 +++
 .../security/ssl/SslSocketConnectorSecure.java  | 58 ---
 .../hadoop/crypto/key/kms/server/MiniKMS.java   |  9 +--
 .../org/apache/hadoop/test/TestJettyHelper.java |  6 +-
 hadoop-project/pom.xml  |  5 ++
 7 files changed, 118 insertions(+), 99 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfcbc120/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 09f630c..b35a40e 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -106,6 +106,11 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty-sslengine</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
       <groupId>javax.servlet.jsp</groupId>
       <artifactId>jsp-api</artifactId>
       <scope>runtime</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfcbc120/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index c179bd0..62fb4b6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -56,7 +56,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
-import org.apache.hadoop.security.ssl.SslSocketConnectorSecure;
+import org.apache.hadoop.security.ssl.SslSelectChannelConnectorSecure;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
 import org.apache.hadoop.metrics.MetricsServlet;
@@ -78,7 +78,7 @@ import org.mortbay.jetty.handler.ContextHandlerCollection;
 import org.mortbay.jetty.handler.HandlerCollection;
 import org.mortbay.jetty.handler.RequestLogHandler;
 import org.mortbay.jetty.nio.SelectChannelConnector;
-import org.mortbay.jetty.security.SslSocketConnector;
+import org.mortbay.jetty.security.SslSelectChannelConnector;
 import org.mortbay.jetty.servlet.AbstractSessionManager;
 import org.mortbay.jetty.servlet.Context;
 import org.mortbay.jetty.servlet.DefaultServlet;
@@ -333,29 +333,7 @@ public final class HttpServer2 implements FilterContainer {
 if ("http".equals(scheme)) {
   listener = HttpServer2.createDefaultChannelConnector();
 } else if ("https".equals(scheme)) {
-  SslSocketConnector c = new SslSocketConnectorSecure();
-  c.setHeaderBufferSize(1024*64);
-  c.setNeedClientAuth(needsClientAuth);
-  c.setKeyPassword(keyPassword);
-
-  if (keyStore != null) {
-c.setKeystore(keyStore);
-c.setKeystoreType(keyStoreType);
-c.setPassword(keyStorePassword);
-  }
-
-  if (trustStore != null) {
-c.setTruststore(trustStore);
-c.setTruststoreType(trustStoreType);
-c.setTrustPassword(trustStorePassword);
-  }
-
-  if(null != excludeCiphers && !excludeCiphers.isEmpty()) {
-c.setExcludeCipherSuites(excludeCiphers.split(","));
-LOG.info("Excluded Cipher List:" + excludeCiphers);
-  }
-
-  listener = c;
+  listener = createHttpsChannelConnector();
 
 } else {
   throw new HadoopIllegalArgumentException(
@@ -368,6 +346,32 @@ public final class HttpServer2 implements FilterContainer {
   

hadoop git commit: HADOOP-12726. Unsupported FS operations should throw UnsupportedOperationException. Contributed by Daniel Templeton.

2016-08-23 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-alpha1 c462ee356 -> e07068472


HADOOP-12726. Unsupported FS operations should throw 
UnsupportedOperationException. Contributed by Daniel Templeton.

(cherry picked from commit c37346d0e3f9d39d0aec7a9c5bda3e9772aa969b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0706847
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0706847
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0706847

Branch: refs/heads/branch-3.0.0-alpha1
Commit: e0706847238d9a72daf4312aab86c4467a0f6659
Parents: c462ee3
Author: Chris Douglas 
Authored: Tue Aug 23 14:05:57 2016 -0700
Committer: Chris Douglas 
Committed: Tue Aug 23 14:18:15 2016 -0700

--
 .../main/java/org/apache/hadoop/fs/ChecksumFileSystem.java   | 6 --
 .../src/main/java/org/apache/hadoop/fs/ChecksumFs.java   | 3 ++-
 .../main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java| 3 ++-
 .../main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java  | 4 ++--
 .../apache/hadoop/metrics2/sink/RollingFileSystemSink.java   | 6 +++---
 .../hadoop-common/src/site/markdown/filesystem/filesystem.md | 8 
 .../main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java| 3 ++-
 .../org/apache/hadoop/fs/s3native/NativeS3FileSystem.java| 3 ++-
 8 files changed, 21 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0706847/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 1f14c4d..e4c0b33 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -356,12 +356,14 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
   @Override
   public FSDataOutputStream append(Path f, int bufferSize,
   Progressable progress) throws IOException {
-throw new IOException("Not supported");
+throw new UnsupportedOperationException("Append is not supported "
++ "by ChecksumFileSystem");
   }
 
   @Override
   public boolean truncate(Path f, long newLength) throws IOException {
-throw new IOException("Not supported");
+throw new UnsupportedOperationException("Truncate is not supported "
++ "by ChecksumFileSystem");
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0706847/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index 2b632a1..6e98db5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -297,7 +297,8 @@ public abstract class ChecksumFs extends FilterFs {
 
   @Override
   public boolean truncate(Path f, long newLength) throws IOException {
-throw new IOException("Not supported");
+throw new UnsupportedOperationException("Truncate is not supported "
++ "by ChecksumFs");
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0706847/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index d429c42..f1afacd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -288,7 +288,8 @@ public class FTPFileSystem extends FileSystem {
   @Override
   public FSDataOutputStream append(Path f, int bufferSize,
   Progressable progress) throws IOException {
-throw new IOException("Not supported");
+throw new UnsupportedOperationException("Append is not supported "
++ "by FTPFileSystem");
   }
   
   /**


hadoop git commit: HADOOP-12726. Unsupported FS operations should throw UnsupportedOperationException. Contributed by Daniel Templeton.

2016-08-23 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/trunk 143c59e4c -> c37346d0e


HADOOP-12726. Unsupported FS operations should throw 
UnsupportedOperationException. Contributed by Daniel Templeton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c37346d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c37346d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c37346d0

Branch: refs/heads/trunk
Commit: c37346d0e3f9d39d0aec7a9c5bda3e9772aa969b
Parents: 143c59e
Author: Chris Douglas 
Authored: Tue Aug 23 14:05:57 2016 -0700
Committer: Chris Douglas 
Committed: Tue Aug 23 14:12:52 2016 -0700

--
 .../main/java/org/apache/hadoop/fs/ChecksumFileSystem.java   | 6 --
 .../src/main/java/org/apache/hadoop/fs/ChecksumFs.java   | 3 ++-
 .../main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java| 3 ++-
 .../main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java  | 4 ++--
 .../apache/hadoop/metrics2/sink/RollingFileSystemSink.java   | 6 +++---
 .../hadoop-common/src/site/markdown/filesystem/filesystem.md | 8 
 .../main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java| 3 ++-
 .../org/apache/hadoop/fs/s3native/NativeS3FileSystem.java| 3 ++-
 8 files changed, 21 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37346d0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 1f14c4d..e4c0b33 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -356,12 +356,14 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
   @Override
   public FSDataOutputStream append(Path f, int bufferSize,
   Progressable progress) throws IOException {
-throw new IOException("Not supported");
+throw new UnsupportedOperationException("Append is not supported "
++ "by ChecksumFileSystem");
   }
 
   @Override
   public boolean truncate(Path f, long newLength) throws IOException {
-throw new IOException("Not supported");
+throw new UnsupportedOperationException("Truncate is not supported "
++ "by ChecksumFileSystem");
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37346d0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index 2b632a1..6e98db5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -297,7 +297,8 @@ public abstract class ChecksumFs extends FilterFs {
 
   @Override
   public boolean truncate(Path f, long newLength) throws IOException {
-throw new IOException("Not supported");
+throw new UnsupportedOperationException("Truncate is not supported "
++ "by ChecksumFs");
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37346d0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index d429c42..f1afacd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -288,7 +288,8 @@ public class FTPFileSystem extends FileSystem {
   @Override
   public FSDataOutputStream append(Path f, int bufferSize,
   Progressable progress) throws IOException {
-throw new IOException("Not supported");
+throw new UnsupportedOperationException("Append is not supported "
++ "by FTPFileSystem");
   }
   
   /**
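
The caller-visible effect, as a sketch (fs and path are hypothetical): an
unsupported operation now surfaces as an unchecked UnsupportedOperationException
rather than a generic IOException, so callers can tell "not implemented here"
from "supported but failed".

    try {
      fs.truncate(path, 0L);
    } catch (UnsupportedOperationException e) {
      // this FileSystem (e.g. ChecksumFileSystem) does not implement truncate
    } catch (IOException e) {
      // truncate is supported by this FileSystem, but this call failed
    }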

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c37346d0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
--

hadoop git commit: YARN-4491. yarn list command to support filtering by tags. Contributed by Varun Saxena

2016-08-23 Thread naganarasimha_gr
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e1d3b8e71 -> 5fa241daa


YARN-4491. yarn list command to support filtering by tags. Contributed by Varun 
Saxena

(cherry picked from commit 143c59e4c5a811eb2c12cf6626d558f9b8796e03)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5fa241da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5fa241da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5fa241da

Branch: refs/heads/branch-2
Commit: 5fa241daa055571ca331d09005f6ddf8c1b514c5
Parents: e1d3b8e
Author: Naganarasimha 
Authored: Wed Aug 24 01:53:02 2016 +0530
Committer: Naganarasimha 
Committed: Wed Aug 24 01:55:24 2016 +0530

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |  10 +
 .../hadoop/yarn/client/api/YarnClient.java  |  25 ++
 .../yarn/client/api/impl/YarnClientImpl.java|  11 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  42 +++-
 .../hadoop/yarn/client/cli/TestYarnCLI.java | 241 ---
 5 files changed, 282 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fa241da/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index cc164fd..159b518 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -349,6 +349,16 @@ public class ResourceMgrDelegate extends YarnClient {
   }
 
   @Override
+  public List<ApplicationReport> getApplications(
+      Set<String> applicationTypes,
+      EnumSet<YarnApplicationState> applicationStates,
+      Set<String> applicationTags)
+      throws YarnException, IOException {
+    return client.getApplications(
+        applicationTypes, applicationStates, applicationTags);
+  }
+
+  @Override
   public List<ApplicationReport> getApplications(Set<String> queues,
       Set<String> users, Set<String> applicationTypes,
       EnumSet<YarnApplicationState> applicationStates) throws YarnException,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fa241da/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
index 218bb34..619ea0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
@@ -314,6 +314,31 @@ public abstract class YarnClient extends AbstractService {
 
   /**
+   * <p>
+   * Get a report (ApplicationReport) of Applications matching the given
+   * application types, application states and application tags in the cluster.
+   * </p>
+   *
+   * <p>
+   * If the user does not have <code>VIEW_APP</code> access for an application
+   * then the corresponding report will be filtered as described in
+   * {@link #getApplicationReport(ApplicationId)}.
+   * </p>
+   *
+   * @param applicationTypes set of application types you are interested in
+   * @param applicationStates set of application states you are interested in
+   * @param applicationTags set of application tags you are interested in
+   * @return a list of reports of applications
+   * @throws YarnException
+   * @throws IOException
+   */
+  public abstract List<ApplicationReport> getApplications(
+      Set<String> applicationTypes,
+      EnumSet<YarnApplicationState> applicationStates,
+      Set<String> applicationTags) throws YarnException,
+      IOException;
+
+  /**
+   * <p>
   * Get a report (ApplicationReport) of Applications matching the given users,
   * queues, application types and application states in the cluster. If any of
   * the params is set to null, it is not used when filtering.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fa241da/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--

hadoop git commit: YARN-4491. yarn list command to support filtering by tags. Contributed by Varun Saxena

2016-08-23 Thread naganarasimha_gr
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8aae8d6bf -> 143c59e4c


YARN-4491. yarn list command to support filtering by tags. Contributed by Varun 
Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/143c59e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/143c59e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/143c59e4

Branch: refs/heads/trunk
Commit: 143c59e4c5a811eb2c12cf6626d558f9b8796e03
Parents: 8aae8d6
Author: Naganarasimha 
Authored: Wed Aug 24 01:53:02 2016 +0530
Committer: Naganarasimha 
Committed: Wed Aug 24 01:53:02 2016 +0530

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |  10 +
 .../hadoop/yarn/client/api/YarnClient.java  |  25 ++
 .../yarn/client/api/impl/YarnClientImpl.java|  11 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  42 +++-
 .../hadoop/yarn/client/cli/TestYarnCLI.java | 241 ---
 5 files changed, 282 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/143c59e4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index cc164fd..159b518 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -349,6 +349,16 @@ public class ResourceMgrDelegate extends YarnClient {
   }
 
   @Override
+  public List<ApplicationReport> getApplications(
+      Set<String> applicationTypes,
+      EnumSet<YarnApplicationState> applicationStates,
+      Set<String> applicationTags)
+      throws YarnException, IOException {
+    return client.getApplications(
+        applicationTypes, applicationStates, applicationTags);
+  }
+
+  @Override
   public List<ApplicationReport> getApplications(Set<String> queues,
       Set<String> users, Set<String> applicationTypes,
       EnumSet<YarnApplicationState> applicationStates) throws YarnException,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/143c59e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
index 218bb34..619ea0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
@@ -314,6 +314,31 @@ public abstract class YarnClient extends AbstractService {
 
   /**
+   * <p>
+   * Get a report (ApplicationReport) of Applications matching the given
+   * application types, application states and application tags in the cluster.
+   * </p>
+   *
+   * <p>
+   * If the user does not have <code>VIEW_APP</code> access for an application
+   * then the corresponding report will be filtered as described in
+   * {@link #getApplicationReport(ApplicationId)}.
+   * </p>
+   *
+   * @param applicationTypes set of application types you are interested in
+   * @param applicationStates set of application states you are interested in
+   * @param applicationTags set of application tags you are interested in
+   * @return a list of reports of applications
+   * @throws YarnException
+   * @throws IOException
+   */
+  public abstract List<ApplicationReport> getApplications(
+      Set<String> applicationTypes,
+      EnumSet<YarnApplicationState> applicationStates,
+      Set<String> applicationTags) throws YarnException,
+      IOException;
+
+  /**
+   * <p>
   * Get a report (ApplicationReport) of Applications matching the given users,
   * queues, application types and application states in the cluster. If any of
   * the params is set to null, it is not used when filtering.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/143c59e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--
diff --git 

[44/50] [abbrv] hadoop git commit: YARN-5321. [YARN-3368] Add resource usage for application by node managers (Wangda Tan via Sunil G) YARN-5320. [YARN-3368] Add resource usage by applications and que

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/df6af2f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
index ff49403..b945451 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
@@ -20,7 +20,9 @@ import Ember from 'ember';
 
 export default Ember.Route.extend({
   model() {
-var apps = this.store.findAll('yarn-app');
-return apps;
+return Ember.RSVP.hash({
+  apps: this.store.findAll('yarn-app'),
+  clusterMetrics: this.store.findAll('ClusterMetric'),
+});
   }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df6af2f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
new file mode 100644
index 000..8719170
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df6af2f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
new file mode 100644
index 000..8719170
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df6af2f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
index 6e57388..64a1b3e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
@@ -22,6 +22,7 @@ export default Ember.Route.extend({
   model(param) {
 // Fetches data from both NM and RM. RM is queried to get node usage info.
 return Ember.RSVP.hash({
+  nodeInfo: { id: param.node_id, addr: param.node_addr },
   node: this.store.findRecord('yarn-node', param.node_addr),
   rmNode: this.store.findRecord('yarn-rm-node', param.node_id)
 });
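
(Ember.RSVP.hash resolves the named promises in parallel and hands the template
a single model object, so the node page can read NM data via "node", RM data
via "rmNode", and the raw route parameters via "nodeInfo" from one model hook.)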


[16/50] [abbrv] hadoop git commit: HDFS-10783. The option '-maxSize' and '-step' fail in OfflineImageViewer. Contributed by Yiqun Lin.

2016-08-23 Thread wangda
HDFS-10783. The option '-maxSize' and '-step' fail in OfflineImageViewer. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e90f3359
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e90f3359
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e90f3359

Branch: refs/heads/YARN-3368
Commit: e90f3359de299ef5e3a54ca71070e3dfe1dbb98c
Parents: 0d5997d
Author: Akira Ajisaka 
Authored: Tue Aug 23 19:56:27 2016 +0900
Committer: Akira Ajisaka 
Committed: Tue Aug 23 19:57:23 2016 +0900

--
 .../offlineImageViewer/OfflineImageViewer.java   |  2 ++
 .../TestOfflineImageViewer.java  | 19 +++
 2 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e90f3359/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
index 7f81ba8..770cde1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
@@ -186,6 +186,8 @@ public class OfflineImageViewer {
 
 options.addOption("p", "processor", true, "");
 options.addOption("h", "help", false, "");
+options.addOption("maxSize", true, "");
+options.addOption("step", true, "");
 options.addOption("skipBlocks", false, "");
 options.addOption("printToScreen", false, "");
 options.addOption("delimiter", true, "");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e90f3359/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index b9aa7f3..a7c30ec 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -601,4 +601,23 @@ public class TestOfflineImageViewer {
 "FileDistribution", "-maxSize", "23", "-step", "4"});
 assertEquals(0, status);
   }
+
+  @Test
+  public void testOfflineImageViewerMaxSizeAndStepOptions() throws Exception {
+final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+final PrintStream out = new PrintStream(bytes);
+final PrintStream oldOut = System.out;
+try {
+  System.setOut(out);
+  // Add the -h option to make the test only for option parsing,
+  // and don't need to do the following operations.
+  OfflineImageViewer.main(new String[] {"-i", "-", "-o", "-", "-p",
+  "FileDistribution", "-maxSize", "512", "-step", "8", "-h"});
+  Assert.assertFalse(bytes.toString().contains(
+  "Error parsing command-line options: "));
+} finally {
+  System.setOut(oldOut);
+  IOUtils.closeStream(out);
+}
+  }
 }
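
In short, the option table previously never declared -maxSize and -step, so an
invocation such as "hdfs oiv -p FileDistribution -maxSize 512 -step 8 -i fsimage
-o out" (paths hypothetical) died during option parsing before any image
processing began; registering the two options above restores them for the
FileDistribution processor.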





[27/50] [abbrv] hadoop git commit: YARN-5488. [YARN-3368] Applications table overflows beyond the page boundary(Harish Jaiprakash via Sunil G)

2016-08-23 Thread wangda
YARN-5488. [YARN-3368] Applications table overflows beyond the page 
boundary(Harish Jaiprakash via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6224ea91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6224ea91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6224ea91

Branch: refs/heads/YARN-3368
Commit: 6224ea9187d1311eb51a9a0e86217e851230fc38
Parents: f8dfa50
Author: sunilg 
Authored: Fri Aug 12 14:51:03 2016 +0530
Committer: Wangda Tan 
Committed: Tue Aug 23 10:31:13 2016 -0700

--
 .../src/main/webapp/app/styles/app.css  |  4 +
 .../src/main/webapp/app/templates/yarn-app.hbs  | 98 ++--
 2 files changed, 54 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6224ea91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
index a68a0ac..da5b4bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
@@ -273,3 +273,7 @@ li a.navigation-link.ember-view {
   right: 20px;
   top: 3px;
 }
+
+.x-scroll {
+  overflow-x: scroll;
+}
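
(The design choice here: rather than shrink or truncate columns, the over-wide
table is wrapped in a container carrying this class, so it scrolls horizontally
within the page instead of pushing the layout past the viewport; see the
template change below.)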

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6224ea91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
index 49c4bfd..9e92fc1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
@@ -49,55 +49,57 @@

  [The body of this hunk was mangled by the mail archive, which stripped the
  HTML tags out of the Handlebars template. The change itself re-indents the
  existing Basic Info table, that is, the header cells (Application ID, Name,
  User, Queue, State, Final Status, Start Time, Elapsed Time, Finished Time,
  Priority, Progress, Is Unmanaged AM) and the matching {{model.app.*}} value
  cells, inside a new wrapper element carrying the x-scroll class added to
  app.css above, so that an over-wide table scrolls instead of overflowing the
  page boundary.]

[14/50] [abbrv] hadoop git commit: MAPREDUCE-6587. Remove unused params in connection-related methods of Fetcher. Contributed by Yiqun Lin.

2016-08-23 Thread wangda
MAPREDUCE-6587. Remove unused params in connection-related methods of Fetcher. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cc4a670
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cc4a670
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cc4a670

Branch: refs/heads/YARN-3368
Commit: 8cc4a67059e37b2083cd5468b35a64a403a3e3ae
Parents: c49333b
Author: Akira Ajisaka 
Authored: Tue Aug 23 17:04:55 2016 +0900
Committer: Akira Ajisaka 
Committed: Tue Aug 23 17:04:55 2016 +0900

--
 .../org/apache/hadoop/mapreduce/task/reduce/Fetcher.java  | 10 --
 1 file changed, 4 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cc4a670/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
index d8dd7b5..be2f84f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
@@ -263,7 +263,7 @@ class Fetcher<K,V> extends Thread {
 DataInputStream input = null;
 
 try {
-  setupConnectionsWithRetry(host, remaining, url);
+  setupConnectionsWithRetry(url);
   if (stopped) {
 abortConnect(host, remaining);
   } else {
@@ -374,9 +374,8 @@ class Fetcher<K,V> extends Thread {
 }
   }
 
-  private void setupConnectionsWithRetry(MapHost host,
-      Set<TaskAttemptID> remaining, URL url) throws IOException {
-    openConnectionWithRetry(host, remaining, url);
+  private void setupConnectionsWithRetry(URL url) throws IOException {
+    openConnectionWithRetry(url);
 if (stopped) {
   return;
 }
@@ -396,8 +395,7 @@ class Fetcher extends Thread {
 verifyConnection(url, msgToEncode, encHash);
   }
 
-  private void openConnectionWithRetry(MapHost host,
-      Set<TaskAttemptID> remaining, URL url) throws IOException {
+  private void openConnectionWithRetry(URL url) throws IOException {
 long startTime = Time.monotonicNow();
 boolean shouldWait = true;
 while (shouldWait) {





[18/50] [abbrv] hadoop git commit: HDFS-8986. Add option to -du to calculate directory space usage excluding snapshots. Contributed by Xiao Chen.

2016-08-23 Thread wangda
HDFS-8986. Add option to -du to calculate directory space usage excluding 
snapshots. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0efea49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0efea49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0efea49

Branch: refs/heads/YARN-3368
Commit: f0efea490e5aa9dd629d2199aae9c5b1290a17ee
Parents: dd76238
Author: Wei-Chiu Chuang 
Authored: Tue Aug 23 04:13:48 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Aug 23 04:14:24 2016 -0700

--
 .../org/apache/hadoop/fs/ContentSummary.java| 127 --
 .../java/org/apache/hadoop/fs/shell/Count.java  |  20 +-
 .../org/apache/hadoop/fs/shell/FsUsage.java |  31 ++-
 .../src/site/markdown/FileSystemShell.md|  11 +-
 .../org/apache/hadoop/fs/shell/TestCount.java   |   5 +-
 .../src/test/resources/testConf.xml |  12 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |   8 +
 .../src/main/proto/hdfs.proto   |   4 +
 .../ContentSummaryComputationContext.java   |   6 +
 .../hadoop/hdfs/server/namenode/INode.java  |   9 +-
 .../hdfs/server/namenode/INodeDirectory.java|   4 +
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 235 ++-
 12 files changed, 432 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0efea49/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index 3dedbcc..3e75951 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -34,6 +34,11 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
   private long length;
   private long fileCount;
   private long directoryCount;
+  // These fields are to track the snapshot-related portion of the values.
+  private long snapshotLength;
+  private long snapshotFileCount;
+  private long snapshotDirectoryCount;
+  private long snapshotSpaceConsumed;
 
   /** We don't use generics. Instead override spaceConsumed and other methods
   in order to keep backward compatibility. */
@@ -56,6 +61,26 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
   return this;
 }
 
+public Builder snapshotLength(long snapshotLength) {
+  this.snapshotLength = snapshotLength;
+  return this;
+}
+
+public Builder snapshotFileCount(long snapshotFileCount) {
+  this.snapshotFileCount = snapshotFileCount;
+  return this;
+}
+
+public Builder snapshotDirectoryCount(long snapshotDirectoryCount) {
+  this.snapshotDirectoryCount = snapshotDirectoryCount;
+  return this;
+}
+
+public Builder snapshotSpaceConsumed(long snapshotSpaceConsumed) {
+  this.snapshotSpaceConsumed = snapshotSpaceConsumed;
+  return this;
+}
+
 @Override
 public Builder quota(long quota){
   super.quota(quota);
@@ -107,6 +132,10 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
 private long length;
 private long fileCount;
 private long directoryCount;
+private long snapshotLength;
+private long snapshotFileCount;
+private long snapshotDirectoryCount;
+private long snapshotSpaceConsumed;
   }
 
   /** Constructor deprecated by ContentSummary.Builder*/
@@ -142,17 +171,37 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
 this.length = builder.length;
 this.fileCount = builder.fileCount;
 this.directoryCount = builder.directoryCount;
+this.snapshotLength = builder.snapshotLength;
+this.snapshotFileCount = builder.snapshotFileCount;
+this.snapshotDirectoryCount = builder.snapshotDirectoryCount;
+this.snapshotSpaceConsumed = builder.snapshotSpaceConsumed;
   }
 
   /** @return the length */
   public long getLength() {return length;}
 
+  public long getSnapshotLength() {
+return snapshotLength;
+  }
+
   /** @return the directory count */
   public long getDirectoryCount() {return directoryCount;}
 
+  public long getSnapshotDirectoryCount() {
+return snapshotDirectoryCount;
+  }
+
   /** @return the file count */
   public long getFileCount() {return fileCount;}
 
+  public long getSnapshotFileCount() {
+return snapshotFileCount;
+  }
+
+  public long getSnapshotSpaceConsumed() {
+return snapshotSpaceConsumed;
+  }
+
   @Override
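
A note on the new API surface: the snapshot counters ride on the existing
fluent Builder, so a summary that separates live usage from snapshot usage
can be assembled with only the methods visible in this diff. A minimal
sketch with made-up numbers:

import org.apache.hadoop.fs.ContentSummary;

public class SnapshotSummarySketch {
  public static void main(String[] args) {
    // Made-up numbers; the snapshot* setters are the ones added by this
    // patch and sit alongside the pre-existing Builder methods.
    ContentSummary summary = new ContentSummary.Builder()
        .length(1024L)                 // total, snapshots included
        .fileCount(10L)
        .directoryCount(2L)
        .snapshotLength(256L)          // portion attributable to snapshots
        .snapshotFileCount(3L)
        .snapshotDirectoryCount(1L)
        .snapshotSpaceConsumed(768L)
        .build();

    // An "exclude snapshots" view of the directory is then simply the
    // total minus the snapshot portion.
    System.out.println(summary.getLength() - summary.getSnapshotLength());
  }
}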
   

[28/50] [abbrv] hadoop git commit: YARN-3334. [YARN-3368] Introduce REFRESH button in various UI pages (Sreenath Somarajapuram via Sunil G)

2016-08-23 Thread wangda
YARN-3334. [YARN-3368] Introduce REFRESH button in various UI pages (Sreenath 
Somarajapuram via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51dda0f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51dda0f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51dda0f8

Branch: refs/heads/YARN-3368
Commit: 51dda0f841bd8b9f6b162116bbfb0d6129bb1f91
Parents: df6af2f
Author: sunilg 
Authored: Wed Aug 10 06:53:13 2016 +0530
Committer: Wangda Tan 
Committed: Tue Aug 23 10:31:13 2016 -0700

--
 .../app/components/app-usage-donut-chart.js |  5 ---
 .../src/main/webapp/app/components/bar-chart.js |  4 +-
 .../webapp/app/components/breadcrumb-bar.js | 31 ++
 .../main/webapp/app/components/donut-chart.js   |  8 ++--
 .../app/components/queue-usage-donut-chart.js   |  2 +-
 .../app/controllers/yarn-container-log.js   | 40 ++
 .../webapp/app/controllers/yarn-node-app.js | 36 
 .../src/main/webapp/app/routes/abstract.js  | 32 +++
 .../main/webapp/app/routes/cluster-overview.js  | 12 +-
 .../main/webapp/app/routes/yarn-app-attempt.js  |  9 +++-
 .../main/webapp/app/routes/yarn-app-attempts.js |  8 +++-
 .../src/main/webapp/app/routes/yarn-app.js  | 11 -
 .../src/main/webapp/app/routes/yarn-apps.js |  9 +++-
 .../webapp/app/routes/yarn-container-log.js | 10 -
 .../src/main/webapp/app/routes/yarn-node-app.js |  8 +++-
 .../main/webapp/app/routes/yarn-node-apps.js|  8 +++-
 .../webapp/app/routes/yarn-node-container.js|  8 +++-
 .../webapp/app/routes/yarn-node-containers.js   |  8 +++-
 .../src/main/webapp/app/routes/yarn-node.js |  9 +++-
 .../src/main/webapp/app/routes/yarn-nodes.js|  9 +++-
 .../main/webapp/app/routes/yarn-queue-apps.js   | 12 --
 .../src/main/webapp/app/routes/yarn-queue.js| 14 ---
 .../src/main/webapp/app/routes/yarn-queues.js   | 14 ---
 .../src/main/webapp/app/styles/app.css  |  6 +++
 .../webapp/app/templates/cluster-overview.hbs   |  4 +-
 .../app/templates/components/breadcrumb-bar.hbs | 22 ++
 .../webapp/app/templates/yarn-app-attempt.hbs   |  4 +-
 .../webapp/app/templates/yarn-app-attempts.hbs  |  4 +-
 .../src/main/webapp/app/templates/yarn-app.hbs  |  4 +-
 .../src/main/webapp/app/templates/yarn-apps.hbs |  4 +-
 .../webapp/app/templates/yarn-container-log.hbs |  2 +
 .../main/webapp/app/templates/yarn-node-app.hbs |  2 +
 .../webapp/app/templates/yarn-node-apps.hbs |  4 +-
 .../app/templates/yarn-node-container.hbs   |  4 +-
 .../app/templates/yarn-node-containers.hbs  |  4 +-
 .../src/main/webapp/app/templates/yarn-node.hbs |  4 +-
 .../main/webapp/app/templates/yarn-nodes.hbs|  4 +-
 .../webapp/app/templates/yarn-queue-apps.hbs|  4 +-
 .../main/webapp/app/templates/yarn-queue.hbs|  4 +-
 .../main/webapp/app/templates/yarn-queues.hbs   |  4 +-
 .../components/breadcrumb-bar-test.js   | 43 
 .../unit/controllers/yarn-container-log-test.js | 30 ++
 .../unit/controllers/yarn-node-app-test.js  | 30 ++
 43 files changed, 417 insertions(+), 77 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51dda0f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
index 0baf630..90f41fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
@@ -26,7 +26,6 @@ export default BaseUsageDonutChart.extend({
   colors: d3.scale.category20().range(),
 
   draw: function() {
-this.initChart();
 var usageByApps = [];
 var avail = 100;
 
@@ -60,8 +59,4 @@ export default BaseUsageDonutChart.extend({
 this.renderDonutChart(usageByApps, this.get("title"), 
this.get("showLabels"),
   this.get("middleLabel"), "100%", "%");
   },
-
-  didInsertElement: function() {
-this.draw();
-  },
 })
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51dda0f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/bar-chart.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/bar-chart.js
 

[02/50] [abbrv] hadoop git commit: HADOOP-13428. Fixed hadoop-common build files to generate jdiff by working around a jdiff-bug. Contributed by Wangda Tan.

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99603e90/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml
 
b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml
new file mode 100644
index 000..5ef99b2
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml
@@ -0,0 +1,46648 @@
+ [46,648 lines of generated jdiff XML (Apache_Hadoop_Common_2.7.2.xml) elided; the markup did not survive the list archive]

[06/50] [abbrv] hadoop git commit: HADOOP-13527. Add Spark to CallerContext LimitedPrivate scope. (Contributed by Weiqing Yang)

2016-08-23 Thread wangda
HADOOP-13527. Add Spark to CallerContext LimitedPrivate scope. (Contributed by 
Weiqing Yang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/115ecb52
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/115ecb52
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/115ecb52

Branch: refs/heads/YARN-3368
Commit: 115ecb52a86b49aad3d058a6b4c1c7926b8b0a40
Parents: d37b45d
Author: Mingliang Liu 
Authored: Sun Aug 21 09:40:29 2016 -0700
Committer: Mingliang Liu 
Committed: Sun Aug 21 09:40:29 2016 -0700

--
 .../src/main/java/org/apache/hadoop/ipc/CallerContext.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/115ecb52/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
index 3d21bfe..bdfa471 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
@@ -33,7 +33,7 @@ import java.util.Arrays;
  * This class is immutable.
  */
 @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "Hive", "MapReduce",
-"Pig", "YARN"})
+"Pig", "Spark", "YARN"})
 @InterfaceStability.Evolving
 public final class CallerContext {
   public static final Charset SIGNATURE_ENCODING = StandardCharsets.UTF_8;
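
For context, widening the LimitedPrivate audience lets Spark tag its RPCs
the way the other listed frameworks do. A rough sketch of the calling
pattern using this class's Builder/setCurrent API (the context string
itself is made up; frameworks typically encode an app or attempt id):

import org.apache.hadoop.ipc.CallerContext;

public class CallerContextSketch {
  public static void main(String[] args) {
    // Hypothetical context value for illustration only.
    CallerContext context =
        new CallerContext.Builder("SPARK_app_1472000000000_0001").build();

    // Calls made on this thread while the context is current can be
    // attributed server-side, e.g. in the HDFS audit log.
    CallerContext.setCurrent(context);
    System.out.println(CallerContext.getCurrent().getContext());
  }
}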





[43/50] [abbrv] hadoop git commit: YARN-5321. [YARN-3368] Add resource usage for application by node managers (Wangda Tan via Sunil G) YARN-5320. [YARN-3368] Add resource usage by applications and que

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/df6af2f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
index 8ce4ffa..aae4177 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
@@ -16,55 +16,95 @@
  * limitations under the License.
 }}
 
-
-  {{queue-navigator model=model.queues selected=model.selected}}
+
+  {{em-breadcrumbs items=breadcrumbs}}
 
 
-
-  
-{{queue-configuration-table queue=model.selectedQueue}}
-  
+
+  
 
-  
-{{bar-chart data=model.selectedQueue.capacitiesBarChartData 
-title="Queue Capacities" 
-parentId="capacity-bar-chart"
-textWidth=150
-ratio=0.5
-maxHeight=350}}
-  
+
+  
+
+  Application
+
+
+  
+
+  {{#link-to 'yarn-queue' tagName="li"}}
+{{#link-to 'yarn-queue' model.selected}}Information
+{{/link-to}}
+  {{/link-to}}
+  {{#link-to 'yarn-queue-apps' tagName="li"}}
+{{#link-to 'yarn-queue-apps' model.selected}}Applications List
+{{/link-to}}
+  {{/link-to}}
+
+  
+
+  
+
 
-{{#if model.selectedQueue.hasUserUsages}}
-  
-{{donut-chart data=model.selectedQueue.userUsagesDonutChartData 
-title="User Usages" 
-showLabels=true
-parentId="userusage-donut-chart"
-maxHeight=350}}
-  
-{{/if}}
+
+  
+  
 
-  
-{{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData 
-title="Running Apps" 
-showLabels=true
-parentId="numapplications-donut-chart"
-ratio=0.5
-maxHeight=350}}
-  
-
+
+  
+
+  Queue Information
+
+{{queue-configuration-table queue=model.selectedQueue}}
+  
+
 
-
+
+  
+
+  Queue Capacities
+
+
+  
+  {{bar-chart data=model.selectedQueue.capacitiesBarChartData
+  title=""
+  parentId="capacity-bar-chart"
+  textWidth=170
+  ratio=0.55
+  maxHeight=350}}
+
+  
+
+
+{{#if model.selectedQueue.hasUserUsages}}
+  
+{{donut-chart data=model.selectedQueue.userUsagesDonutChartData
+title="User Usages"
+showLabels=true
+parentId="userusage-donut-chart"
+type="memory"
+ratio=0.6
+maxHeight=350}}
+  
+{{/if}}
+
+
+  
+
+  Running Apps
+
+
+  {{donut-chart 
data=model.selectedQueue.numOfApplicationsDonutChartData
+  showLabels=true
+  parentId="numapplications-donut-chart"
+  ratio=0.6
+  maxHeight=350}}
+
+  
+
+
+  
+
 
-
-  
-{{#if model.apps}}
-  {{app-table table-id="apps-table" arr=model.apps}}
-  {{simple-table table-id="apps-table" bFilter=true 
colTypes="elapsed-time" colTargets="7"}}
-{{else}}
-  Could not find any applications from this 
cluster
-{{/if}}
   
 
-
-{{outlet}}
\ No newline at end of file
+{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df6af2f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
new file mode 100644
index 000..e27341b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
@@ -0,0 +1,72 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on 

[33/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
new file mode 100644
index 000..4e68da0
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+import Constants from 'yarn-ui/constants';
+
+moduleFor('route:yarn-container-log', 'Unit | Route | ContainerLog', {
+});
+
+test('Basic creation test', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+  assert.ok(route.model);
+});
+
+test('Test getting container log', function(assert) {
+  var response = {
+  logs: "This is syslog",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve) {
+resolve(response);
+  }
+)}
+  };
+  assert.expect(6);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.containerLog);
+ assert.deepEqual(value.containerLog, response);
+ assert.ok(value.nodeInfo);
+ assert.equal(value.nodeInfo.addr, 'localhost:8042');
+ assert.equal(value.nodeInfo.id, 'localhost:64318');
+   });
+});
+
+/**
+ * This can happen when an empty response is sent from server
+ */
+test('Test non HTTP error while getting container log', function(assert) {
+  var error = {};
+  var response = {
+  logs: "",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve, reject) {
+reject(error);
+  }
+)}
+  };
+  assert.expect(6);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.containerLog);
+ assert.deepEqual(value.containerLog, response);
+ assert.ok(value.nodeInfo);
+ assert.equal(value.nodeInfo.addr, 'localhost:8042');
+ assert.equal(value.nodeInfo.id, 'localhost:64318');
+   });
+});
+
+test('Test HTTP error while getting container log', function(assert) {
+  var error = {errors: [{status: 404, responseText: 'Not Found'}]};
+  var response = {
+  logs: "",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve, reject) {
+reject(error);
+  }
+)}
+  };
+  assert.expect(5);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.errors);
+ assert.equal(value.errors.length, 1);
+ assert.equal(value.errors[0].status, 404);
+ assert.equal(value.errors[0].responseText, 'Not Found');
+   });
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-app-test.js
--
diff 

[20/50] [abbrv] hadoop git commit: HADOOP-13446. Support running isolated unit tests separate from AWS integration tests. Contributed by Chris Nauroth.

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContext.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContext.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContext.java
deleted file mode 100644
index 34d78a5..000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContext.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3a.fileContext;
-
-import org.apache.hadoop.fs.TestFileContext;
-
-/**
- * Implementation of TestFileContext for S3a
- */
-public class TestS3AFileContext extends TestFileContext{
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextCreateMkdir.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextCreateMkdir.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextCreateMkdir.java
deleted file mode 100644
index b0c4d84..000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextCreateMkdir.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3a.fileContext;
-
-import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContextCreateMkdirBaseTest;
-import org.apache.hadoop.fs.s3a.S3ATestUtils;
-import org.junit.Before;
-
-/**
- * Extends FileContextCreateMkdirBaseTest for a S3a FileContext
- */
-public class TestS3AFileContextCreateMkdir
-extends FileContextCreateMkdirBaseTest {
-
-  @Before
-  public void setUp() throws IOException, Exception {
-Configuration conf = new Configuration();
-fc = S3ATestUtils.createTestFileContext(conf);
-super.setUp();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextMainOperations.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextMainOperations.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextMainOperations.java
deleted file mode 100644
index 4d200d1..000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/TestS3AFileContextMainOperations.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3a.fileContext;
-
-import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContextMainOperationsBaseTest;
-import org.apache.hadoop.fs.s3a.S3ATestUtils;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-
-/**
- * S3A implementation of FileContextMainOperationsBaseTest
- */
-public class TestS3AFileContextMainOperations
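
These deletions are one half of the split: the S3A cases need live AWS
credentials, so they leave the default surefire Test* run. A sketch of
the renamed counterpart of the first deleted class, assuming the ITest*
prefix this work adopts for failsafe-run integration tests:

package org.apache.hadoop.fs.s3a.fileContext;

import org.apache.hadoop.fs.TestFileContext;

/**
 * Sketch (assumed name): the ITest* prefix keeps the class out of the
 * surefire unit-test run, while the failsafe plugin picks it up during
 * "mvn verify" when AWS credentials are configured.
 */
public class ITestS3AFileContext extends TestFileContext {
}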

[29/50] [abbrv] hadoop git commit: YARN-4733. [YARN-3368] Initial commit of new YARN web UI. (wangda)

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a504ec91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
new file mode 100644
index 000..d39885e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
@@ -0,0 +1,29 @@
+import DS from 'ember-data';
+
+export default DS.JSONAPISerializer.extend({
+normalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  var fixedPayload = {
+id: id,
+type: primaryModelClass.modelName,
+attributes: payload
+  };
+
+  return this._super(store, primaryModelClass, fixedPayload, id,
+requestType);
+},
+
+normalizeArrayResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  // return expected is { data: [ {}, {} ] }
+  var normalizedArrayResponse = {};
+
+  // payload has apps : { app: [ {},{},{} ]  }
+  // need some error handling for ex apps or app may not be defined.
+  normalizedArrayResponse.data = [
+this.normalizeSingleResponse(store, primaryModelClass,
+  payload.clusterMetrics, 1, requestType)
+  ];
+  return normalizedArrayResponse;
+}
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a504ec91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
new file mode 100644
index 000..c5394d0
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
@@ -0,0 +1,49 @@
+import DS from 'ember-data';
+import Converter from 'yarn-ui/utils/converter';
+
+export default DS.JSONAPISerializer.extend({
+internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  
+  if (payload.appAttempt) {
+payload = payload.appAttempt;  
+  }
+  
+  var fixedPayload = {
+id: payload.appAttemptId,
+type: primaryModelClass.modelName, // yarn-app
+attributes: {
+  startTime: Converter.timeStampToDate(payload.startTime),
+  finishedTime: Converter.timeStampToDate(payload.finishedTime),
+  containerId: payload.containerId,
+  nodeHttpAddress: payload.nodeHttpAddress,
+  nodeId: payload.nodeId,
+  state: payload.nodeId,
+  logsLink: payload.logsLink
+}
+  };
+
+  return fixedPayload;
+},
+
+normalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  var p = this.internalNormalizeSingleResponse(store, 
+primaryModelClass, payload, id, requestType);
+  return { data: p };
+},
+
+normalizeArrayResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  // return expected is { data: [ {}, {} ] }
+  var normalizedArrayResponse = {};
+
+  // payload has apps : { app: [ {},{},{} ]  }
+  // need some error handling for ex apps or app may not be defined.
+  normalizedArrayResponse.data = 
payload.appAttempts.appAttempt.map(singleApp => {
+return this.internalNormalizeSingleResponse(store, primaryModelClass,
+  singleApp, singleApp.id, requestType);
+  }, this);
+  return normalizedArrayResponse;
+}
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a504ec91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
new file mode 100644
index 000..a038fff
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
@@ -0,0 +1,66 @@
+import DS from 'ember-data';
+import Converter from 'yarn-ui/utils/converter';
+
+export default DS.JSONAPISerializer.extend({
+internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  if (payload.app) {
+payload = payload.app;  
+  }
+  
+  var fixedPayload = {
+id: id,
+type: primaryModelClass.modelName, // yarn-app
+attributes: {
+  appName: payload.name,
+  user: payload.user,
+  queue: payload.queue,
+  state: payload.state,
+  startTime: Converter.timeStampToDate(payload.startedTime),
+  elapsedTime: 

[19/50] [abbrv] hadoop git commit: HADOOP-13446. Support running isolated unit tests separate from AWS integration tests. Contributed by Chris Nauroth.

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3A.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3A.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3A.java
deleted file mode 100644
index a22dd28..000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3A.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3a.yarn;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.FsStatus;
-import org.apache.hadoop.fs.Path;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.util.EnumSet;
-import org.apache.hadoop.fs.s3a.S3ATestUtils;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-public class TestS3A {
-  private FileContext fc;
-
-  @Rule
-  public final Timeout testTimeout = new Timeout(9);
-
-  @Before
-  public void setUp() throws Exception {
-Configuration conf = new Configuration();
-fc = S3ATestUtils.createTestFileContext(conf);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-if (fc != null) {
-  fc.delete(getTestPath(), true);
-}
-  }
-
-  protected Path getTestPath() {
-return new Path("/tests3afc");
-  }
-
-  @Test
-  public void testS3AStatus() throws Exception {
-FsStatus fsStatus = fc.getFsStatus(null);
-assertNotNull(fsStatus);
-assertTrue("Used capacity should be positive: " + fsStatus.getUsed(),
-fsStatus.getUsed() >= 0);
-assertTrue("Remaining capacity should be positive: " + fsStatus
-.getRemaining(),
-fsStatus.getRemaining() >= 0);
-assertTrue("Capacity should be positive: " + fsStatus.getCapacity(),
-fsStatus.getCapacity() >= 0);
-  }
-
-  @Test
-  public void testS3ACreateFileInSubDir() throws Exception {
-Path dirPath = getTestPath();
-fc.mkdir(dirPath,FileContext.DIR_DEFAULT_PERM,true);
-Path filePath = new Path(dirPath, "file");
-try (FSDataOutputStream file = fc.create(filePath, EnumSet.of(CreateFlag
-.CREATE))) {
-  file.write(666);
-}
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3AMiniYarnCluster.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3AMiniYarnCluster.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3AMiniYarnCluster.java
deleted file mode 100644
index 990d79f..000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/TestS3AMiniYarnCluster.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-package org.apache.hadoop.fs.s3a.yarn;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import 

[47/50] [abbrv] hadoop git commit: YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via wangda)

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d0e0a76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
new file mode 100644
index 000..21a715c
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('serializer:yarn-node-app', 'Unit | Serializer | NodeApp', {
+});
+
+test('Basic creation test', function(assert) {
+  let serializer = this.subject();
+
+  assert.ok(serializer);
+  assert.ok(serializer.normalizeSingleResponse);
+  assert.ok(serializer.normalizeArrayResponse);
+  assert.ok(serializer.internalNormalizeSingleResponse);
+});
+
+test('normalizeArrayResponse test', function(assert) {
+  let serializer = this.subject(),
+  modelClass = {
+modelName: "yarn-node-app"
+  },
+  payload = {
+apps: {
+  app: [{
+id:"application_1456251210105_0001", state:"FINISHED", user:"root"
+  },{
+id:"application_1456251210105_0002", state:"RUNNING",user:"root",
+containerids:["container_e38_1456251210105_0002_01_01",
+"container_e38_1456251210105_0002_01_02"]
+  }]
+}
+  };
+  assert.expect(15);
+  var response =
+  serializer.normalizeArrayResponse({}, modelClass, payload, null, null);
+  assert.ok(response.data);
+  assert.equal(response.data.length, 2);
+  assert.equal(response.data[0].attributes.containers, undefined);
+  assert.equal(response.data[1].attributes.containers.length, 2);
+  assert.deepEqual(response.data[1].attributes.containers,
+  payload.apps.app[1].containerids);
+  for (var i = 0; i < 2; i++) {
+assert.equal(response.data[i].type, modelClass.modelName);
+assert.equal(response.data[i].id, payload.apps.app[i].id);
+assert.equal(response.data[i].attributes.appId, payload.apps.app[i].id);
+assert.equal(response.data[i].attributes.state, payload.apps.app[i].state);
+assert.equal(response.data[i].attributes.user, payload.apps.app[i].user);
+  }
+});
+
+test('normalizeArrayResponse no apps test', function(assert) {
+  let serializer = this.subject(),
+  modelClass = {
+modelName: "yarn-node-app"
+  },
+  payload = { apps: null };
+  assert.expect(5);
+  var response =
+  serializer.normalizeArrayResponse({}, modelClass, payload, null, null);
+  assert.ok(response.data);
+  assert.equal(response.data.length, 1);
+  assert.equal(response.data[0].type, modelClass.modelName);
+  assert.equal(response.data[0].id, "dummy");
+  assert.equal(response.data[0].attributes.appId, undefined);
+});
+
+test('normalizeSingleResponse test', function(assert) {
+  let serializer = this.subject(),
+  modelClass = {
+modelName: "yarn-node-app"
+  },
+  payload = {
+app: {id:"application_1456251210105_0001", state:"FINISHED", user:"root"}
+  };
+  assert.expect(7);
+  var response =
+  serializer.normalizeSingleResponse({}, modelClass, payload, null, null);
+  assert.ok(response.data);
+  assert.equal(payload.app.id, response.data.id);
+  assert.equal(modelClass.modelName, response.data.type);
+  assert.equal(payload.app.id, response.data.attributes.appId);
+  assert.equal(payload.app.state, response.data.attributes.state);
+  assert.equal(payload.app.user, response.data.attributes.user);
+  assert.equal(response.data.attributes.containers, undefined);
+});
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d0e0a76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-container-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-container-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-container-test.js
new file mode 100644
index 000..1f08467
--- 

[45/50] [abbrv] hadoop git commit: YARN-5321. [YARN-3368] Add resource usage for application by node managers (Wangda Tan via Sunil G) YARN-5320. [YARN-3368] Add resource usage by applications and que

2016-08-23 Thread wangda
YARN-5321. [YARN-3368] Add resource usage for application by node managers 
(Wangda Tan via Sunil G)
YARN-5320. [YARN-3368] Add resource usage by applications and queues to cluster 
overview page  (Wangda Tan via Sunil G)
YARN-5322. [YARN-3368] Add a node heat chart map (Wangda Tan via Sunil G)
YARN-5347. [YARN-3368] Applications page improvements (Sreenath Somarajapuram 
via Sunil G)
YARN-5348. [YARN-3368] Node details page improvements (Sreenath Somarajapuram 
via Sunil G)
YARN-5346. [YARN-3368] Queues page improvements (Sreenath Somarajapuram via 
Sunil G)
YARN-5345. [YARN-3368] Cluster overview page improvements (Sreenath 
Somarajapuram via Sunil G)
YARN-5344. [YARN-3368] Generic UI improvements (Sreenath Somarajapuram via 
Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df6af2f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df6af2f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df6af2f1

Branch: refs/heads/YARN-3368
Commit: df6af2f165eca23d67e38d20075890e40a29a5e3
Parents: 0f40a99
Author: Sunil 
Authored: Fri Jul 15 21:16:06 2016 +0530
Committer: Wangda Tan 
Committed: Tue Aug 23 10:31:13 2016 -0700

--
 .../src/main/webapp/app/adapters/yarn-app.js|  14 +
 .../app/components/app-usage-donut-chart.js |  67 
 .../src/main/webapp/app/components/bar-chart.js |   5 +
 .../app/components/base-chart-component.js  |  55 ++-
 .../app/components/base-usage-donut-chart.js|  43 +++
 .../main/webapp/app/components/donut-chart.js   |  55 ++-
 .../main/webapp/app/components/nodes-heatmap.js | 209 +++
 ...er-app-memusage-by-nodes-stacked-barchart.js |  88 +
 ...app-ncontainers-by-nodes-stacked-barchart.js |  67 
 .../app/components/queue-usage-donut-chart.js   |  69 
 .../main/webapp/app/components/queue-view.js|   3 +-
 .../main/webapp/app/components/simple-table.js  |   9 +-
 .../webapp/app/components/stacked-barchart.js   | 198 +++
 .../main/webapp/app/components/timeline-view.js |   2 +-
 .../main/webapp/app/components/tree-selector.js |  43 ++-
 .../webapp/app/controllers/cluster-overview.js  |   9 +
 .../webapp/app/controllers/yarn-app-attempt.js  |  40 +++
 .../webapp/app/controllers/yarn-app-attempts.js |  40 +++
 .../src/main/webapp/app/controllers/yarn-app.js |  38 ++
 .../main/webapp/app/controllers/yarn-apps.js|   9 +
 .../webapp/app/controllers/yarn-node-apps.js|  39 +++
 .../app/controllers/yarn-node-containers.js |  39 +++
 .../main/webapp/app/controllers/yarn-node.js|  37 ++
 .../app/controllers/yarn-nodes-heatmap.js   |  36 ++
 .../main/webapp/app/controllers/yarn-nodes.js   |  33 ++
 .../webapp/app/controllers/yarn-queue-apps.js   |  46 +++
 .../main/webapp/app/controllers/yarn-queue.js   |  20 ++
 .../main/webapp/app/controllers/yarn-queues.js  |  34 ++
 .../webapp/app/controllers/yarn-services.js |  34 ++
 .../main/webapp/app/models/cluster-metric.js|   2 +-
 .../main/webapp/app/models/yarn-app-attempt.js  |  11 +
 .../src/main/webapp/app/models/yarn-app.js  |   4 +
 .../src/main/webapp/app/models/yarn-rm-node.js  |   7 +
 .../src/main/webapp/app/router.js   |  15 +-
 .../src/main/webapp/app/routes/application.js   |   2 +
 .../main/webapp/app/routes/cluster-overview.js  |   9 +-
 .../main/webapp/app/routes/yarn-app-attempts.js |  30 ++
 .../src/main/webapp/app/routes/yarn-app.js  |  17 +-
 .../src/main/webapp/app/routes/yarn-apps.js |   6 +-
 .../main/webapp/app/routes/yarn-apps/apps.js|  22 ++
 .../webapp/app/routes/yarn-apps/services.js |  22 ++
 .../src/main/webapp/app/routes/yarn-node.js |   1 +
 .../src/main/webapp/app/routes/yarn-nodes.js|   5 +-
 .../webapp/app/routes/yarn-nodes/heatmap.js |  22 ++
 .../main/webapp/app/routes/yarn-nodes/table.js  |  22 ++
 .../main/webapp/app/routes/yarn-queue-apps.js   |  36 ++
 .../src/main/webapp/app/routes/yarn-queues.js   |  38 ++
 .../webapp/app/serializers/yarn-app-attempt.js  |  19 +-
 .../src/main/webapp/app/serializers/yarn-app.js |   8 +-
 .../webapp/app/serializers/yarn-container.js|  20 +-
 .../src/main/webapp/app/styles/app.css  | 139 ++--
 .../main/webapp/app/templates/application.hbs   |  99 --
 .../webapp/app/templates/cluster-overview.hbs   | 168 ++---
 .../app/templates/components/app-table.hbs  |  10 +-
 .../templates/components/node-menu-panel.hbs|   2 +-
 .../app/templates/components/nodes-heatmap.hbs  |  27 ++
 .../components/queue-configuration-table.hbs|   4 -
 .../templates/components/queue-navigator.hbs|  14 +-
 .../app/templates/components/timeline-view.hbs  |   3 +-
 .../webapp/app/templates/yarn-app-attempt.hbs   |  13 +-
 .../webapp/app/templates/yarn-app-attempts.hbs  |  57 +++
 .../src/main/webapp/app/templates/yarn-app.hbs  | 346 ---
 

[05/50] [abbrv] hadoop git commit: MAPREDUCE-6762. ControlledJob#toString failed with NPE when job status is not successfully updated (Weiwei Yang via Varun Saxena)

2016-08-23 Thread wangda
MAPREDUCE-6762. ControlledJob#toString failed with NPE when job status is not 
successfully updated (Weiwei Yang via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d37b45d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d37b45d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d37b45d6

Branch: refs/heads/YARN-3368
Commit: d37b45d613b768950d1cbe342961cd71776816ae
Parents: 0faee62
Author: Varun Saxena 
Authored: Sun Aug 21 21:46:17 2016 +0530
Committer: Varun Saxena 
Committed: Sun Aug 21 21:46:17 2016 +0530

--
 .../java/org/apache/hadoop/mapreduce/Job.java   |  2 +-
 .../org/apache/hadoop/mapreduce/TestJob.java| 36 
 2 files changed, 37 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d37b45d6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index 33e820b..45c065d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -422,7 +422,7 @@ public class Job extends JobContextImpl implements 
JobContext {
* The user-specified job name.
*/
   public String getJobName() {
-if (state == JobState.DEFINE) {
+if (state == JobState.DEFINE || status == null) {
   return super.getJobName();
 }
 ensureState(JobState.RUNNING);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d37b45d6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java
index 71bacf7..60f390f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java
@@ -26,6 +26,7 @@ import java.io.IOException;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.JobStatus.State;
+import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -55,6 +56,41 @@ public class TestJob {
   }
 
   @Test
+  public void testUnexpectedJobStatus() throws Exception {
+Cluster cluster = mock(Cluster.class);
+JobID jobid = new JobID("1014873536921", 6);
+ClientProtocol clientProtocol = mock(ClientProtocol.class);
+when(cluster.getClient()).thenReturn(clientProtocol);
+JobStatus status = new JobStatus(jobid, 0f, 0f, 0f, 0f,
+State.RUNNING, JobPriority.DEFAULT, "root",
+"testUnexpectedJobStatus", "job file", "tracking URL");
+when(clientProtocol.getJobStatus(jobid)).thenReturn(status);
+Job job = Job.getInstance(cluster, status, new JobConf());
+
+// ensure job status is RUNNING
+Assert.assertNotNull(job.getStatus());
+Assert.assertTrue(job.getStatus().getState() == State.RUNNING);
+
+// when updating job status, job client could not retrieve
+// job status, and status reset to null
+when(clientProtocol.getJobStatus(jobid)).thenReturn(null);
+
+try {
+  job.updateStatus();
+} catch (IOException e) {
+  Assert.assertTrue(e != null
+  && e.getMessage().contains("Job status not available"));
+}
+
+try {
+  ControlledJob cj = new ControlledJob(job, null);
+  Assert.assertNotNull(cj.toString());
+} catch (NullPointerException e) {
+  Assert.fail("job API fails with NPE");
+}
+  }
+
+  @Test
   public void testUGICredentialsPropogation() throws Exception {
 Credentials creds = new Credentials();
 Token token = mock(Token.class);
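
The one-line guard above is easiest to see in isolation:
ControlledJob#toString reads the job name through a status object that a
failed refresh can leave null. A self-contained sketch of the failure mode
and the fallback (all names made up):

public class JobNameGuardSketch {
  enum JobState { DEFINE, RUNNING }

  static class Status {
    String getJobName() { return "runtime-name"; }
  }

  static String storedName = "my-job";      // the user-specified name
  static JobState state = JobState.RUNNING;
  static Status status = null;              // a failed refresh nulled it

  static String getJobName() {
    if (state == JobState.DEFINE || status == null) {
      return storedName;                    // the patched fallback
    }
    return status.getJobName();             // the old code NPE'd here
  }

  public static void main(String[] args) {
    System.out.println(getJobName());       // prints "my-job", no NPE
  }
}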



[30/50] [abbrv] hadoop git commit: YARN-4733. [YARN-3368] Initial commit of new YARN web UI. (wangda)

2016-08-23 Thread wangda
YARN-4733. [YARN-3368] Initial commit of new YARN web UI. (wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a504ec91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a504ec91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a504ec91

Branch: refs/heads/YARN-3368
Commit: a504ec91db8e68f0c1f3366bf9975b6a5c373a04
Parents: 8aae8d6
Author: Wangda Tan 
Authored: Tue Dec 8 16:37:50 2015 -0800
Committer: Wangda Tan 
Committed: Tue Aug 23 10:31:13 2016 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/.bowerrc |   4 +
 .../hadoop-yarn/hadoop-yarn-ui/.editorconfig|  34 +++
 .../hadoop-yarn/hadoop-yarn-ui/.ember-cli   |  11 +
 .../hadoop-yarn/hadoop-yarn-ui/.gitignore   |  17 ++
 .../hadoop-yarn/hadoop-yarn-ui/.jshintrc|  32 +++
 .../hadoop-yarn/hadoop-yarn-ui/.travis.yml  |  23 ++
 .../hadoop-yarn/hadoop-yarn-ui/.watchmanconfig  |   3 +
 .../hadoop-yarn/hadoop-yarn-ui/README.md|  24 ++
 .../hadoop-yarn-ui/app/adapters/cluster-info.js |  19 ++
 .../app/adapters/cluster-metric.js  |  19 ++
 .../app/adapters/yarn-app-attempt.js|  31 +++
 .../hadoop-yarn-ui/app/adapters/yarn-app.js |  25 ++
 .../app/adapters/yarn-container.js  |  42 +++
 .../hadoop-yarn-ui/app/adapters/yarn-queue.js   |  19 ++
 .../hadoop-yarn/hadoop-yarn-ui/app/app.js   |  20 ++
 .../hadoop-yarn-ui/app/components/.gitkeep  |   0
 .../app/components/app-attempt-table.js |   4 +
 .../hadoop-yarn-ui/app/components/app-table.js  |   4 +
 .../hadoop-yarn-ui/app/components/bar-chart.js  | 104 +++
 .../app/components/base-chart-component.js  | 109 
 .../app/components/container-table.js   |   4 +
 .../app/components/donut-chart.js   | 148 ++
 .../app/components/item-selector.js |  21 ++
 .../app/components/queue-configuration-table.js |   4 +
 .../app/components/queue-navigator.js   |   4 +
 .../hadoop-yarn-ui/app/components/queue-view.js | 272 +++
 .../app/components/simple-table.js  |  30 ++
 .../app/components/timeline-view.js | 250 +
 .../app/components/tree-selector.js | 257 ++
 .../hadoop-yarn-ui/app/controllers/.gitkeep |   0
 .../app/controllers/cluster-overview.js |   5 +
 .../hadoop-yarn-ui/app/controllers/yarn-apps.js |   4 +
 .../app/controllers/yarn-queue.js   |   6 +
 .../hadoop-yarn-ui/app/helpers/.gitkeep |   0
 .../hadoop-yarn/hadoop-yarn-ui/app/index.html   |  25 ++
 .../hadoop-yarn-ui/app/models/.gitkeep  |   0
 .../hadoop-yarn-ui/app/models/cluster-info.js   |  13 +
 .../hadoop-yarn-ui/app/models/cluster-metric.js | 115 
 .../app/models/yarn-app-attempt.js  |  44 +++
 .../hadoop-yarn-ui/app/models/yarn-app.js   |  65 +
 .../hadoop-yarn-ui/app/models/yarn-container.js |  39 +++
 .../hadoop-yarn-ui/app/models/yarn-queue.js |  76 ++
 .../hadoop-yarn-ui/app/models/yarn-user.js  |   8 +
 .../hadoop-yarn/hadoop-yarn-ui/app/router.js|  16 ++
 .../hadoop-yarn-ui/app/routes/.gitkeep  |   0
 .../app/routes/cluster-overview.js  |  11 +
 .../app/routes/yarn-app-attempt.js  |  21 ++
 .../hadoop-yarn-ui/app/routes/yarn-app.js   |  10 +
 .../hadoop-yarn-ui/app/routes/yarn-apps.js  |   8 +
 .../hadoop-yarn-ui/app/routes/yarn-queue.js |  20 ++
 .../app/routes/yarn-queues/index.js |   5 +
 .../app/routes/yarn-queues/queues-selector.js   |   7 +
 .../app/serializers/cluster-info.js |  29 ++
 .../app/serializers/cluster-metric.js   |  29 ++
 .../app/serializers/yarn-app-attempt.js |  49 
 .../hadoop-yarn-ui/app/serializers/yarn-app.js  |  66 +
 .../app/serializers/yarn-container.js   |  54 
 .../app/serializers/yarn-queue.js   | 127 +
 .../hadoop-yarn-ui/app/styles/app.css   | 141 ++
 .../app/templates/application.hbs   |  25 ++
 .../app/templates/cluster-overview.hbs  |  56 
 .../app/templates/components/.gitkeep   |   0
 .../templates/components/app-attempt-table.hbs  |  28 ++
 .../app/templates/components/app-table.hbs  |  62 +
 .../templates/components/container-table.hbs|  36 +++
 .../components/queue-configuration-table.hbs|  40 +++
 .../templates/components/queue-navigator.hbs|  18 ++
 .../app/templates/components/timeline-view.hbs  |  35 +++
 .../app/templates/yarn-app-attempt.hbs  |  12 +
 .../hadoop-yarn-ui/app/templates/yarn-app.hbs   | 145 ++
 .../hadoop-yarn-ui/app/templates/yarn-apps.hbs  |   3 +
 .../hadoop-yarn-ui/app/templates/yarn-queue.hbs |  48 
 .../hadoop-yarn-ui/app/utils/converter.js   |  74 +
 

[21/50] [abbrv] hadoop git commit: HADOOP-13446. Support running isolated unit tests separate from AWS integration tests. Contributed by Chris Nauroth.

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
deleted file mode 100644
index 5ba1871..000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import java.io.IOException;
-import java.net.URI;
-import java.nio.file.AccessDeniedException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
-
-import com.amazonaws.auth.AWSCredentials;
-import com.amazonaws.auth.AWSCredentialsProvider;
-import com.amazonaws.auth.AWSCredentialsProviderChain;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.auth.InstanceProfileCredentialsProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.fs.s3a.Constants.*;
-import static org.apache.hadoop.fs.s3a.S3ATestConstants.*;
-import static org.apache.hadoop.fs.s3a.S3AUtils.*;
-import static org.junit.Assert.*;
-
-/**
- * Tests for {@link Constants#AWS_CREDENTIALS_PROVIDER} logic.
- *
- */
-public class TestS3AAWSCredentialsProvider {
-  private static final Logger LOG =
-  LoggerFactory.getLogger(TestS3AAWSCredentialsProvider.class);
-
-  @Rule
-  public Timeout testTimeout = new Timeout(1 * 60 * 1000);
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Declare what exception to raise, and the text which must be found
-   * in it.
-   * @param exceptionClass class of exception
-   * @param text text in exception
-   */
-  private void expectException(Class<? extends Throwable> exceptionClass,
-  String text) {
-exception.expect(exceptionClass);
-exception.expectMessage(text);
-  }
-
-  @Test
-  public void testBadConfiguration() throws IOException {
-Configuration conf = new Configuration();
-conf.set(AWS_CREDENTIALS_PROVIDER, "no.such.class");
-try {
-  createFailingFS(conf);
-} catch (IOException e) {
-  if (!(e.getCause() instanceof ClassNotFoundException)) {
-LOG.error("Unexpected nested cause: {} in {}", e.getCause(), e, e);
-throw e;
-  }
-}
-  }
-
-  /**
-   * Create a filesystem, expect it to fail by raising an IOException.
-   * Raises an assertion exception if in fact the FS does get instantiated.
-   * @param conf configuration
-   * @throws IOException an expected exception.
-   */
-  private void createFailingFS(Configuration conf) throws IOException {
-S3AFileSystem fs = S3ATestUtils.createTestFileSystem(conf);
-fs.listStatus(new Path("/"));
-fail("Expected exception - got " + fs);
-  }
-
-  static class BadCredentialsProvider implements AWSCredentialsProvider {
-
-@SuppressWarnings("unused")
-public BadCredentialsProvider(URI name, Configuration conf) {
-}
-
-@Override
-public AWSCredentials getCredentials() {
-  return new BasicAWSCredentials("bad_key", "bad_secret");
-}
-
-@Override
-public void refresh() {
-}
-  }
-
-  @Test
-  public void testBadCredentials() throws Exception {
-Configuration conf = new Configuration();
-conf.set(AWS_CREDENTIALS_PROVIDER, BadCredentialsProvider.class.getName());
-try {
-  createFailingFS(conf);
-} catch (AccessDeniedException e) {
-  // expected
-}
-  }
-
-  static class GoodCredentialsProvider extends AWSCredentialsProviderChain {
-
-@SuppressWarnings("unused")
-public GoodCredentialsProvider(URI name, Configuration conf) {
-  super(new BasicAWSCredentialsProvider(conf.get(ACCESS_KEY),
-  conf.get(SECRET_KEY)), new 

[39/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/simple-table.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/simple-table.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/simple-table.js
deleted file mode 100644
index 447533e..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/simple-table.js
+++ /dev/null
@@ -1,58 +0,0 @@
-import Ember from 'ember';
-
-export default Ember.Component.extend({
-  didInsertElement: function() {
-var paging = this.get("paging") ? true : this.get("paging");
-var ordering = this.get("ordering") ? true : this.get("ordering");
-var info = this.get("info") ? true : this.get("info");
-var bFilter = this.get("bFilter") ? true : this.get("bFilter");
-
-// Defines sorter for the columns if not default.
-// Can also specify a custom sorter.
-var i;
-var colDefs = [];
-if (this.get("colTypes")) {
-  var typesArr = this.get("colTypes").split(' ');
-  var targetsArr = this.get("colTargets").split(' ');
-  for (i = 0; i < typesArr.length; i++) {
-console.log(typesArr[i] + " " + targetsArr[i]);
-colDefs.push({
-  type: typesArr[i],
-  targets: parseInt(targetsArr[i])
-});
-  }
-}
-// Defines initial column and sort order.
-var orderArr = [];
-if (this.get("colsOrder")) {
-  var cols = this.get("colsOrder").split(' ');
-  for (i = 0; i < cols.length; i++) {
-var col = cols[i].split(',');
-if (col.length != 2) {
-  continue;
-}
-var order = col[1].trim();
-if (order != 'asc' && order != 'desc') {
-  continue;
-}
-var colOrder = [];
-colOrder.push(parseInt(col[0]));
-colOrder.push(order);
-orderArr.push(colOrder);
-  }
-}
-if (orderArr.length == 0) {
-  var defaultOrder = [0, 'asc'];
-  orderArr.push(defaultOrder);
-}
-console.log(orderArr[0]);
-Ember.$('#' + this.get('table-id')).DataTable({
-  "paging":   paging,
-  "ordering": ordering, 
-  "info": info,
-  "bFilter": bFilter,
-  "order": orderArr,
-  "columnDefs": colDefs
-});
-  }
-});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/timeline-view.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/timeline-view.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/timeline-view.js
deleted file mode 100644
index fe402bb..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/timeline-view.js
+++ /dev/null
@@ -1,250 +0,0 @@
-import Ember from 'ember';
-import Converter from 'yarn-ui/utils/converter';
-
-export default Ember.Component.extend({
-  canvas: {
-svg: undefined,
-h: 0,
-w: 0,
-tooltip: undefined
-  },
-
-  clusterMetrics: undefined,
-  modelArr: [],
-  colors: d3.scale.category10().range(),
-  _selected: undefined,
-
-  selected: function() {
-return this._selected;
-  }.property(),
-
-  tableComponentName: function() {
-return "app-attempt-table";
-  }.property(),
-
-  setSelected: function(d) {
-if (this._selected == d) {
-  return;
-}
-
-// restore color
-if (this._selected) {
-  var dom = d3.select("#timeline-bar-" + this._selected.get("id"));
-  dom.attr("fill", this.colors[0]);
-}
-
-this._selected = d;
-this.set("selected", d);
-dom = d3.select("#timeline-bar-" + d.get("id"));
-dom.attr("fill", this.colors[1]);
-  },
-
-  getPerItemHeight: function() {
-var arrSize = this.modelArr.length;
-
-if (arrSize < 20) {
-  return 30;
-} else if (arrSize < 100) {
-  return 10;
-} else {
-  return 2;
-}
-  },
-
-  getPerItemGap: function() {
-var arrSize = this.modelArr.length;
-
-if (arrSize < 20) {
-  return 5;
-} else if (arrSize < 100) {
-  return 1;
-} else {
-  return 1;
-}
-  },
-
-  getCanvasHeight: function() {
-return (this.getPerItemHeight() + this.getPerItemGap()) * 
this.modelArr.length + 200;
-  },
-
-  draw: function(start, end) {
-// get w/h of the svg
-var bbox = d3.select("#" + this.get("parent-id"))
-  .node()
-  .getBoundingClientRect();
-this.canvas.w = bbox.width;
-this.canvas.h = this.getCanvasHeight();
-
-this.canvas.svg = d3.select("#" + this.get("parent-id"))
-  .append("svg")
-  .attr("width", this.canvas.w)
-  .attr("height", this.canvas.h)
-  .attr("id", this.get("my-id"));
-this.renderTimeline(start, end);
-  },
-
-  renderTimeline: function(start, end) {
-var border = 30;
-var singleBarHeight = 

[07/50] [abbrv] hadoop git commit: HDFS-10692. Update JDiff report's base version for HDFS from 2.6.0 to 2.7.2. Contributed by Wangda Tan.

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc7a1c54/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
new file mode 100644
index 000..028ba2d
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
@@ -0,0 +1,21505 @@
[The 21,505 added lines are the JDiff API-report XML (Apache_Hadoop_HDFS_2.7.2.xml); the mail archiver stripped all of the XML markup, leaving nothing recoverable, so the body is omitted here.]

[12/50] [abbrv] hadoop git commit: HADOOP-13526. Add detailed logging in KMS for the authentication failure of proxy user. Contributed by Suraj Acharya.

2016-08-23 Thread wangda
HADOOP-13526. Add detailed logging in KMS for the authentication failure of 
proxy user. Contributed by Suraj Acharya.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4070caad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4070caad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4070caad

Branch: refs/heads/YARN-3368
Commit: 4070caad70db49b50554088d29ac2fbc7ba62a0a
Parents: 3ca4d6d
Author: Xiao Chen 
Authored: Mon Aug 22 18:06:53 2016 -0700
Committer: Xiao Chen 
Committed: Mon Aug 22 18:09:35 2016 -0700

--
 .../web/DelegationTokenAuthenticationFilter.java  | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4070caad/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
index fb6817e..112c952 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
@@ -39,6 +39,8 @@ import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.http.NameValuePair;
 import org.apache.http.client.utils.URLEncodedUtils;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
@@ -81,6 +83,9 @@ public class DelegationTokenAuthenticationFilter
   private static final String ERROR_EXCEPTION_JSON = "exception";
   private static final String ERROR_MESSAGE_JSON = "message";
 
+  private static final Logger LOG = LoggerFactory.getLogger(
+  DelegationTokenAuthenticationFilter.class);
+
   /**
* Sets an external DelegationTokenSecretManager instance to
* manage creation and verification of Delegation Tokens.
@@ -261,6 +266,11 @@ public class DelegationTokenAuthenticationFilter
 HttpExceptionUtils.createServletExceptionResponse(response,
 HttpServletResponse.SC_FORBIDDEN, ex);
 requestCompleted = true;
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Authentication exception: " + ex.getMessage(), ex);
+} else {
+  LOG.warn("Authentication exception: " + ex.getMessage());
+}
   }
 }
   }
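
The hunk above logs the full exception, stack trace included, only when DEBUG is enabled, and a single WARN line otherwise, so routine proxy-user authentication failures stay compact in production logs while deep diagnosis stays possible. A minimal stand-alone sketch of the same idiom, assuming nothing beyond SLF4J (the class and the triggering exception are illustrative, not the actual KMS types):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class AuthFailureLogging {
      private static final Logger LOG =
          LoggerFactory.getLogger(AuthFailureLogging.class);

      // Same split as the patch: pass the exception object (and thus the
      // stack trace) to the logger only when DEBUG is on; otherwise emit
      // a one-line WARN with just the message.
      static void logAuthFailure(Exception ex) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Authentication exception: " + ex.getMessage(), ex);
        } else {
          LOG.warn("Authentication exception: " + ex.getMessage());
        }
      }

      public static void main(String[] args) {
        logAuthFailure(new SecurityException("proxy user not allowed"));
      }
    }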




[35/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
new file mode 100644
index 000..89858bf
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+  model(param) {
+return Ember.RSVP.hash({
+  selected : param.queue_name,
+  queues: this.store.findAll('yarnQueue'),
+  selectedQueue : undefined,
+  apps: undefined, // apps of selected queue
+});
+  },
+
+  afterModel(model) {
+model.selectedQueue = this.store.peekRecord('yarnQueue', model.selected);
+model.apps = this.store.findAll('yarnApp');
+model.apps.forEach(function(o) {
+  console.log(o);
+})
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
new file mode 100644
index 000..7da6f6d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export default Ember.Route.extend({
+  beforeModel() {
+this.transitionTo('yarnQueues.root');
+  }
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
new file mode 100644
index 000..3686c83
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+

[09/50] [abbrv] hadoop git commit: HDFS-10762. Pass IIP for file status related methods. Contributed by Daryn Sharp.

2016-08-23 Thread wangda
HDFS-10762. Pass IIP for file status related methods. Contributed by Daryn 
Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22fc46d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22fc46d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22fc46d7

Branch: refs/heads/YARN-3368
Commit: 22fc46d7659972ff016ccf1c6f781f0c160be26f
Parents: dc7a1c5
Author: Kihwal Lee 
Authored: Mon Aug 22 15:37:02 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 22 15:37:02 2016 -0500

--
 .../hdfs/server/namenode/FSDirAppendOp.java |  6 +-
 .../server/namenode/FSDirStatAndListingOp.java  | 80 +---
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  3 +-
 .../hdfs/server/namenode/FSDirectory.java   | 14 ++--
 .../hdfs/server/namenode/INodesInPath.java  | 42 --
 .../hadoop/hdfs/TestReservedRawPaths.java   | 21 +
 6 files changed, 102 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22fc46d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
index 3a5d7dc..5192352 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
@@ -85,9 +85,10 @@ final class FSDirAppendOp {
 final LocatedBlock lb;
 final FSDirectory fsd = fsn.getFSDirectory();
 final String src;
+final INodesInPath iip;
 fsd.writeLock();
 try {
-  final INodesInPath iip = fsd.resolvePathForWrite(pc, srcArg);
+  iip = fsd.resolvePathForWrite(pc, srcArg);
   src = iip.getPath();
   // Verify that the destination does not exist as a directory already
   final INode inode = iip.getLastINode();
@@ -148,8 +149,7 @@ final class FSDirAppendOp {
   fsd.writeUnlock();
 }
 
-HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, src, false,
-FSDirectory.isReservedRawName(srcArg));
+HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip);
 if (lb != null) {
   NameNode.stateChangeLog.debug(
   "DIR* NameSystem.appendFile: file {} for {} at {} block {} block"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22fc46d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index c9eedf5..88be510 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -108,16 +108,16 @@ class FSDirStatAndListingOp {
 if (!DFSUtil.isValidName(src)) {
   throw new InvalidPathException("Invalid file name: " + src);
 }
+final INodesInPath iip;
 if (fsd.isPermissionEnabled()) {
   FSPermissionChecker pc = fsd.getPermissionChecker();
-  final INodesInPath iip = fsd.resolvePath(pc, srcArg, resolveLink);
-  src = iip.getPath();
+  iip = fsd.resolvePath(pc, srcArg, resolveLink);
   fsd.checkPermission(pc, iip, false, null, null, null, null, false);
 } else {
   src = FSDirectory.resolvePath(srcArg, fsd);
+  iip = fsd.getINodesInPath(src, resolveLink);
 }
-return getFileInfo(fsd, src, FSDirectory.isReservedRawName(srcArg),
-   resolveLink);
+return getFileInfo(fsd, iip);
   }
 
   /**
@@ -230,7 +230,6 @@ class FSDirStatAndListingOp {
   String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
   throws IOException {
 String srcs = FSDirectory.normalizePath(src);
-final boolean isRawPath = FSDirectory.isReservedRawName(src);
 if (FSDirectory.isExactReservedName(srcs)) {
   return getReservedListing(fsd);
 }
@@ -257,7 +256,7 @@ class FSDirStatAndListingOp {
 return new DirectoryListing(
 new HdfsFileStatus[]{ createFileStatus(
 fsd, HdfsFileStatus.EMPTY_NAME, nodeAttrs,
-needLocation, parentStoragePolicy, snapshot, 
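
The substance of this change (and of its revert, which appears later in this digest) is resolving a path to its INodesInPath once and threading that object through the file-status helpers, rather than passing the raw path string and re-resolving it inside each helper. A hedged sketch of the refactoring shape, using hypothetical stand-in types rather than the real HDFS signatures:

    // Sketch only: illustrative stand-ins, not the actual FSDirectory,
    // INodesInPath or HdfsFileStatus APIs.
    class PathResolutionSketch {

      static class INodesInPath {
        private final String path;
        INodesInPath(String path) { this.path = path; }
        String getPath() { return path; }
      }

      static class FsDir {
        // One (hypothetical) resolution: normalize and look up the path.
        INodesInPath resolvePath(String src) {
          return new INodesInPath(src.replaceAll("/+", "/"));
        }

        // Before: helpers took the raw String and re-resolved it, paying
        // for a second lookup that could disagree with the first.
        Object getFileInfo(String src) {
          return getFileInfo(resolvePath(src));
        }

        // After: callers resolve once and hand over the resolved object.
        Object getFileInfo(INodesInPath iip) {
          return iip.getPath();
        }
      }

      public static void main(String[] args) {
        FsDir fsd = new FsDir();
        INodesInPath iip = fsd.resolvePath("/user//data"); // resolve once
        fsd.getFileInfo(iip);                              // reuse everywhere
      }
    }

Resolving once both saves a lookup and guarantees every helper sees the same snapshot of the path, which matters under concurrent renames.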

[36/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
new file mode 100644
index 000..f7ec020
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Component.extend({
+  // Map: <queueName, queue>
+  map : undefined,
+
+  // Normalized data for d3
+  treeData: undefined,
+
+  // folded queues, folded[<queueName>] == true means <queueName> is folded
+  foldedQueues: { },
+
+  // maxDepth
+  maxDepth: 0,
+
+  // num of leaf queue, folded queue is treated as leaf queue
+  numOfLeafQueue: 0,
+
+  // mainSvg
+  mainSvg: undefined,
+
+  // Init data
+  initData: function() {
+this.map = { };
+this.treeData = { };
+this.maxDepth = 0;
+this.numOfLeafQueue = 0;
+
+this.get("model")
+  .forEach(function(o) {
+this.map[o.id] = o;
+  }.bind(this));
+
+var selected = this.get("selected");
+
+this.initQueue("root", 1, this.treeData);
+  },
+
+  // get Children array of given queue
+  getChildrenNamesArray: function(q) {
+var namesArr = [];
+
+// Folded queue's children is empty
+if (this.foldedQueues[q.get("name")]) {
+  return namesArr;
+}
+
+var names = q.get("children");
+if (names) {
+  names.forEach(function(name) {
+namesArr.push(name);
+  });
+}
+
+return namesArr;
+  },
+
+  // Init queues
+  initQueue: function(queueName, depth, node) {
+if ((!queueName) || (!this.map[queueName])) {
+  // Queue is not existed
+  return;
+}
+
+if (depth > this.maxDepth) {
+  this.maxDepth = this.maxDepth + 1;
+}
+
+var queue = this.map[queueName];
+
+var names = this.getChildrenNamesArray(queue);
+
+node.name = queueName;
+node.parent = queue.get("parent");
+node.queueData = queue;
+
+if (names.length > 0) {
+  node.children = [];
+
+  names.forEach(function(name) {
+var childQueueData = {};
+node.children.push(childQueueData);
+this.initQueue(name, depth + 1, childQueueData);
+  }.bind(this));
+} else {
+  this.numOfLeafQueue = this.numOfLeafQueue + 1;
+}
+  },
+
+  update: function(source, root, tree, diagonal) {
+var duration = 300;
+var i = 0;
+
+// Compute the new tree layout.
+var nodes = tree.nodes(root).reverse();
+var links = tree.links(nodes);
+
+// Normalize for fixed-depth.
+nodes.forEach(function(d) { d.y = d.depth * 200; });
+
+// Update the nodes…
+var node = this.mainSvg.selectAll("g.node")
+  .data(nodes, function(d) { return d.id || (d.id = ++i); });
+
+// Enter any new nodes at the parent's previous position.
+var nodeEnter = node.enter().append("g")
+  .attr("class", "node")
+  .attr("transform", function(d) { return "translate(" + source.y0 + "," + 
source.x0 + ")"; })
+  .on("click", function(d,i){
+if (d.queueData.get("name") != this.get("selected")) {
+document.location.href = "yarnQueue/" + d.queueData.get("name");
+}
+  }.bind(this));
+  // .on("click", click);
+
+nodeEnter.append("circle")
+  .attr("r", 1e-6)
+  .style("fill", function(d) {
+var usedCap = d.queueData.get("usedCapacity");
+if (usedCap <= 60.0) {
+  return "LimeGreen";
+} else if (usedCap <= 100.0) {
+  return "DarkOrange";
+} else {
+  return "LightCoral";
+}
+  });
+
+// append percentage
+nodeEnter.append("text")
+  .attr("x", function(d) { return 0; })
+  .attr("dy", ".35em")
+  .attr("text-anchor", function(d) { return "middle"; })
+  .text(function(d) {
+var usedCap = d.queueData.get("usedCapacity");
+if (usedCap >= 100.0) {
+
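
The initQueue recursion above converts the flat id-to-queue map into the nested name/parent/children structure that d3's tree layout consumes, tracking the maximum depth and the number of leaf queues as it descends (folded queues report no children, so they count as leaves). The same construction, sketched in Java with illustrative types:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    // Illustrative mirror of initQueue(): build a nested tree from a flat
    // name -> child-names map while tracking max depth and leaf count.
    final class QueueTreeBuilder {
      static final class Node {
        final String name;
        final List<Node> children = new ArrayList<>();
        Node(String name) { this.name = name; }
      }

      int maxDepth = 0;
      int leafCount = 0;

      Node build(String name, int depth, Map<String, List<String>> childrenOf) {
        if (name == null || !childrenOf.containsKey(name)) {
          return null; // queue does not exist
        }
        maxDepth = Math.max(maxDepth, depth);
        Node node = new Node(name);
        List<String> kids = childrenOf.get(name);
        if (kids.isEmpty()) {
          leafCount++; // a folded queue would land here too
        } else {
          for (String kid : kids) {
            Node child = build(kid, depth + 1, childrenOf);
            if (child != null) {
              node.children.add(child);
            }
          }
        }
        return node;
      }
    }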

[11/50] [abbrv] hadoop git commit: Revert "HDFS-10762. Pass IIP for file status related methods. Contributed by Daryn Sharp."

2016-08-23 Thread wangda
Revert "HDFS-10762. Pass IIP for file status related methods. Contributed by 
Daryn Sharp."

This reverts commit 22fc46d7659972ff016ccf1c6f781f0c160be26f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ca4d6dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ca4d6dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ca4d6dd

Branch: refs/heads/YARN-3368
Commit: 3ca4d6ddfd199c95677721ff3bcb95d1da45bd88
Parents: f4d4d34
Author: Kihwal Lee 
Authored: Mon Aug 22 16:57:45 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 22 16:57:45 2016 -0500

--
 .../hdfs/server/namenode/FSDirAppendOp.java |  6 +-
 .../server/namenode/FSDirStatAndListingOp.java  | 80 +++-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  3 +-
 .../hdfs/server/namenode/FSDirectory.java   | 14 ++--
 .../hdfs/server/namenode/INodesInPath.java  | 42 ++
 .../hadoop/hdfs/TestReservedRawPaths.java   | 21 -
 6 files changed, 64 insertions(+), 102 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ca4d6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
index 5192352..3a5d7dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
@@ -85,10 +85,9 @@ final class FSDirAppendOp {
 final LocatedBlock lb;
 final FSDirectory fsd = fsn.getFSDirectory();
 final String src;
-final INodesInPath iip;
 fsd.writeLock();
 try {
-  iip = fsd.resolvePathForWrite(pc, srcArg);
+  final INodesInPath iip = fsd.resolvePathForWrite(pc, srcArg);
   src = iip.getPath();
   // Verify that the destination does not exist as a directory already
   final INode inode = iip.getLastINode();
@@ -149,7 +148,8 @@ final class FSDirAppendOp {
   fsd.writeUnlock();
 }
 
-HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip);
+HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, src, false,
+FSDirectory.isReservedRawName(srcArg));
 if (lb != null) {
   NameNode.stateChangeLog.debug(
   "DIR* NameSystem.appendFile: file {} for {} at {} block {} block"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ca4d6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 88be510..c9eedf5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -108,16 +108,16 @@ class FSDirStatAndListingOp {
 if (!DFSUtil.isValidName(src)) {
   throw new InvalidPathException("Invalid file name: " + src);
 }
-final INodesInPath iip;
 if (fsd.isPermissionEnabled()) {
   FSPermissionChecker pc = fsd.getPermissionChecker();
-  iip = fsd.resolvePath(pc, srcArg, resolveLink);
+  final INodesInPath iip = fsd.resolvePath(pc, srcArg, resolveLink);
+  src = iip.getPath();
   fsd.checkPermission(pc, iip, false, null, null, null, null, false);
 } else {
   src = FSDirectory.resolvePath(srcArg, fsd);
-  iip = fsd.getINodesInPath(src, resolveLink);
 }
-return getFileInfo(fsd, iip);
+return getFileInfo(fsd, src, FSDirectory.isReservedRawName(srcArg),
+   resolveLink);
   }
 
   /**
@@ -230,6 +230,7 @@ class FSDirStatAndListingOp {
   String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
   throws IOException {
 String srcs = FSDirectory.normalizePath(src);
+final boolean isRawPath = FSDirectory.isReservedRawName(src);
 if (FSDirectory.isExactReservedName(srcs)) {
   return getReservedListing(fsd);
 }
@@ -256,7 +257,7 @@ class FSDirStatAndListingOp {
 return new DirectoryListing(
 new HdfsFileStatus[]{ createFileStatus(
 fsd, HdfsFileStatus.EMPTY_NAME, 

[17/50] [abbrv] hadoop git commit: HADOOP-13524. mvn eclipse:eclipse generates .gitignore'able files. Contributed by Vinod Kumar Vavilapalli

2016-08-23 Thread wangda
HADOOP-13524. mvn eclipse:eclipse generates .gitignore'able files. Contributed 
by Vinod Kumar Vavilapalli


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd76238a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd76238a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd76238a

Branch: refs/heads/YARN-3368
Commit: dd76238a3bafd58faa6f38f075505bef1012f150
Parents: e90f335
Author: Jian He 
Authored: Tue Aug 23 19:13:14 2016 +0800
Committer: Jian He 
Committed: Tue Aug 23 19:13:14 2016 +0800

--
 .gitignore | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd76238a/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 998287d..a5d69d0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,11 @@
 .settings
 target
 build
+
+# External tool builders
+*/.externalToolBuilders
+*/maven-eclipse.xml
+
 hadoop-common-project/hadoop-kms/downloads/
 hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads




[40/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-08-23 Thread wangda
YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to 
mvn, and fix licenses. (wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6068a841
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6068a841
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6068a841

Branch: refs/heads/YARN-3368
Commit: 6068a84183a42497cb77cda9aba2890fc31dbe72
Parents: 3d0e0a7
Author: Wangda Tan 
Authored: Mon Mar 21 14:03:13 2016 -0700
Committer: Wangda Tan 
Committed: Tue Aug 23 10:31:13 2016 -0700

--
 .gitignore  |  13 +
 BUILDING.txt|   4 +-
 LICENSE.txt |  80 +
 dev-support/create-release.sh   | 144 +
 dev-support/docker/Dockerfile   |   5 +
 .../src/site/markdown/YarnUI2.md|  43 +++
 .../hadoop-yarn/hadoop-yarn-ui/.bowerrc |   4 -
 .../hadoop-yarn/hadoop-yarn-ui/.editorconfig|  34 ---
 .../hadoop-yarn/hadoop-yarn-ui/.ember-cli   |  11 -
 .../hadoop-yarn/hadoop-yarn-ui/.gitignore   |  17 --
 .../hadoop-yarn/hadoop-yarn-ui/.jshintrc|  32 --
 .../hadoop-yarn/hadoop-yarn-ui/.travis.yml  |  23 --
 .../hadoop-yarn/hadoop-yarn-ui/.watchmanconfig  |   3 -
 .../hadoop-yarn/hadoop-yarn-ui/README.md|  24 --
 .../hadoop-yarn-ui/app/adapters/cluster-info.js |  20 --
 .../app/adapters/cluster-metric.js  |  20 --
 .../app/adapters/yarn-app-attempt.js|  32 --
 .../hadoop-yarn-ui/app/adapters/yarn-app.js |  26 --
 .../app/adapters/yarn-container-log.js  |  74 -
 .../app/adapters/yarn-container.js  |  43 ---
 .../app/adapters/yarn-node-app.js   |  63 
 .../app/adapters/yarn-node-container.js |  64 
 .../hadoop-yarn-ui/app/adapters/yarn-node.js|  40 ---
 .../hadoop-yarn-ui/app/adapters/yarn-queue.js   |  20 --
 .../hadoop-yarn-ui/app/adapters/yarn-rm-node.js |  45 ---
 .../hadoop-yarn/hadoop-yarn-ui/app/app.js   |  20 --
 .../hadoop-yarn-ui/app/components/.gitkeep  |   0
 .../app/components/app-attempt-table.js |   4 -
 .../hadoop-yarn-ui/app/components/app-table.js  |   4 -
 .../hadoop-yarn-ui/app/components/bar-chart.js  | 104 ---
 .../app/components/base-chart-component.js  | 109 ---
 .../app/components/container-table.js   |   4 -
 .../app/components/donut-chart.js   | 148 --
 .../app/components/item-selector.js |  21 --
 .../app/components/queue-configuration-table.js |   4 -
 .../app/components/queue-navigator.js   |   4 -
 .../hadoop-yarn-ui/app/components/queue-view.js | 272 -
 .../app/components/simple-table.js  |  58 
 .../app/components/timeline-view.js | 250 
 .../app/components/tree-selector.js | 257 
 .../hadoop-yarn/hadoop-yarn-ui/app/config.js|  27 --
 .../hadoop-yarn/hadoop-yarn-ui/app/constants.js |  24 --
 .../hadoop-yarn-ui/app/controllers/.gitkeep |   0
 .../app/controllers/application.js  |  55 
 .../app/controllers/cluster-overview.js |   5 -
 .../hadoop-yarn-ui/app/controllers/yarn-apps.js |   4 -
 .../app/controllers/yarn-queue.js   |   6 -
 .../hadoop-yarn-ui/app/helpers/.gitkeep |   0
 .../hadoop-yarn-ui/app/helpers/divide.js|  31 --
 .../app/helpers/log-files-comma.js  |  48 ---
 .../hadoop-yarn-ui/app/helpers/node-link.js |  37 ---
 .../hadoop-yarn-ui/app/helpers/node-menu.js |  66 -
 .../hadoop-yarn/hadoop-yarn-ui/app/index.html   |  25 --
 .../hadoop-yarn-ui/app/models/.gitkeep  |   0
 .../hadoop-yarn-ui/app/models/cluster-info.js   |  13 -
 .../hadoop-yarn-ui/app/models/cluster-metric.js | 115 
 .../app/models/yarn-app-attempt.js  |  44 ---
 .../hadoop-yarn-ui/app/models/yarn-app.js   |  65 -
 .../app/models/yarn-container-log.js|  25 --
 .../hadoop-yarn-ui/app/models/yarn-container.js |  39 ---
 .../hadoop-yarn-ui/app/models/yarn-node-app.js  |  44 ---
 .../app/models/yarn-node-container.js   |  57 
 .../hadoop-yarn-ui/app/models/yarn-node.js  |  33 ---
 .../hadoop-yarn-ui/app/models/yarn-queue.js |  76 -
 .../hadoop-yarn-ui/app/models/yarn-rm-node.js   |  92 --
 .../hadoop-yarn-ui/app/models/yarn-user.js  |   8 -
 .../hadoop-yarn/hadoop-yarn-ui/app/router.js|  29 --
 .../hadoop-yarn-ui/app/routes/.gitkeep  |   0
 .../hadoop-yarn-ui/app/routes/application.js|  38 ---
 .../app/routes/cluster-overview.js  |  11 -
 .../hadoop-yarn-ui/app/routes/index.js  |  29 --
 .../app/routes/yarn-app-attempt.js  |  21 --
 

[01/50] [abbrv] hadoop git commit: HADOOP-13428. Fixed hadoop-common build files to generate jdiff by working around a jdiff-bug. Contributed by Wangda Tan. [Forced Update!]

2016-08-23 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3368 818c015db -> 6224ea918 (forced update)


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99603e90/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index c28a05c..54d1cdd 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
[Two hunks follow in the original diff; their XML element names were stripped by the mail archiver. Recoverable content: the properties block (src/test/resources/kdc, common, ../etc/hadoop, wsce-site.xml) gains one new true-valued build property, and the apache-rat exclude list adds dev-support/jdiff-workaround.patch after src/test/resources/javakeystoreprovider.password.]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99603e90/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 0357269..0ee9895 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
[Five hunks follow in the original diff; their XML markup was stripped by the mail archiver. Recoverable content: the JDiff stable-API base version property is bumped from 2.6.0 to 2.7.2; the maven-javadoc-plugin configuration gets a whitespace-only touch-up; maven-antrun-plugin gains a new "pre-site" execution (prepare-package phase, run goal) whose stripped task body sets up the jdiff workaround before site generation; the existing "site" execution loses a blank line; and a matching "post-site" execution (also prepare-package, run goal) is added to undo the workaround afterwards.]




[10/50] [abbrv] hadoop git commit: HADOOP-13487. Hadoop KMS should load old delegation tokens from Zookeeper on startup. Contributed by Xiao Chen.

2016-08-23 Thread wangda
HADOOP-13487. Hadoop KMS should load old delegation tokens from Zookeeper on 
startup. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4d4d347
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4d4d347
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4d4d347

Branch: refs/heads/YARN-3368
Commit: f4d4d3474cfd2d1f2d243f5ae5cec17af38270b1
Parents: 22fc46d
Author: Xiao Chen 
Authored: Mon Aug 22 14:31:13 2016 -0700
Committer: Xiao Chen 
Committed: Mon Aug 22 14:42:13 2016 -0700

--
 .../ZKDelegationTokenSecretManager.java | 44 +
 .../TestZKDelegationTokenSecretManager.java | 93 +++-
 2 files changed, 136 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d4d347/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
index c3ad9f3..6c66e98 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
@@ -361,6 +361,7 @@ public abstract class ZKDelegationTokenSecretManager<TokenIdent extends AbstractDelegationTokenIdentifier>
[intervening hunk lines stripped by the archiver]
+final List<ChildData> children;
+if (isTokenCache) {
+  children = tokenCache.getCurrentData();
+} else {
+  children = keyCache.getCurrentData();
+}
+
+int count = 0;
+for (ChildData child : children) {
+  try {
+if (isTokenCache) {
+  processTokenAddOrUpdate(child);
+} else {
+  processKeyAddOrUpdate(child.getData());
+}
+  } catch (Exception e) {
+LOG.info("Ignoring node {} because it failed to load.",
+child.getPath());
+LOG.debug("Failure exception:", e);
+++count;
+  }
+}
+if (count > 0) {
+  LOG.warn("Ignored {} nodes while loading {} cache.", count, cacheName);
+}
+LOG.info("Loaded {} cache.", cacheName);
+  }
+
   private void processKeyAddOrUpdate(byte[] data) throws IOException {
 ByteArrayInputStream bin = new ByteArrayInputStream(data);
 DataInputStream din = new DataInputStream(bin);
@@ -890,4 +929,9 @@ public abstract class ZKDelegationTokenSecretManager<TokenIdent extends AbstractDelegationTokenIdentifier>
[hunk body stripped by the archiver]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d4d347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
index 185a994..c9571ff2 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 
+import com.google.common.base.Supplier;
 import org.apache.curator.RetryPolicy;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
@@ -37,6 +38,7 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.zookeeper.ZooDefs;
 import org.apache.zookeeper.data.ACL;
 import org.apache.zookeeper.data.Id;
@@ -44,12 +46,18 @@ import 
org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.junit.Assert.fail;
 
-import org.junit.Test;
 
 public class TestZKDelegationTokenSecretManager {
+  private static final Logger LOG =
+  
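
The additions above replay whatever the Curator cache already holds at startup, skip (but count) individual znodes that fail to parse, and log one aggregate warning, so a single corrupt node cannot abort loading the rest. A self-contained sketch of that tolerant-load pattern against Curator's PathChildrenCache; the NodeProcessor callback is a hypothetical stand-in for the secret manager's real token/key processing methods:

    import java.util.List;

    import org.apache.curator.framework.recipes.cache.ChildData;
    import org.apache.curator.framework.recipes.cache.PathChildrenCache;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class CacheLoaderSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(CacheLoaderSketch.class);

      private CacheLoaderSketch() {
      }

      /** Hypothetical stand-in for processTokenAddOrUpdate and friends. */
      interface NodeProcessor {
        void process(ChildData child) throws Exception;
      }

      // Walk the cache's current children; tolerate and count bad nodes.
      static void loadExisting(PathChildrenCache cache, String cacheName,
          NodeProcessor processor) {
        List<ChildData> children = cache.getCurrentData();
        int ignored = 0;
        for (ChildData child : children) {
          try {
            processor.process(child);
          } catch (Exception e) {
            LOG.info("Ignoring node {} because it failed to load.",
                child.getPath());
            LOG.debug("Failure exception:", e);
            ++ignored;
          }
        }
        if (ignored > 0) {
          LOG.warn("Ignored {} nodes while loading {} cache.", ignored,
              cacheName);
        }
        LOG.info("Loaded {} cache.", cacheName);
      }
    }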

[46/50] [abbrv] hadoop git commit: YARN-5161. [YARN-3368] Add Apache Hadoop logo in YarnUI home page. (Kai Sasaki via Sunil G)

2016-08-23 Thread wangda
YARN-5161. [YARN-3368] Add Apache Hadoop logo in YarnUI home page. (Kai Sasaki 
via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f40a995
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f40a995
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f40a995

Branch: refs/heads/YARN-3368
Commit: 0f40a9950cca3e0f91a169dbf57958c9336ed952
Parents: 7929e93
Author: Sunil 
Authored: Mon Jul 11 14:31:25 2016 +0530
Committer: Wangda Tan 
Committed: Tue Aug 23 10:31:13 2016 -0700

--
 .../src/main/webapp/app/styles/app.css |  11 +++
 .../src/main/webapp/app/templates/application.hbs  |  12 +++-
 .../webapp/public/assets/images/hadoop_logo.png| Bin 0 -> 26495 bytes
 3 files changed, 18 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f40a995/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
index bcb6aab..e2d09dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
@@ -157,3 +157,14 @@ table.dataTable thead .sorting_desc_disabled {
   stroke: #ccc;  
   stroke-width: 2px;
 }
+
+.hadoop-brand-image {
+  margin-top: -10px;
+  width: auto;
+  height: 45px;
+}
+
+li a.navigation-link.ember-view {
+  color: #2196f3;
+  font-weight: bold;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f40a995/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
index b45ec6b..03b2c4a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
@@ -20,35 +20,37 @@
 [navbar header markup; HTML tags stripped by the mail archiver. The added lines wrap the navbar brand in a link carrying the new Hadoop logo image (class "hadoop-brand-image") and keep the standard "Toggle navigation" collapse button, while the plain-text brand below is removed.]
-  Apache Hadoop YARN
 {{#link-to 'yarn-queue' 'root' tagName="li"}}
-  {{#link-to 'yarn-queue' 'root'}}Queues
+  {{#link-to 'yarn-queue' 'root' class="navigation-link"}}Queues
 (current)
   {{/link-to}}
 {{/link-to}}
 {{#link-to 'yarn-apps' tagName="li"}}
-  {{#link-to 'yarn-apps'}}Applications
+  {{#link-to 'yarn-apps' class="navigation-link"}}Applications
 (current)
   {{/link-to}}
 {{/link-to}}
 {{#link-to 'cluster-overview' tagName="li"}}
-  {{#link-to 'cluster-overview'}}Cluster Overview
+  {{#link-to 'cluster-overview' class="navigation-link"}}Cluster 
Overview
 (current)
   {{/link-to}}
 {{/link-to}}
 {{#link-to 'yarn-nodes' tagName="li"}}
-  {{#link-to 'yarn-nodes'}}Nodes
+  {{#link-to 'yarn-nodes' class="navigation-link"}}Nodes
 (current)
   {{/link-to}}
 {{/link-to}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f40a995/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
new file mode 100644
index 000..275d39e
Binary files /dev/null and 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
 differ




[50/50] [abbrv] hadoop git commit: YARN-4514. [YARN-3368] Cleanup hardcoded configurations, such as RM/ATS addresses. (Sunil G via wangda)

2016-08-23 Thread wangda
YARN-4514. [YARN-3368] Cleanup hardcoded configurations, such as RM/ATS 
addresses. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dfb5edae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dfb5edae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dfb5edae

Branch: refs/heads/YARN-3368
Commit: dfb5edaed62d1bc1238f67187dfd49df6629edbe
Parents: 6068a84
Author: Wangda Tan 
Authored: Sat Apr 16 23:04:45 2016 -0700
Committer: Wangda Tan 
Committed: Tue Aug 23 10:31:13 2016 -0700

--
 .../src/main/webapp/app/adapters/abstract.js| 48 +
 .../main/webapp/app/adapters/cluster-info.js| 22 ++
 .../main/webapp/app/adapters/cluster-metric.js  | 22 ++
 .../webapp/app/adapters/yarn-app-attempt.js | 24 ++-
 .../src/main/webapp/app/adapters/yarn-app.js| 27 ++-
 .../webapp/app/adapters/yarn-container-log.js   | 10 ++-
 .../main/webapp/app/adapters/yarn-container.js  | 20 +++---
 .../main/webapp/app/adapters/yarn-node-app.js   | 24 +++
 .../webapp/app/adapters/yarn-node-container.js  | 24 +++
 .../src/main/webapp/app/adapters/yarn-node.js   | 23 +++---
 .../src/main/webapp/app/adapters/yarn-queue.js  | 22 ++
 .../main/webapp/app/adapters/yarn-rm-node.js| 21 ++
 .../hadoop-yarn-ui/src/main/webapp/app/app.js   |  4 +-
 .../src/main/webapp/app/config.js   |  5 +-
 .../src/main/webapp/app/index.html  |  1 +
 .../src/main/webapp/app/initializers/env.js | 29 
 .../src/main/webapp/app/initializers/hosts.js   | 28 
 .../src/main/webapp/app/services/env.js | 59 
 .../src/main/webapp/app/services/hosts.js   | 74 
 .../hadoop-yarn-ui/src/main/webapp/bower.json   | 25 +++
 .../src/main/webapp/config/configs.env  | 48 +
 .../src/main/webapp/config/default-config.js| 32 +
 .../src/main/webapp/config/environment.js   | 11 ++-
 .../src/main/webapp/ember-cli-build.js  | 10 ++-
 .../hadoop-yarn-ui/src/main/webapp/package.json | 35 -
 .../webapp/tests/unit/initializers/env-test.js  | 41 +++
 .../tests/unit/initializers/hosts-test.js   | 41 +++
 .../tests/unit/initializers/jquery-test.js  | 41 +++
 .../main/webapp/tests/unit/services/env-test.js | 30 
 .../webapp/tests/unit/services/hosts-test.js| 30 
 30 files changed, 637 insertions(+), 194 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfb5edae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
new file mode 100644
index 000..c7e5c36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import Ember from 'ember';
+
+export default DS.JSONAPIAdapter.extend({
+  address: null, //Must be set by inheriting classes
+  restNameSpace: null, //Must be set by inheriting classes
+  serverName: null, //Must be set by inheriting classes
+
+  headers: {
+Accept: 'application/json'
+  },
+
+  host: Ember.computed("address", function () {
+var address = this.get("address");
+return this.get(`hosts.${address}`);
+  }),
+
+  namespace: Ember.computed("restNameSpace", function () {
+var serverName = this.get("restNameSpace");
+return this.get(`env.app.namespaces.${serverName}`);
+  }),
+
+  ajax: function(url, method, options) {
+options = options || {};
+options.crossDomain = true;
+options.xhrFields = {
+  withCredentials: true
+};
+options.targetServer = this.get('serverName');
+return this._super(url, method, 

[37/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
new file mode 100644
index 000..66bf54a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -0,0 +1,207 @@
[The 207 added lines are the new module's pom.xml; its XML markup was stripped by the mail archiver. Recoverable content: the module inherits from hadoop-yarn (org.apache.hadoop, 3.0.0-SNAPSHOT), is named hadoop-yarn-ui / "Apache Hadoop YARN UI", packages as ${packaging.type} (jar by default) with webappDir src/main/webapp; apache-rat-plugin excludes node_modules, bower_components and the webapp's JSON, testem and asset files; maven-clean-plugin 3.0.0 deletes src/main/webapp/bower_components and node_modules; and a "yarn-ui" profile switches packaging to war and runs exec-maven-plugin steps in ${webappDir} for "npm install", "bower --allow-root install", "ember build -prod --output-path target/dist", "ember test" (skippable via ${skipTests}) and "rm -rf tmp", with maven-war-plugin packaging ${basedir}/target/dist against src/main/webapp/WEB-INF/web.xml.]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/robots.txt
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/robots.txt 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/robots.txt
deleted file mode 100644
index f591645..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/robots.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-# http://www.robotstxt.org
-User-agent: *
-Disallow:


[23/50] [abbrv] hadoop git commit: HADOOP-13446. Support running isolated unit tests separate from AWS integration tests. Contributed by Chris Nauroth.

2016-08-23 Thread wangda
HADOOP-13446. Support running isolated unit tests separate from AWS integration 
tests. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f9c346e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f9c346e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f9c346e

Branch: refs/heads/YARN-3368
Commit: 6f9c346e577325ec2059d83d5636b5ff7fa6cdce
Parents: f0efea4
Author: Chris Nauroth 
Authored: Tue Aug 23 07:18:49 2016 -0700
Committer: Chris Nauroth 
Committed: Tue Aug 23 07:18:49 2016 -0700

--
 hadoop-project/pom.xml  |   5 +
 hadoop-tools/hadoop-aws/pom.xml |  95 +++-
 .../src/site/markdown/tools/hadoop-aws/index.md |  67 ++-
 .../fs/contract/s3a/ITestS3AContractCreate.java |  35 ++
 .../fs/contract/s3a/ITestS3AContractDelete.java |  34 ++
 .../fs/contract/s3a/ITestS3AContractDistCp.java |  46 ++
 .../s3a/ITestS3AContractGetFileStatus.java  |  51 ++
 .../fs/contract/s3a/ITestS3AContractMkdir.java  |  34 ++
 .../fs/contract/s3a/ITestS3AContractOpen.java   |  34 ++
 .../fs/contract/s3a/ITestS3AContractRename.java |  62 +++
 .../contract/s3a/ITestS3AContractRootDir.java   |  72 +++
 .../fs/contract/s3a/ITestS3AContractSeek.java   |  34 ++
 .../fs/contract/s3a/TestS3AContractCreate.java  |  33 --
 .../fs/contract/s3a/TestS3AContractDelete.java  |  31 --
 .../fs/contract/s3a/TestS3AContractDistCp.java  |  46 --
 .../s3a/TestS3AContractGetFileStatus.java   |  47 --
 .../fs/contract/s3a/TestS3AContractMkdir.java   |  34 --
 .../fs/contract/s3a/TestS3AContractOpen.java|  31 --
 .../fs/contract/s3a/TestS3AContractRename.java  |  61 ---
 .../fs/contract/s3a/TestS3AContractRootDir.java |  72 ---
 .../fs/contract/s3a/TestS3AContractSeek.java|  31 --
 .../fs/contract/s3n/ITestS3NContractCreate.java |  41 ++
 .../fs/contract/s3n/ITestS3NContractDelete.java |  34 ++
 .../fs/contract/s3n/ITestS3NContractMkdir.java  |  34 ++
 .../fs/contract/s3n/ITestS3NContractOpen.java   |  34 ++
 .../fs/contract/s3n/ITestS3NContractRename.java |  35 ++
 .../contract/s3n/ITestS3NContractRootDir.java   |  35 ++
 .../fs/contract/s3n/ITestS3NContractSeek.java   |  34 ++
 .../fs/contract/s3n/TestS3NContractCreate.java  |  38 --
 .../fs/contract/s3n/TestS3NContractDelete.java  |  31 --
 .../fs/contract/s3n/TestS3NContractMkdir.java   |  34 --
 .../fs/contract/s3n/TestS3NContractOpen.java|  31 --
 .../fs/contract/s3n/TestS3NContractRename.java  |  32 --
 .../fs/contract/s3n/TestS3NContractRootDir.java |  35 --
 .../fs/contract/s3n/TestS3NContractSeek.java|  31 --
 .../ITestBlockingThreadPoolExecutorService.java | 182 +++
 .../fs/s3a/ITestS3AAWSCredentialsProvider.java  | 250 +
 .../fs/s3a/ITestS3ABlockingThreadPool.java  |  80 +++
 .../apache/hadoop/fs/s3a/ITestS3ABlocksize.java |  96 
 .../hadoop/fs/s3a/ITestS3AConfiguration.java| 435 +++
 .../hadoop/fs/s3a/ITestS3ACredentialsInURL.java | 155 ++
 .../hadoop/fs/s3a/ITestS3AEncryption.java   | 104 
 .../ITestS3AEncryptionAlgorithmPropagation.java |  83 +++
 .../s3a/ITestS3AEncryptionFastOutputStream.java |  35 ++
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  | 192 +++
 .../hadoop/fs/s3a/ITestS3AFastOutputStream.java |  74 +++
 .../fs/s3a/ITestS3AFileOperationCost.java   | 191 +++
 .../fs/s3a/ITestS3AFileSystemContract.java  | 106 
 .../fs/s3a/ITestS3ATemporaryCredentials.java| 148 +
 .../TestBlockingThreadPoolExecutorService.java  | 182 ---
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   | 250 -
 .../fs/s3a/TestS3ABlockingThreadPool.java   |  80 ---
 .../apache/hadoop/fs/s3a/TestS3ABlocksize.java  |  93 
 .../hadoop/fs/s3a/TestS3AConfiguration.java | 429 ---
 .../hadoop/fs/s3a/TestS3ACredentialsInURL.java  | 155 --
 .../apache/hadoop/fs/s3a/TestS3AEncryption.java | 104 
 .../TestS3AEncryptionAlgorithmPropagation.java  |  82 ---
 .../s3a/TestS3AEncryptionFastOutputStream.java  |  35 --
 .../hadoop/fs/s3a/TestS3AFailureHandling.java   | 194 ---
 .../hadoop/fs/s3a/TestS3AFastOutputStream.java  |  74 ---
 .../hadoop/fs/s3a/TestS3AFileOperationCost.java | 191 ---
 .../fs/s3a/TestS3AFileSystemContract.java   | 104 
 .../fs/s3a/TestS3ATemporaryCredentials.java | 148 -
 .../fs/s3a/fileContext/ITestS3AFileContext.java |  23 +
 .../ITestS3AFileContextCreateMkdir.java |  35 ++
 .../ITestS3AFileContextMainOperations.java  |  60 +++
 .../ITestS3AFileContextStatistics.java  |  61 +++
 .../s3a/fileContext/ITestS3AFileContextURI.java |  44 ++
 .../fileContext/ITestS3AFileContextUtil.java|  34 ++
 .../fs/s3a/fileContext/TestS3AFileContext.java  |  23 -
 .../TestS3AFileContextCreateMkdir.java  |  35 --
 .../TestS3AFileContextMainOperations.java   
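
The wholesale Test* to ITest* renames above follow the Maven Surefire/Failsafe convention: Test*-named classes remain fast, isolated unit tests run by Surefire, while ITest*-named classes are integration tests against live AWS that Failsafe runs during the verify phase. A minimal sketch of a test class under that convention, assuming JUnit 4; the class name and environment switch are hypothetical, for illustration only:

import org.junit.Assume;
import org.junit.Test;

// The ITest prefix keeps this class out of the default Surefire run, so
// "mvn test" stays offline; Failsafe picks it up during "mvn verify".
public class ITestExampleContract {
  @Test
  public void testNeedsLiveEndpoint() {
    // Integration tests conventionally skip, rather than fail, when no
    // live endpoint or credentials are configured.
    Assume.assumeTrue("no live endpoint configured",
        System.getenv("EXAMPLE_LIVE_ENDPOINT") != null);
  }
}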

[41/50] [abbrv] hadoop git commit: YARN-5509. Build error due to preparing 3.0.0-alpha2 deployment. (Kai Sasaki via wangda)

2016-08-23 Thread wangda
YARN-5509. Build error due to preparing 3.0.0-alpha2 deployment. (Kai Sasaki 
via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8dfa509
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8dfa509
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8dfa509

Branch: refs/heads/YARN-3368
Commit: f8dfa50997b1c2eebe2017f83bb7c43331a49180
Parents: 51dda0f
Author: Wangda Tan 
Authored: Thu Aug 11 14:59:14 2016 -0700
Committer: Wangda Tan 
Committed: Tue Aug 23 10:31:13 2016 -0700

--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8dfa509/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 6d46fda..2933a76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -20,12 +20,12 @@
   
 hadoop-yarn
 org.apache.hadoop
-3.0.0-alpha1-SNAPSHOT
+3.0.0-alpha2-SNAPSHOT
   
   4.0.0
   org.apache.hadoop
   hadoop-yarn-ui
-  3.0.0-alpha1-SNAPSHOT
+  3.0.0-alpha2-SNAPSHOT
   Apache Hadoop YARN UI
   ${packaging.type}
 





[32/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
deleted file mode 100644
index 5877589..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { moduleForModel, test } from 'ember-qunit';
-
-moduleForModel('yarn-node', 'Unit | Model | Node', {
-  // Specify the other units that are required for this test.
-  needs: []
-});
-
-test('Basic creation test', function(assert) {
-  let model = this.subject();
-
-  assert.ok(model);
-  assert.ok(model._notifyProperties);
-  assert.ok(model.didLoad);
-  assert.ok(model.totalVmemAllocatedContainersMB);
-  assert.ok(model.vmemCheckEnabled);
-  assert.ok(model.pmemCheckEnabled);
-  assert.ok(model.nodeHealthy);
-  assert.ok(model.lastNodeUpdateTime);
-  assert.ok(model.healthReport);
-  assert.ok(model.nmStartupTime);
-  assert.ok(model.nodeManagerBuildVersion);
-  assert.ok(model.hadoopBuildVersion);
-});
-
-test('test fields', function(assert) {
-  let model = this.subject();
-
-  assert.expect(4);
-  Ember.run(function () {
-model.set("totalVmemAllocatedContainersMB", 4096);
-model.set("totalPmemAllocatedContainersMB", 2048);
-model.set("totalVCoresAllocatedContainers", 4);
-model.set("hadoopBuildVersion", "3.0.0-SNAPSHOT");
-assert.equal(model.get("totalVmemAllocatedContainersMB"), 4096);
-assert.equal(model.get("totalPmemAllocatedContainersMB"), 2048);
-assert.equal(model.get("totalVCoresAllocatedContainers"), 4);
-assert.equal(model.get("hadoopBuildVersion"), "3.0.0-SNAPSHOT");
-  });
-});
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
deleted file mode 100644
index 4fd2517..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { moduleForModel, test } from 'ember-qunit';
-
-moduleForModel('yarn-rm-node', 'Unit | Model | RMNode', {
-  // Specify the other units that are required for this test.
-  needs: []
-});
-
-test('Basic creation test', function(assert) {
-  let model = this.subject();
-
-  assert.ok(model);
-  assert.ok(model._notifyProperties);
-  assert.ok(model.didLoad);
-  assert.ok(model.rack);
-  assert.ok(model.state);
-  assert.ok(model.nodeHostName);
-  assert.ok(model.nodeHTTPAddress);
-  assert.ok(model.lastHealthUpdate);
-  assert.ok(model.healthReport);
-  assert.ok(model.numContainers);
-  assert.ok(model.usedMemoryMB);
-  assert.ok(model.availMemoryMB);
-  assert.ok(model.usedVirtualCores);
-  assert.ok(model.availableVirtualCores);
-  assert.ok(model.version);
-  assert.ok(model.nodeLabels);
-  

[22/50] [abbrv] hadoop git commit: HADOOP-13446. Support running isolated unit tests separate from AWS integration tests. Contributed by Chris Nauroth.

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9c346e/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java
new file mode 100644
index 000..b0b8a65
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.hadoop.util.StopWatch;
+import org.junit.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+/**
+ * Basic unit test for S3A's blocking executor service.
+ */
+public class ITestBlockingThreadPoolExecutorService {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+  BlockingThreadPoolExecutorService.class);
+
+  private static final int NUM_ACTIVE_TASKS = 4;
+  private static final int NUM_WAITING_TASKS = 2;
+  private static final int TASK_SLEEP_MSEC = 100;
+  private static final int SHUTDOWN_WAIT_MSEC = 200;
+  private static final int SHUTDOWN_WAIT_TRIES = 5;
+  private static final int BLOCKING_THRESHOLD_MSEC = 50;
+
+  private static final Integer SOME_VALUE = 1337;
+
+  private static BlockingThreadPoolExecutorService tpe = null;
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+ensureDestroyed();
+  }
+
+  /**
+   * Basic test of running one trivial task.
+   */
+  @Test
+  public void testSubmitCallable() throws Exception {
+ensureCreated();
+ListenableFuture<Integer> f = tpe.submit(callableSleeper);
+Integer v = f.get();
+assertEquals(SOME_VALUE, v);
+  }
+
+  /**
+   * More involved test, including detecting blocking when at capacity.
+   */
+  @Test
+  public void testSubmitRunnable() throws Exception {
+ensureCreated();
+int totalTasks = NUM_ACTIVE_TASKS + NUM_WAITING_TASKS;
+StopWatch stopWatch = new StopWatch().start();
+for (int i = 0; i < totalTasks; i++) {
+  tpe.submit(sleeper);
+  assertDidntBlock(stopWatch);
+}
+tpe.submit(sleeper);
+assertDidBlock(stopWatch);
+  }
+
+  @Test
+  public void testShutdown() throws Exception {
+// Cover create / destroy, regardless of when this test case runs
+ensureCreated();
+ensureDestroyed();
+
+// Cover create, execute, destroy, regardless of when test case runs
+ensureCreated();
+testSubmitRunnable();
+ensureDestroyed();
+  }
+
+  // Helper functions, etc.
+
+  private void assertDidntBlock(StopWatch sw) {
+try {
+  assertFalse("Non-blocking call took too long.",
+  sw.now(TimeUnit.MILLISECONDS) > BLOCKING_THRESHOLD_MSEC);
+} finally {
+  sw.reset().start();
+}
+  }
+
+  private void assertDidBlock(StopWatch sw) {
+try {
+  if (sw.now(TimeUnit.MILLISECONDS) < BLOCKING_THRESHOLD_MSEC) {
+throw new RuntimeException("Blocking call returned too fast.");
+  }
+} finally {
+  sw.reset().start();
+}
+  }
+
+  private Runnable sleeper = new Runnable() {
+@Override
+public void run() {
+  String name = Thread.currentThread().getName();
+  try {
+Thread.sleep(TASK_SLEEP_MSEC);
+  } catch (InterruptedException e) {
+LOG.info("Thread {} interrupted.", name);
+Thread.currentThread().interrupt();
+  }
+}
+  };
+
+  private Callable<Integer> callableSleeper = new Callable<Integer>() {
+@Override
+public Integer call() throws Exception {
+  sleeper.run();
+  return SOME_VALUE;
+}
+  };
+
+  /**
+   * Helper function to create thread pool under test.
+   */
+  private static void ensureCreated() throws Exception {
+if (tpe == null) {
+  LOG.debug("Creating thread pool");
+  tpe 
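
The key property the test above exercises is that submit() blocks the caller once NUM_ACTIVE_TASKS workers are busy and NUM_WAITING_TASKS submissions are already queued. A minimal sketch of that technique using only java.util.concurrent, as an illustration of the idea rather than the actual BlockingThreadPoolExecutorService implementation:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.Semaphore;

public class BoundedSubmitSketch {
  private final ExecutorService pool;
  private final Semaphore slots; // one permit per active or waiting task

  public BoundedSubmitSketch(int activeTasks, int waitingTasks) {
    this.pool = Executors.newFixedThreadPool(activeTasks);
    this.slots = new Semaphore(activeTasks + waitingTasks);
  }

  /** Blocks the caller while all active and waiting slots are taken. */
  public void submit(final Runnable task) throws InterruptedException {
    slots.acquire(); // this is the blocking that the StopWatch assertions measure
    try {
      pool.execute(new Runnable() {
        @Override
        public void run() {
          try {
            task.run();
          } finally {
            slots.release(); // free a slot as soon as the task finishes
          }
        }
      });
    } catch (RejectedExecutionException e) {
      slots.release();
      throw e;
    }
  }
}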

[15/50] [abbrv] hadoop git commit: YARN-5544. TestNodeBlacklistingOnAMFailures fails on trunk. Contributed by Sunil G.

2016-08-23 Thread wangda
YARN-5544. TestNodeBlacklistingOnAMFailures fails on trunk. Contributed by 
Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d5997d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d5997d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d5997d2

Branch: refs/heads/YARN-3368
Commit: 0d5997d2b98eb89e72828dfcd78f02aa4e7e1e67
Parents: 8cc4a67
Author: Rohith Sharma K S 
Authored: Tue Aug 23 10:33:28 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Aug 23 14:37:39 2016 +0530

--
 .../TestNodeBlacklistingOnAMFailures.java   | 16 
 1 file changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d5997d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
index ef6d43b..7a24b7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
@@ -68,6 +68,9 @@ public class TestNodeBlacklistingOnAMFailures {
 MockRM rm = startRM(conf, dispatcher);
 CapacityScheduler scheduler = (CapacityScheduler) 
rm.getResourceScheduler();
 
+// Register 5 nodes, so that we can blacklist at least one if the AM
+// container fails. As per the calculation, 5 nodes * 0.2 (default) = 1.
+// First register 2 nodes, and after the AM is launched register 3 more nodes.
 MockNM nm1 =
 new MockNM("127.0.0.1:1234", 8000, rm.getResourceTrackerService());
 nm1.registerNode();
@@ -93,6 +96,19 @@ public class TestNodeBlacklistingOnAMFailures {
   otherNode = nm1;
 }
 
+// register 3 nodes now
+MockNM nm3 =
+new MockNM("127.0.0.3:2345", 8000, rm.getResourceTrackerService());
+nm3.registerNode();
+
+MockNM nm4 =
+new MockNM("127.0.0.4:2345", 8000, rm.getResourceTrackerService());
+nm4.registerNode();
+
+MockNM nm5 =
+new MockNM("127.0.0.5:2345", 8000, rm.getResourceTrackerService());
+nm5.registerNode();
+
// Set the exit status to INVALID so that we can verify that the system
// automatically blacklists the node
 makeAMContainerExit(rm, amContainerId, currentNode,
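
For reference, the capacity math in the new comment comes from the blacklist-disable threshold: the AM blacklist is only honored while the blacklisted fraction of the cluster stays below that threshold. A short sketch of the arithmetic, assuming the 0.2 default cited in the comment; the variable names here are illustrative, not the actual YARN fields:

// With 5 registered nodes and a disable-failure-threshold of 0.2, up to
// (int) (5 * 0.2) = 1 node may be blacklisted before blacklisting is
// disabled, which is why the test registers 5 nodes before blacklisting one.
int numClusterNodes = 5;
double disableFailureThreshold = 0.2; // default, per the comment above
int maxBlacklistableNodes = (int) (numClusterNodes * disableFailureThreshold); // == 1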





[26/50] [abbrv] hadoop git commit: YARN-4515. [YARN-3368] Support hosting web UI framework inside YARN RM. (Sunil G via wangda) YARN-5000. [YARN-3368] App attempt page is not loading when timeline server is not started (Sunil G via wangda) YARN-5038. [YARN-3368] Application and Container pages show wrong values when RM is stopped. (Sunil G via wangda)

2016-08-23 Thread wangda
YARN-4515. [YARN-3368] Support hosting web UI framework inside YARN RM. (Sunil 
G via wangda)
YARN-5000. [YARN-3368] App attempt page is not loading when timeline server is 
not started (Sunil G via wangda)
YARN-5038. [YARN-3368] Application and Container pages show wrong values when 
RM is stopped. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b242df75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b242df75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b242df75

Branch: refs/heads/YARN-3368
Commit: b242df75dff5d219277174a930f52b014ec87c57
Parents: f8f7cab
Author: Wangda Tan 
Authored: Tue May 17 22:28:24 2016 -0700
Committer: Wangda Tan 
Committed: Tue Aug 23 10:31:13 2016 -0700

--
 LICENSE.txt |  2 +
 .../resources/assemblies/hadoop-yarn-dist.xml   |  7 ++
 .../hadoop/yarn/conf/YarnConfiguration.java | 23 ++
 .../src/main/resources/yarn-default.xml | 26 +++
 .../server/resourcemanager/ResourceManager.java | 76 +---
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  |  4 +-
 .../webapp/app/adapters/yarn-app-attempt.js |  4 +-
 .../webapp/app/adapters/yarn-container-log.js   |  2 +-
 .../main/webapp/app/adapters/yarn-node-app.js   | 10 ++-
 .../webapp/app/adapters/yarn-node-container.js  | 10 ++-
 .../src/main/webapp/app/adapters/yarn-node.js   |  5 +-
 .../main/webapp/app/components/timeline-view.js | 17 +++--
 .../main/webapp/app/components/tree-selector.js |  4 +-
 .../main/webapp/app/helpers/log-files-comma.js  |  2 +-
 .../src/main/webapp/app/helpers/node-link.js|  2 +-
 .../src/main/webapp/app/helpers/node-menu.js|  6 +-
 .../src/main/webapp/app/helpers/node-name.js| 46 
 .../main/webapp/app/models/yarn-app-attempt.js  | 72 ++-
 .../src/main/webapp/app/models/yarn-app.js  | 14 
 .../main/webapp/app/models/yarn-container.js|  7 ++
 .../main/webapp/app/routes/yarn-app-attempt.js  |  6 +-
 .../webapp/app/serializers/yarn-app-attempt.js  |  5 +-
 .../src/main/webapp/app/serializers/yarn-app.js | 11 ++-
 .../webapp/app/serializers/yarn-container.js|  3 +-
 .../webapp/app/serializers/yarn-node-app.js |  5 +-
 .../app/serializers/yarn-node-container.js  |  5 +-
 .../main/webapp/app/serializers/yarn-rm-node.js |  5 +-
 .../main/webapp/app/templates/application.hbs   | 21 +-
 .../templates/components/app-attempt-table.hbs  | 22 +-
 .../app/templates/components/app-table.hbs  |  8 +--
 .../templates/components/container-table.hbs|  4 +-
 .../templates/components/node-menu-panel.hbs| 44 
 .../app/templates/components/timeline-view.hbs  |  2 +-
 .../src/main/webapp/app/templates/error.hbs |  2 +-
 .../webapp/app/templates/yarn-app-attempt.hbs   |  4 ++
 .../src/main/webapp/app/templates/yarn-app.hbs  |  2 +-
 .../src/main/webapp/app/templates/yarn-apps.hbs |  9 ++-
 .../main/webapp/app/templates/yarn-node-app.hbs |  4 +-
 .../webapp/app/templates/yarn-node-apps.hbs | 12 ++--
 .../app/templates/yarn-node-container.hbs   |  2 +-
 .../app/templates/yarn-node-containers.hbs  | 12 ++--
 .../src/main/webapp/app/templates/yarn-node.hbs |  2 +-
 .../main/webapp/app/templates/yarn-nodes.hbs| 10 ++-
 .../main/webapp/app/templates/yarn-queue.hbs|  8 ++-
 .../src/main/webapp/config/environment.js   |  2 +-
 .../hadoop-yarn-ui/src/main/webapp/package.json |  2 +
 .../webapp/tests/unit/helpers/node-name-test.js | 28 
 47 files changed, 486 insertions(+), 93 deletions(-)
--
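
The YarnConfiguration and yarn-default.xml entries in the list above introduce the switch that lets the ResourceManager host the new web UI itself. A minimal sketch of how such a flag is read from a Hadoop Configuration; the key name and default below are placeholders for illustration, not the exact constants this patch adds:

import org.apache.hadoop.conf.Configuration;

public class HostedUiToggleSketch {
  // Hypothetical key and default, standing in for the real entries.
  static final String HOSTED_UI_ENABLE_KEY = "yarn.webapp.example-ui.enable";
  static final boolean HOSTED_UI_ENABLE_DEFAULT = false;

  /** Returns true when the RM should also serve the new web UI. */
  public static boolean isHostedUiEnabled(Configuration conf) {
    return conf.getBoolean(HOSTED_UI_ENABLE_KEY, HOSTED_UI_ENABLE_DEFAULT);
  }
}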


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b242df75/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 45b6cdf..5efbd14 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1882,6 +1882,7 @@ The Apache Hadoop YARN Web UI component bundles the 
following files under the MI
  - datatables v1.10.8 (https://datatables.net/)
  - moment v2.10.6 (http://momentjs.com/) - Copyright (c) 2011-2015 Tim Wood, 
Iskren Chernev, Moment.js contributors
  - em-helpers v0.5.8 (https://github.com/sreenaths/em-helpers)
+ - ember-array-contains-helper v1.0.2 
(https://github.com/bmeurant/ember-array-contains-helper)
  - ember-cli-app-version v0.5.8 
(https://github.com/EmberSherpa/ember-cli-app-version) - Authored by Taras 
Mankovski 
  - ember-cli-babel v5.1.6 (https://github.com/babel/ember-cli-babel) - 
Authored by Stefan Penner 
  - ember-cli-content-security-policy v0.4.0 
(https://github.com/rwjblue/ember-cli-content-security-policy)
@@ -1895,6 +1896,7 @@ The Apache Hadoop YARN Web UI component bundles the 
following files under the MI
  - ember-cli-sri 

[49/50] [abbrv] hadoop git commit: YARN-4517. Add nodes page and fix a bunch of license issues. (Varun Saxena via wangda)

2016-08-23 Thread wangda
YARN-4517. Add nodes page and fix a bunch of license issues. (Varun Saxena via 
wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d0e0a76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d0e0a76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d0e0a76

Branch: refs/heads/YARN-3368
Commit: 3d0e0a76f15dd343b2a30df3d09bcd25c2dab1c3
Parents: a504ec9
Author: Wangda Tan 
Authored: Mon Mar 21 13:13:02 2016 -0700
Committer: Wangda Tan 
Committed: Tue Aug 23 10:31:13 2016 -0700

--
 .../hadoop-yarn-ui/app/adapters/cluster-info.js |   5 +-
 .../app/adapters/cluster-metric.js  |   5 +-
 .../app/adapters/yarn-app-attempt.js|   3 +-
 .../hadoop-yarn-ui/app/adapters/yarn-app.js |   3 +-
 .../app/adapters/yarn-container-log.js  |  74 +
 .../app/adapters/yarn-container.js  |   5 +-
 .../app/adapters/yarn-node-app.js   |  63 
 .../app/adapters/yarn-node-container.js |  64 
 .../hadoop-yarn-ui/app/adapters/yarn-node.js|  40 +
 .../hadoop-yarn-ui/app/adapters/yarn-queue.js   |   3 +-
 .../hadoop-yarn-ui/app/adapters/yarn-rm-node.js |  45 ++
 .../app/components/simple-table.js  |  38 -
 .../hadoop-yarn/hadoop-yarn-ui/app/config.js|  27 
 .../hadoop-yarn/hadoop-yarn-ui/app/constants.js |  24 +++
 .../app/controllers/application.js  |  55 +++
 .../hadoop-yarn-ui/app/helpers/divide.js|  31 
 .../app/helpers/log-files-comma.js  |  48 ++
 .../hadoop-yarn-ui/app/helpers/node-link.js |  37 +
 .../hadoop-yarn-ui/app/helpers/node-menu.js |  66 
 .../hadoop-yarn-ui/app/models/yarn-app.js   |  14 +-
 .../app/models/yarn-container-log.js|  25 +++
 .../hadoop-yarn-ui/app/models/yarn-node-app.js  |  44 ++
 .../app/models/yarn-node-container.js   |  57 +++
 .../hadoop-yarn-ui/app/models/yarn-node.js  |  33 
 .../hadoop-yarn-ui/app/models/yarn-rm-node.js   |  92 +++
 .../hadoop-yarn/hadoop-yarn-ui/app/router.js|  13 ++
 .../hadoop-yarn-ui/app/routes/application.js|  38 +
 .../hadoop-yarn-ui/app/routes/index.js  |  29 
 .../hadoop-yarn-ui/app/routes/yarn-apps.js  |   4 +-
 .../app/routes/yarn-container-log.js|  55 +++
 .../hadoop-yarn-ui/app/routes/yarn-node-app.js  |  29 
 .../hadoop-yarn-ui/app/routes/yarn-node-apps.js |  29 
 .../app/routes/yarn-node-container.js   |  30 
 .../app/routes/yarn-node-containers.js  |  28 
 .../hadoop-yarn-ui/app/routes/yarn-node.js  |  29 
 .../hadoop-yarn-ui/app/routes/yarn-nodes.js |  25 +++
 .../app/serializers/yarn-container-log.js   |  39 +
 .../app/serializers/yarn-node-app.js|  86 +++
 .../app/serializers/yarn-node-container.js  |  74 +
 .../hadoop-yarn-ui/app/serializers/yarn-node.js |  56 +++
 .../app/serializers/yarn-rm-node.js |  77 ++
 .../app/templates/application.hbs   |   4 +-
 .../hadoop-yarn-ui/app/templates/error.hbs  |  19 +++
 .../hadoop-yarn-ui/app/templates/notfound.hbs   |  20 +++
 .../hadoop-yarn-ui/app/templates/yarn-apps.hbs  |   4 +-
 .../app/templates/yarn-container-log.hbs|  36 +
 .../app/templates/yarn-node-app.hbs |  60 
 .../app/templates/yarn-node-apps.hbs|  51 +++
 .../app/templates/yarn-node-container.hbs   |  70 +
 .../app/templates/yarn-node-containers.hbs  |  58 +++
 .../hadoop-yarn-ui/app/templates/yarn-node.hbs  |  94 
 .../hadoop-yarn-ui/app/templates/yarn-nodes.hbs |  65 
 .../hadoop-yarn-ui/app/utils/converter.js   |  21 ++-
 .../hadoop-yarn-ui/app/utils/sorter.js  |  42 -
 .../hadoop-yarn/hadoop-yarn-ui/bower.json   |   2 +-
 .../hadoop-yarn-ui/config/environment.js|   1 -
 .../unit/adapters/yarn-container-log-test.js|  73 +
 .../tests/unit/adapters/yarn-node-app-test.js   |  93 +++
 .../unit/adapters/yarn-node-container-test.js   |  93 +++
 .../tests/unit/adapters/yarn-node-test.js   |  42 +
 .../tests/unit/adapters/yarn-rm-node-test.js|  44 ++
 .../unit/models/yarn-container-log-test.js  |  48 ++
 .../tests/unit/models/yarn-node-app-test.js |  65 
 .../unit/models/yarn-node-container-test.js |  78 ++
 .../tests/unit/models/yarn-node-test.js |  58 +++
 .../tests/unit/models/yarn-rm-node-test.js  |  95 
 .../unit/routes/yarn-container-log-test.js  | 120 +++
 .../tests/unit/routes/yarn-node-app-test.js |  56 +++
 .../tests/unit/routes/yarn-node-apps-test.js|  60 
 .../unit/routes/yarn-node-container-test.js |  

[31/50] [abbrv] hadoop git commit: YARN-5019. [YARN-3368] Change urls in new YARN ui from camel casing to hyphens. (Sunil G via wangda)

2016-08-23 Thread wangda
YARN-5019. [YARN-3368] Change urls in new YARN ui from camel casing to hyphens. 
(Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8f7cabc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8f7cabc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8f7cabc

Branch: refs/heads/YARN-3368
Commit: f8f7cabc67f1252d3f4ffaf0c56a27f650a4458b
Parents: dfb5eda
Author: Wangda Tan 
Authored: Mon May 9 11:29:59 2016 -0700
Committer: Wangda Tan 
Committed: Tue Aug 23 10:31:13 2016 -0700

--
 .../main/webapp/app/components/tree-selector.js |  4 +--
 .../main/webapp/app/controllers/application.js  | 16 +-
 .../main/webapp/app/helpers/log-files-comma.js  |  2 +-
 .../src/main/webapp/app/helpers/node-link.js|  2 +-
 .../src/main/webapp/app/helpers/node-menu.js| 12 
 .../main/webapp/app/models/yarn-app-attempt.js  |  2 +-
 .../src/main/webapp/app/router.js   | 32 ++--
 .../src/main/webapp/app/routes/index.js |  2 +-
 .../main/webapp/app/routes/yarn-app-attempt.js  |  6 ++--
 .../src/main/webapp/app/routes/yarn-app.js  |  4 +--
 .../src/main/webapp/app/routes/yarn-apps.js |  2 +-
 .../webapp/app/routes/yarn-container-log.js |  2 +-
 .../src/main/webapp/app/routes/yarn-node-app.js |  2 +-
 .../main/webapp/app/routes/yarn-node-apps.js|  2 +-
 .../webapp/app/routes/yarn-node-container.js|  2 +-
 .../webapp/app/routes/yarn-node-containers.js   |  2 +-
 .../src/main/webapp/app/routes/yarn-node.js |  4 +--
 .../src/main/webapp/app/routes/yarn-nodes.js|  2 +-
 .../src/main/webapp/app/routes/yarn-queue.js|  6 ++--
 .../main/webapp/app/routes/yarn-queues/index.js |  2 +-
 .../app/routes/yarn-queues/queues-selector.js   |  2 +-
 .../app/templates/components/app-table.hbs  |  4 +--
 .../webapp/app/templates/yarn-container-log.hbs |  2 +-
 .../main/webapp/app/templates/yarn-node-app.hbs |  4 +--
 .../webapp/app/templates/yarn-node-apps.hbs |  4 +--
 .../app/templates/yarn-node-container.hbs   |  2 +-
 .../app/templates/yarn-node-containers.hbs  |  4 +--
 .../src/main/webapp/app/templates/yarn-node.hbs |  2 +-
 28 files changed, 66 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8f7cabc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
index f7ec020..698c253 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
@@ -126,7 +126,7 @@ export default Ember.Component.extend({
   .attr("transform", function(d) { return "translate(" + source.y0 + "," + 
source.x0 + ")"; })
   .on("click", function(d,i){
 if (d.queueData.get("name") != this.get("selected")) {
-document.location.href = "yarnQueue/" + d.queueData.get("name");
+document.location.href = "yarn-queue/" + d.queueData.get("name");
 }
   }.bind(this));
   // .on("click", click);
@@ -176,7 +176,7 @@ export default Ember.Component.extend({
   .attr("r", 20)
   .attr("href", 
 function(d) {
-  return "yarnQueues/" + d.queueData.get("name");
+  return "yarn-queues/" + d.queueData.get("name");
 })
   .style("stroke", function(d) {
 if (d.queueData.get("name") == this.get("selected")) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8f7cabc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
index 3c68365..2effb13 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
@@ -29,25 +29,25 @@ export default Ember.Controller.extend({
   outputMainMenu: function(){
 var path = this.get('currentPath');
 var html = 'Queues' +
+html = html + '>Queues' +
 '(current)

[03/50] [abbrv] hadoop git commit: HADOOP-13428. Fixed hadoop-common build files to generate jdiff by working around a jdiff-bug. Contributed by Wangda Tan.

2016-08-23 Thread wangda
HADOOP-13428. Fixed hadoop-common build files to generate jdiff by working 
around a jdiff-bug. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99603e90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99603e90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99603e90

Branch: refs/heads/YARN-3368
Commit: 99603e902244f17b04cfd55122f47355d070b588
Parents: 2da32a6
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Authored: Fri Aug 19 19:08:53 2016 -0700
Committer: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Committed: Fri Aug 19 19:08:53 2016 -0700

--
 .../dev-support/jdiff-workaround.patch  |98 +
 .../jdiff/Apache_Hadoop_Common_2.7.2.xml| 46648 +
 hadoop-common-project/hadoop-common/pom.xml | 2 +
 hadoop-project-dist/pom.xml |44 +-
 4 files changed, 46789 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99603e90/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch
--
diff --git 
a/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch 
b/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch
new file mode 100644
index 000..8f87d40
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch
@@ -0,0 +1,98 @@
+diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
+index a277abd..ed7c709 100644
+--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
+@@ -43,18 +43,6 @@
+   public abstract MetricsSystem init(String prefix);
+
+   /**
+-   * Register a metrics source
+-   * @param <T>   the actual type of the source object
+-   * @param source object to register
+-   * @param name  of the source. Must be unique or null (then extracted from
+-   *  the annotations of the source object.)
+-   * @param desc  the description of the source (or null. See above.)
+-   * @return the source object
+-   * @exception MetricsException
+-   */
+-  public abstract <T> T register(String name, String desc, T source);
+-
+-  /**
+* Unregister a metrics source
+* @param name of the source. This is the name you use to call register()
+*/
+@@ -77,18 +65,19 @@
+*/
+   @InterfaceAudience.Private
+   public abstract MetricsSource getSource(String name);
++
+
+   /**
+-   * Register a metrics sink
+-   * @param <T>   the type of the sink
+-   * @param sink  to register
+-   * @param name  of the sink. Must be unique.
+-   * @param desc  the description of the sink
+-   * @return the sink
++   * Register a metrics source
++   * @paramthe actual type of the source object
++   * @param source object to register
++   * @param name  of the source. Must be unique or null (then extracted from
++   *  the annotations of the source object.)
++   * @param desc  the description of the source (or null. See above.)
++   * @return the source object
+* @exception MetricsException
+*/
+-  public abstract 
+-  T register(String name, String desc, T sink);
++  public abstract  T register(String name, String desc, T source);
+
+   /**
+* Register a callback interface for JMX events
+diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+index 6986edb..eeea81f 100644
+--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+@@ -270,27 +270,6 @@ void registerSource(String name, String desc, 
MetricsSource source) {
+ LOG.debug("Registered source "+ name);
+   }
+
+-  @Override public synchronized <T extends MetricsSink>
+-  T register(final String name, final String description, final T sink) {
+-LOG.debug(name +", "+ description);
+-if (allSinks.containsKey(name)) {
+-  LOG.warn("Sink "+ name +" already exists!");
+-  return sink;
+-}
+-allSinks.put(name, sink);
+-if (config != null) {
+-  registerSink(name, description, sink);
+-}
+-// We want to re-register the sink to pick up new config
+-// when the metrics system restarts.
+-register(name, new AbstractCallback() {

[42/50] [abbrv] hadoop git commit: YARN-5183. [YARN-3368] Support for responsive navbar when window is resized. (Kai Sasaki via Sunil G)

2016-08-23 Thread wangda
YARN-5183. [YARN-3368] Support for responsive navbar when window is resized. 
(Kai Sasaki via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7929e931
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7929e931
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7929e931

Branch: refs/heads/YARN-3368
Commit: 7929e931a43e55a67d6054fb47ef5b01d83c17a9
Parents: b242df7
Author: Sunil 
Authored: Fri Jun 10 10:33:41 2016 +0530
Committer: Wangda Tan 
Committed: Tue Aug 23 10:31:13 2016 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7929e931/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
index bce18ce..d21cc3e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
@@ -32,6 +32,9 @@ module.exports = function(defaults) {
   app.import("bower_components/select2/dist/js/select2.min.js");
   app.import('bower_components/jquery-ui/jquery-ui.js');
   app.import('bower_components/more-js/dist/more.js');
+  app.import('bower_components/bootstrap/dist/css/bootstrap.css');
+  app.import('bower_components/bootstrap/dist/css/bootstrap-theme.css');
+  app.import('bower_components/bootstrap/dist/js/bootstrap.min.js');
 
   // Use `app.import` to add additional libraries to the generated
   // output files.





[38/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
deleted file mode 100644
index c5394d0..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
+++ /dev/null
@@ -1,49 +0,0 @@
-import DS from 'ember-data';
-import Converter from 'yarn-ui/utils/converter';
-
-export default DS.JSONAPISerializer.extend({
-internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  
-  if (payload.appAttempt) {
-payload = payload.appAttempt;  
-  }
-  
-  var fixedPayload = {
-id: payload.appAttemptId,
-type: primaryModelClass.modelName, // yarn-app
-attributes: {
-  startTime: Converter.timeStampToDate(payload.startTime),
-  finishedTime: Converter.timeStampToDate(payload.finishedTime),
-  containerId: payload.containerId,
-  nodeHttpAddress: payload.nodeHttpAddress,
-  nodeId: payload.nodeId,
-  state: payload.nodeId,
-  logsLink: payload.logsLink
-}
-  };
-
-  return fixedPayload;
-},
-
-normalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  var p = this.internalNormalizeSingleResponse(store, 
-primaryModelClass, payload, id, requestType);
-  return { data: p };
-},
-
-normalizeArrayResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  // return expected is { data: [ {}, {} ] }
-  var normalizedArrayResponse = {};
-
-  // payload has apps : { app: [ {},{},{} ]  }
-  // need some error handling for ex apps or app may not be defined.
-  normalizedArrayResponse.data = 
payload.appAttempts.appAttempt.map(singleApp => {
-return this.internalNormalizeSingleResponse(store, primaryModelClass,
-  singleApp, singleApp.id, requestType);
-  }, this);
-  return normalizedArrayResponse;
-}
-});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
deleted file mode 100644
index a038fff..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
+++ /dev/null
@@ -1,66 +0,0 @@
-import DS from 'ember-data';
-import Converter from 'yarn-ui/utils/converter';
-
-export default DS.JSONAPISerializer.extend({
-internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  if (payload.app) {
-payload = payload.app;  
-  }
-  
-  var fixedPayload = {
-id: id,
-type: primaryModelClass.modelName, // yarn-app
-attributes: {
-  appName: payload.name,
-  user: payload.user,
-  queue: payload.queue,
-  state: payload.state,
-  startTime: Converter.timeStampToDate(payload.startedTime),
-  elapsedTime: Converter.msToElapsedTime(payload.elapsedTime),
-  finishedTime: Converter.timeStampToDate(payload.finishedTime),
-  finalStatus: payload.finalStatus,
-  progress: payload.progress,
-  diagnostics: payload.diagnostics,
-  amContainerLogs: payload.amContainerLogs,
-  amHostHttpAddress: payload.amHostHttpAddress,
-  logAggregationStatus: payload.logAggregationStatus,
-  unmanagedApplication: payload.unmanagedApplication,
-  amNodeLabelExpression: payload.amNodeLabelExpression,
-  priority: payload.priority,
-  allocatedMB: payload.allocatedMB,
-  allocatedVCores: payload.allocatedVCores,
-  runningContainers: payload.runningContainers,
-  memorySeconds: payload.memorySeconds,
-  vcoreSeconds: payload.vcoreSeconds,
-  preemptedResourceMB: payload.preemptedResourceMB,
-  preemptedResourceVCores: payload.preemptedResourceVCores,
-  numNonAMContainerPreempted: payload.numNonAMContainerPreempted,
-  numAMContainerPreempted: payload.numAMContainerPreempted
-}
-  };
-
-  return fixedPayload;
-},
-
-normalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  var p = this.internalNormalizeSingleResponse(store, 
-primaryModelClass, payload, id, requestType);
-  return { data: p };
-},
-
-normalizeArrayResponse(store, primaryModelClass, payload, id,
-  requestType) {

[34/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-08-23 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
new file mode 100644
index 000..ca80ccd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
@@ -0,0 +1,58 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+
+  
+{{node-menu path="yarnNodeContainers" nodeAddr=model.nodeInfo.addr 
nodeId=model.nodeInfo.id}}
+
+  
+
+  
+Container ID
+Container State
+User
+Logs
+  
+
+
+  {{#if model.containers}}
+{{#each model.containers as |container|}}
+  {{#if container.isDummyContainer}}
+No containers found on this 
node
+  {{else}}
+
+  {{container.containerId}}
+  {{container.state}}
+  {{container.user}}
+  
+{{log-files-comma nodeId=model.nodeInfo.id
+nodeAddr=model.nodeInfo.addr
+containerId=container.containerId
+logFiles=container.containerLogFiles}}
+  
+
+  {{/if}}
+{{/each}}
+  {{/if}}
+
+  
+  {{simple-table table-id="node-containers-table" bFilter=true 
colsOrder="0,desc" colTypes="natural" colTargets="0"}}
+
+  
+
+{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6068a841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
new file mode 100644
index 000..a036076
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
@@ -0,0 +1,94 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+
+  
+{{node-menu path="yarnNode" nodeId=model.rmNode.id nodeAddr=model.node.id}}
+
+  
+Node Information
+  
+
+  
+Total Vmem allocated for Containers
+{{divide num=model.node.totalVmemAllocatedContainersMB 
den=1024}} GB
+  
+  
+Vmem enforcement enabled
+{{model.node.vmemCheckEnabled}}
+  
+  
+Total Pmem allocated for Containers
+{{divide num=model.node.totalPmemAllocatedContainersMB 
den=1024}} GB
+  
+  
+Pmem enforcement enabled
+{{model.node.pmemCheckEnabled}}
+  
+  
+Total VCores allocated for Containers
+{{model.node.totalVCoresAllocatedContainers}}
+  
+  
+Node Healthy Status
+

[13/50] [abbrv] hadoop git commit: HDFS-8312. Added permission check for moving file to Trash. (Weiwei Yang via Eric Yang)

2016-08-23 Thread wangda
HDFS-8312. Added permission check for moving file to Trash. (Weiwei Yang via 
Eric Yang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c49333be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c49333be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c49333be

Branch: refs/heads/YARN-3368
Commit: c49333becfa7652460976a61eb86522010bcfeed
Parents: 4070caa
Author: Eric Yang 
Authored: Mon Aug 22 18:29:56 2016 -0700
Committer: Eric Yang 
Committed: Mon Aug 22 18:29:56 2016 -0700

--
 .../main/java/org/apache/hadoop/fs/Options.java |  3 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java| 10 ++-
 .../ClientNamenodeProtocolTranslatorPB.java |  7 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 14 +++-
 .../hdfs/server/namenode/FSDirRenameOp.java | 28 +--
 .../apache/hadoop/hdfs/TestDFSPermission.java   | 81 
 7 files changed, 132 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c49333be/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
index da75d1c..dc50286 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
@@ -213,7 +213,8 @@ public final class Options {
*/
   public static enum Rename {
 NONE((byte) 0), // No options
-OVERWRITE((byte) 1); // Overwrite the rename destination
+OVERWRITE((byte) 1), // Overwrite the rename destination
+TO_TRASH ((byte) 2); // Rename to trash
 
 private final byte code;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c49333be/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 14f4c0c..66ef890 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -106,6 +106,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 return deletionInterval != 0;
   }
 
+  @SuppressWarnings("deprecation")
   @Override
   public boolean moveToTrash(Path path) throws IOException {
 if (!isEnabled())
@@ -156,10 +157,11 @@ public class TrashPolicyDefault extends TrashPolicy {
   trashPath = new Path(orig + Time.now());
 }
 
-if (fs.rename(path, trashPath)) {   // move to current trash
-  LOG.info("Moved: '" + path + "' to trash at: " + trashPath);
-  return true;
-}
+// move to current trash
+fs.rename(path, trashPath,
+Rename.TO_TRASH);
+LOG.info("Moved: '" + path + "' to trash at: " + trashPath);
+return true;
   } catch (IOException e) {
 cause = e;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c49333be/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index bcf5269..57f8fd6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -523,16 +523,21 @@ public class ClientNamenodeProtocolTranslatorPB implements
   public void rename2(String src, String dst, Rename... options)
   throws IOException {
 boolean overwrite = false;
+boolean toTrash = false;
 if (options != null) {
   for (Rename option : options) {
 if (option == Rename.OVERWRITE) {
   overwrite = true;
+} else if (option == Rename.TO_TRASH) {
+  toTrash = true;
 }
   }
 }
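
The new Rename.TO_TRASH option lets the NameNode distinguish a trash move from an ordinary rename, so FSDirRenameOp (in the file list above) can check that the caller actually has permission to remove the file. A minimal sketch of a caller in the style of the TrashPolicyDefault hunk; the class sits in org.apache.hadoop.fs because the rename(src, dst, options) overload is protected, and the sketch is illustrative only:

package org.apache.hadoop.fs;

import java.io.IOException;
import org.apache.hadoop.fs.Options.Rename;

public class MoveToTrashSketch {
  /** Moves path into trashPath, flagging the rename as a trash move. */
  @SuppressWarnings("deprecation")
  public static void moveToTrash(FileSystem fs, Path path, Path trashPath)
      throws IOException {
    // TO_TRASH makes the NameNode apply the delete-style permission check
    // instead of the plain rename rules.
    fs.rename(path, trashPath, Rename.TO_TRASH);
  }
}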
 
