hadoop git commit: HDFS-11009. Add a tool to reconstruct block meta file from CLI.

2016-10-18 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 190a53b89 -> d22a6a8b8


HDFS-11009. Add a tool to reconstruct block meta file from CLI.

(cherry picked from commit bc4a32aea50e86819730312e89315c0244ce64bf)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java

(cherry picked from commit ad7d3c4db8bfab007cc2ec1bad3c388fd7144369)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d22a6a8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d22a6a8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d22a6a8b

Branch: refs/heads/branch-2.8
Commit: d22a6a8b8250b86d95930413978bd3396834
Parents: 190a53b
Author: Xiao Chen 
Authored: Tue Oct 18 18:32:27 2016 -0700
Committer: Xiao Chen 
Committed: Tue Oct 18 22:42:09 2016 -0700

--
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |   4 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  46 
 .../datanode/fsdataset/impl/FsDatasetUtil.java  |  15 +++
 .../apache/hadoop/hdfs/tools/DebugAdmin.java| 107 +--
 .../src/site/markdown/HDFSCommands.md   |  22 +++-
 .../hadoop/hdfs/tools/TestDebugAdmin.java   |  56 +-
 6 files changed, 209 insertions(+), 41 deletions(-)
--
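The tool is exposed as a new "hdfs debug computeMeta" subcommand, per the HDFSCommands.md change listed above. Below is a hedged sketch of invoking it from Java in a build that contains this commit; the -block/-out flag names follow that documentation update, and the block and meta file paths are placeholders only.

import org.apache.hadoop.hdfs.tools.DebugAdmin;

public class ComputeMetaDriver {
  public static void main(String[] args) throws Exception {
    // Mirrors the CLI:
    //   hdfs debug computeMeta -block <block-file> -out <output-metadata-file>
    // Flag names are taken from this commit's HDFSCommands.md update; the
    // paths below are placeholders, not real DataNode files.
    DebugAdmin.main(new String[] {
        "computeMeta",
        "-block", "/data/dn1/current/BP-1/current/finalized/blk_1073741825",
        "-out", "/tmp/blk_1073741825_1001.meta"});
  }
}

As with any direct manipulation of DataNode files, this is meant for offline recovery of a lost meta file, not for replicas that are actively being written.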


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d22a6a8b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index 8848f86..316d374 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -287,7 +287,7 @@ public class DfsClientConf {
 return classes;
   }
 
-  private DataChecksum.Type getChecksumType(Configuration conf) {
+  private static DataChecksum.Type getChecksumType(Configuration conf) {
 final String checksum = conf.get(
 DFS_CHECKSUM_TYPE_KEY,
 DFS_CHECKSUM_TYPE_DEFAULT);
@@ -302,7 +302,7 @@ public class DfsClientConf {
   }
 
   // Construct a checksum option from conf
-  private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
+  public static ChecksumOpt getChecksumOptFromConf(Configuration conf) {
 DataChecksum.Type type = getChecksumType(conf);
 int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
 DFS_BYTES_PER_CHECKSUM_DEFAULT);
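The hunk above widens getChecksumOptFromConf from a private instance method to a public static one, so callers outside DfsClientConf can derive checksum parameters straight from a Configuration. A minimal sketch of such a caller, assuming a build that includes this change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumFromConf {
  // Builds a DataChecksum from dfs.checksum.type / dfs.bytes-per-checksum.
  public static DataChecksum fromConf(Configuration conf) {
    ChecksumOpt opt = DfsClientConf.getChecksumOptFromConf(conf);
    return DataChecksum.newDataChecksum(opt.getChecksumType(),
        opt.getBytesPerChecksum());
  }
}

Presumably this is what lets the new debug command build the same checksum the client writers would use, without instantiating a full DfsClientConf.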

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d22a6a8b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 53c6484..d688670 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -756,7 +756,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 }
 return f;
   }
-  
+
   /**
* Return the File associated with a block, without first
* checking that it exists. This should be used when the
@@ -812,7 +812,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 }
 return info;
   }
-  
+
   /**
* Get the meta info of a block stored in volumeMap. Block is looked up
* without matching the generation stamp.
@@ -831,7 +831,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 }
 return info;
   }
-  
+
   /**
* Returns handles to the block file and its metadata file
*/
@@ -1021,7 +1021,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
* @param blockFile block file for which the checksum will be computed
* @throws IOException
*/
-  private static void computeChecksum(File srcMeta, File dstMeta,
+  static void computeChecksum(File srcMeta, File dstMeta,
   File blockFile, int smallBufferSize, final Configuration conf)
   throws IOException {
 final DataChecksum checksum = 

hadoop git commit: HDFS-11009. Add a tool to reconstruct block meta file from CLI.

2016-10-18 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 74f1c49d7 -> ad7d3c4db


HDFS-11009. Add a tool to reconstruct block meta file from CLI.

(cherry picked from commit bc4a32aea50e86819730312e89315c0244ce64bf)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad7d3c4d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad7d3c4d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad7d3c4d

Branch: refs/heads/branch-2
Commit: ad7d3c4db8bfab007cc2ec1bad3c388fd7144369
Parents: 74f1c49
Author: Xiao Chen 
Authored: Tue Oct 18 18:32:27 2016 -0700
Committer: Xiao Chen 
Committed: Tue Oct 18 22:40:58 2016 -0700

--
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |   4 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  46 
 .../datanode/fsdataset/impl/FsDatasetUtil.java  |  15 +++
 .../apache/hadoop/hdfs/tools/DebugAdmin.java| 107 +--
 .../src/site/markdown/HDFSCommands.md   |  22 +++-
 .../hadoop/hdfs/tools/TestDebugAdmin.java   |  56 +-
 6 files changed, 209 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad7d3c4d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index 8848f86..316d374 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -287,7 +287,7 @@ public class DfsClientConf {
 return classes;
   }
 
-  private DataChecksum.Type getChecksumType(Configuration conf) {
+  private static DataChecksum.Type getChecksumType(Configuration conf) {
 final String checksum = conf.get(
 DFS_CHECKSUM_TYPE_KEY,
 DFS_CHECKSUM_TYPE_DEFAULT);
@@ -302,7 +302,7 @@ public class DfsClientConf {
   }
 
   // Construct a checksum option from conf
-  private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
+  public static ChecksumOpt getChecksumOptFromConf(Configuration conf) {
 DataChecksum.Type type = getChecksumType(conf);
 int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
 DFS_BYTES_PER_CHECKSUM_DEFAULT);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad7d3c4d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 4f09525..0b55e7e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -758,7 +758,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 }
 return f;
   }
-  
+
   /**
* Return the File associated with a block, without first
* checking that it exists. This should be used when the
@@ -814,7 +814,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 }
 return info;
   }
-  
+
   /**
* Get the meta info of a block stored in volumeMap. Block is looked up
* without matching the generation stamp.
@@ -833,7 +833,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 }
 return info;
   }
-  
+
   /**
* Returns handles to the block file and its metadata file
*/
@@ -1023,7 +1023,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
* @param blockFile block file for which the checksum will be computed
* @throws IOException
*/
-  private static void computeChecksum(File srcMeta, File dstMeta,
+  static void computeChecksum(File srcMeta, File dstMeta,
   File blockFile, int smallBufferSize, final Configuration conf)
   throws IOException {
 final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(srcMeta,
@@ -1088,20 +1088,20 @@ class FsDatasetImpl 

hadoop git commit: HDFS-11009. Add a tool to reconstruct block meta file from CLI.

2016-10-18 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4bca38524 -> c5573e6a7


HDFS-11009. Add a tool to reconstruct block meta file from CLI.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5573e6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5573e6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5573e6a

Branch: refs/heads/trunk
Commit: c5573e6a7599da17cad733cd274e7a9b75b22bb0
Parents: 4bca385
Author: Xiao Chen 
Authored: Tue Oct 18 18:32:27 2016 -0700
Committer: Xiao Chen 
Committed: Tue Oct 18 22:42:28 2016 -0700

--
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |   4 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   2 +-
 .../datanode/fsdataset/impl/FsDatasetUtil.java  |  32 ++
 .../apache/hadoop/hdfs/tools/DebugAdmin.java| 107 +--
 .../src/site/markdown/HDFSCommands.md   |  22 +++-
 .../hadoop/hdfs/tools/TestDebugAdmin.java   |  56 +-
 6 files changed, 204 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5573e6a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index 4f4c7b2..b2fd487 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -284,7 +284,7 @@ public class DfsClientConf {
 return classes;
   }
 
-  private DataChecksum.Type getChecksumType(Configuration conf) {
+  private static DataChecksum.Type getChecksumType(Configuration conf) {
 final String checksum = conf.get(
 DFS_CHECKSUM_TYPE_KEY,
 DFS_CHECKSUM_TYPE_DEFAULT);
@@ -299,7 +299,7 @@ public class DfsClientConf {
   }
 
   // Construct a checksum option from conf
-  private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
+  public static ChecksumOpt getChecksumOptFromConf(Configuration conf) {
 DataChecksum.Type type = getChecksumType(conf);
 int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
 DFS_BYTES_PER_CHECKSUM_DEFAULT);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5573e6a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index fd747bd..ba653ac 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1031,7 +1031,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
* @param conf the {@link Configuration}
* @throws IOException
*/
-  private static void computeChecksum(ReplicaInfo srcReplica, File dstMeta,
+  static void computeChecksum(ReplicaInfo srcReplica, File dstMeta,
   int smallBufferSize, final Configuration conf)
   throws IOException {
 File srcMeta = new File(srcReplica.getMetadataURI());
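The hunk above relaxes computeChecksum from private to package-private so it can be driven directly. Conceptually, rebuilding a meta file means writing a BlockMetadataHeader followed by one checksum value per bytes-per-checksum chunk of the block file. The following is a hedged, self-contained sketch of that idea, not the committed implementation; it assumes the checksum settings come from configuration and the file paths are supplied by the caller.

import java.io.DataOutputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.util.DataChecksum;

public class ComputeMetaSketch {
  // Writes metaFile = BlockMetadataHeader + one checksum per chunk of blockFile.
  public static void rebuildMeta(String blockFile, String metaFile,
      Configuration conf) throws IOException {
    ChecksumOpt opt = DfsClientConf.getChecksumOptFromConf(conf);
    DataChecksum checksum = DataChecksum.newDataChecksum(
        opt.getChecksumType(), opt.getBytesPerChecksum());
    byte[] data = new byte[checksum.getBytesPerChecksum()];
    byte[] crc = new byte[checksum.getChecksumSize()];
    try (InputStream in = new FileInputStream(blockFile);
         DataOutputStream out =
             new DataOutputStream(new FileOutputStream(metaFile))) {
      BlockMetadataHeader.writeHeader(out, checksum);  // version + checksum descriptor
      int n;
      while ((n = readChunk(in, data)) > 0) {
        checksum.reset();
        checksum.update(data, 0, n);                   // last chunk may be partial
        checksum.writeValue(crc, 0, true);
        out.write(crc, 0, crc.length);
      }
    }
  }

  // Fills buf as far as possible; returns bytes read (0 at end of stream).
  private static int readChunk(InputStream in, byte[] buf) throws IOException {
    int total = 0;
    while (total < buf.length) {
      int r = in.read(buf, total, buf.length - total);
      if (r < 0) {
        break;
      }
      total += r;
    }
    return total;
  }
}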

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5573e6a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
index a4d433d..563f66a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
@@ -21,13 +21,19 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FilenameFilter;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.RandomAccessFile;
+import java.net.URI;
 import java.util.Arrays;
 
+import 

hadoop git commit: HDFS-10920. TestStorageMover#testNoSpaceDisk is failing intermittently. Contributed by Rakesh R

2016-10-18 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/trunk c023c7488 -> d26a1bb9d


HDFS-10920. TestStorageMover#testNoSpaceDisk is failing intermittently. 
Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d26a1bb9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d26a1bb9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d26a1bb9

Branch: refs/heads/trunk
Commit: d26a1bb9d60f50763887d66399579bac7ca81982
Parents: c023c74
Author: Kai Zheng 
Authored: Tue Oct 18 14:51:08 2016 +0600
Committer: Kai Zheng 
Committed: Tue Oct 18 14:51:08 2016 +0600

--
 .../hadoop/hdfs/server/mover/TestStorageMover.java | 17 ++---
 1 file changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d26a1bb9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index 92a70a0..1b5bd81 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -613,8 +613,10 @@ public class TestStorageMover {
   }
 
   private void waitForAllReplicas(int expectedReplicaNum, Path file,
-  DistributedFileSystem dfs) throws Exception {
-for (int i = 0; i < 5; i++) {
+  DistributedFileSystem dfs, int retryCount) throws Exception {
+LOG.info("Waiting for replicas count " + expectedReplicaNum
++ ", file name: " + file);
+for (int i = 0; i < retryCount; i++) {
   LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0,
   BLOCK_SIZE);
   LocatedBlock lb = lbs.get(0);
@@ -664,7 +666,7 @@ public class TestStorageMover {
   for (int i = 0; i < 2; i++) {
 final Path p = new Path(pathPolicyMap.hot, "file" + i);
 DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
-waitForAllReplicas(replication, p, test.dfs);
+waitForAllReplicas(replication, p, test.dfs, 10);
   }
 
   // set all the DISK volume to full
@@ -679,16 +681,17 @@ public class TestStorageMover {
   final Replication r = test.getReplication(file0);
   final short newReplication = (short) 5;
   test.dfs.setReplication(file0, newReplication);
-  Thread.sleep(1);
+  waitForAllReplicas(newReplication, file0, test.dfs, 10);
   test.verifyReplication(file0, r.disk, newReplication - r.disk);
 
   // test creating a cold file and then increase replication
   final Path p = new Path(pathPolicyMap.cold, "foo");
   DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
+  waitForAllReplicas(replication, p, test.dfs, 10);
   test.verifyReplication(p, 0, replication);
 
   test.dfs.setReplication(p, newReplication);
-  Thread.sleep(1);
+  waitForAllReplicas(newReplication, p, test.dfs, 10);
   test.verifyReplication(p, 0, newReplication);
 
   //test move a hot file to warm
@@ -722,7 +725,7 @@ public class TestStorageMover {
   for (int i = 0; i < 2; i++) {
 final Path p = new Path(pathPolicyMap.cold, "file" + i);
 DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
-waitForAllReplicas(replication, p, test.dfs);
+waitForAllReplicas(replication, p, test.dfs, 10);
   }
 
   // set all the ARCHIVE volume to full
@@ -739,7 +742,7 @@ public class TestStorageMover {
 
 final short newReplication = (short) 5;
 test.dfs.setReplication(file0, newReplication);
-Thread.sleep(1);
+waitForAllReplicas(r.archive, file0, test.dfs, 10);
 
 test.verifyReplication(file0, 0, r.archive);
   }
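The patch replaces fixed Thread.sleep(1) calls with waitForAllReplicas, which re-checks the located blocks until the expected replica count shows up or the retry budget runs out. The same poll-until-timeout idea in a generic form (an illustrative sketch, not code from this patch):

import java.util.concurrent.Callable;
import java.util.concurrent.TimeoutException;

public final class PollUntil {
  // Re-checks a condition instead of sleeping a fixed time,
  // and fails loudly on timeout instead of silently racing.
  public static void pollUntil(Callable<Boolean> condition, long intervalMs,
      int maxRetries, String what) throws Exception {
    for (int i = 0; i < maxRetries; i++) {
      if (condition.call()) {
        return;
      }
      Thread.sleep(intervalMs);
    }
    throw new TimeoutException("Timed out waiting for " + what);
  }
}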





hadoop git commit: HDFS-10920. TestStorageMover#testNoSpaceDisk is failing intermittently. Contributed by Rakesh R

2016-10-18 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b2618685f -> 3972bb3bb


HDFS-10920. TestStorageMover#testNoSpaceDisk is failing intermittently. 
Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3972bb3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3972bb3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3972bb3b

Branch: refs/heads/branch-2
Commit: 3972bb3bbabacc42b738590cd8815d216c5cbdd4
Parents: b261868
Author: Kai Zheng 
Authored: Tue Oct 18 15:08:15 2016 +0600
Committer: Kai Zheng 
Committed: Tue Oct 18 15:08:15 2016 +0600

--
 .../hadoop/hdfs/server/mover/TestStorageMover.java | 17 ++---
 1 file changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3972bb3b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index 549dbc8..26032cf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -641,8 +641,10 @@ public class TestStorageMover {
   }
 
   private void waitForAllReplicas(int expectedReplicaNum, Path file,
-  DistributedFileSystem dfs) throws Exception {
-for (int i = 0; i < 5; i++) {
+  DistributedFileSystem dfs, int retryCount) throws Exception {
+LOG.info("Waiting for replicas count " + expectedReplicaNum
++ ", file name: " + file);
+for (int i = 0; i < retryCount; i++) {
   LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0,
   BLOCK_SIZE);
   LocatedBlock lb = lbs.get(0);
@@ -692,7 +694,7 @@ public class TestStorageMover {
   for (int i = 0; i < 2; i++) {
 final Path p = new Path(pathPolicyMap.hot, "file" + i);
 DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
-waitForAllReplicas(replication, p, test.dfs);
+waitForAllReplicas(replication, p, test.dfs, 10);
   }
 
   // set all the DISK volume to full
@@ -707,16 +709,17 @@ public class TestStorageMover {
   final Replication r = test.getReplication(file0);
   final short newReplication = (short) 5;
   test.dfs.setReplication(file0, newReplication);
-  Thread.sleep(1);
+  waitForAllReplicas(newReplication, file0, test.dfs, 10);
   test.verifyReplication(file0, r.disk, newReplication - r.disk);
 
   // test creating a cold file and then increase replication
   final Path p = new Path(pathPolicyMap.cold, "foo");
   DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
+  waitForAllReplicas(replication, p, test.dfs, 10);
   test.verifyReplication(p, 0, replication);
 
   test.dfs.setReplication(p, newReplication);
-  Thread.sleep(1);
+  waitForAllReplicas(newReplication, p, test.dfs, 10);
   test.verifyReplication(p, 0, newReplication);
 
   //test move a hot file to warm
@@ -750,7 +753,7 @@ public class TestStorageMover {
   for (int i = 0; i < 2; i++) {
 final Path p = new Path(pathPolicyMap.cold, "file" + i);
 DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
-waitForAllReplicas(replication, p, test.dfs);
+waitForAllReplicas(replication, p, test.dfs, 10);
   }
 
   // set all the ARCHIVE volume to full
@@ -767,7 +770,7 @@ public class TestStorageMover {
 
 final short newReplication = (short) 5;
 test.dfs.setReplication(file0, newReplication);
-Thread.sleep(1);
+waitForAllReplicas(r.archive, file0, test.dfs, 10);
 
 test.verifyReplication(file0, 0, r.archive);
   }





hadoop git commit: HDFS-10920. TestStorageMover#testNoSpaceDisk is failing intermittently. Contributed by Rakesh R

2016-10-18 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 ae972dd08 -> d26e11f30


HDFS-10920. TestStorageMover#testNoSpaceDisk is failing intermittently. 
Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d26e11f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d26e11f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d26e11f3

Branch: refs/heads/branch-2.8
Commit: d26e11f30eb15b20ea9f71a0e22d5675d3b04b87
Parents: ae972dd
Author: Kai Zheng 
Authored: Tue Oct 18 15:09:25 2016 +0600
Committer: Kai Zheng 
Committed: Tue Oct 18 15:09:25 2016 +0600

--
 .../hadoop/hdfs/server/mover/TestStorageMover.java | 17 ++---
 1 file changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d26e11f3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index 549dbc8..26032cf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -641,8 +641,10 @@ public class TestStorageMover {
   }
 
   private void waitForAllReplicas(int expectedReplicaNum, Path file,
-  DistributedFileSystem dfs) throws Exception {
-for (int i = 0; i < 5; i++) {
+  DistributedFileSystem dfs, int retryCount) throws Exception {
+LOG.info("Waiting for replicas count " + expectedReplicaNum
++ ", file name: " + file);
+for (int i = 0; i < retryCount; i++) {
   LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0,
   BLOCK_SIZE);
   LocatedBlock lb = lbs.get(0);
@@ -692,7 +694,7 @@ public class TestStorageMover {
   for (int i = 0; i < 2; i++) {
 final Path p = new Path(pathPolicyMap.hot, "file" + i);
 DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
-waitForAllReplicas(replication, p, test.dfs);
+waitForAllReplicas(replication, p, test.dfs, 10);
   }
 
   // set all the DISK volume to full
@@ -707,16 +709,17 @@ public class TestStorageMover {
   final Replication r = test.getReplication(file0);
   final short newReplication = (short) 5;
   test.dfs.setReplication(file0, newReplication);
-  Thread.sleep(1);
+  waitForAllReplicas(newReplication, file0, test.dfs, 10);
   test.verifyReplication(file0, r.disk, newReplication - r.disk);
 
   // test creating a cold file and then increase replication
   final Path p = new Path(pathPolicyMap.cold, "foo");
   DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
+  waitForAllReplicas(replication, p, test.dfs, 10);
   test.verifyReplication(p, 0, replication);
 
   test.dfs.setReplication(p, newReplication);
-  Thread.sleep(1);
+  waitForAllReplicas(newReplication, p, test.dfs, 10);
   test.verifyReplication(p, 0, newReplication);
 
   //test move a hot file to warm
@@ -750,7 +753,7 @@ public class TestStorageMover {
   for (int i = 0; i < 2; i++) {
 final Path p = new Path(pathPolicyMap.cold, "file" + i);
 DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
-waitForAllReplicas(replication, p, test.dfs);
+waitForAllReplicas(replication, p, test.dfs, 10);
   }
 
   // set all the ARCHIVE volume to full
@@ -767,7 +770,7 @@ public class TestStorageMover {
 
 final short newReplication = (short) 5;
 test.dfs.setReplication(file0, newReplication);
-Thread.sleep(1);
+waitForAllReplicas(r.archive, file0, test.dfs, 10);
 
 test.verifyReplication(file0, 0, r.archive);
   }





hadoop git commit: HDFS-10512. VolumeScanner may terminate due to NPE in DataNode.reportBadBlocks. Contributed by Wei-Chiu Chuang and Yiqun Lin.

2016-10-18 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 86f291f05 -> b2dfab432


HDFS-10512. VolumeScanner may terminate due to NPE in DataNode.reportBadBlocks. 
Contributed by Wei-Chiu Chuang and Yiqun Lin.

(cherry picked from commit da6f1b88dd47e22b24d44f6fc8bbee73e85746f7)
(cherry picked from commit 96e68e722f0fe99d1609b38377014c04ef5b6640)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2dfab43
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2dfab43
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2dfab43

Branch: refs/heads/branch-2.7
Commit: b2dfab4326605b4b282a3ae75d00223b35979662
Parents: 86f291f
Author: Yongjun Zhang 
Authored: Fri Jul 8 19:40:44 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Oct 18 09:09:06 2016 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   | 23 ++-
 .../hdfs/server/datanode/VolumeScanner.java |  2 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  3 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   | 42 
 5 files changed, 70 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2dfab43/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 277efe1..a13a566 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -154,6 +154,9 @@ Release 2.7.4 - UNRELEASED
 HDFS-11002. Fix broken attr/getfattr/setfattr links in
 ExtendedAttributes.md. (Mingliang Liu via aajisaka)
 
+HDFS-10512. VolumeScanner may terminate due to NPE in 
+DataNode.reportBadBlocks. Contributed by Wei-Chiu Chuang and Yiqun Lin.
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2dfab43/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 9ef23d4..eb159eb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -990,8 +990,25 @@ public class DataNode extends ReconfigurableBase
* Report a bad block which is hosted on the local DN.
*/
   public void reportBadBlocks(ExtendedBlock block) throws IOException{
-BPOfferService bpos = getBPOSForBlock(block);
 FsVolumeSpi volume = getFSDataset().getVolume(block);
+if (volume == null) {
+  LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
+  return;
+}
+reportBadBlocks(block, volume);
+  }
+
+  /**
+   * Report a bad block which is hosted on the local DN.
+   *
+   * @param block the bad block which is hosted on the local DN
+   * @param volume the volume that block is stored in and the volume
+   *must not be null
+   * @throws IOException
+   */
+  public void reportBadBlocks(ExtendedBlock block, FsVolumeSpi volume)
+  throws IOException {
+BPOfferService bpos = getBPOSForBlock(block);
 bpos.reportBadBlocks(
 block, volume.getStorageID(), volume.getStorageType());
   }
@@ -1860,6 +1877,10 @@ public class DataNode extends ReconfigurableBase
   private void reportBadBlock(final BPOfferService bpos,
   final ExtendedBlock block, final String msg) {
 FsVolumeSpi volume = getFSDataset().getVolume(block);
+if (volume == null) {
+  LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
+  return;
+}
 bpos.reportBadBlocks(
 block, volume.getStorageID(), volume.getStorageType());
 LOG.warn(msg);
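The new two-argument overload lets a caller that already holds an FsVolumeSpi reference report the bad block without a second getVolume() lookup, which can return null after a volume removal and is the source of the NPE fixed here. A hedged sketch of such a caller (names are illustrative):

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

public class BadBlockReporter {
  // Reports a corrupt block using a volume reference captured when the scan
  // started, so no second volume lookup can race with a volume removal.
  static void report(DataNode dn, ExtendedBlock block, FsVolumeSpi volume)
      throws IOException {
    if (volume != null) {
      dn.reportBadBlocks(block, volume);
    }
  }
}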

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2dfab43/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index ad546d2..d0300f6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ 

hadoop git commit: HDFS-10879. TestEncryptionZonesWithKMS#testReadWrite fails intermittently. Contributed by Xiao Chen.

2016-10-18 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 f27cf17f7 -> d053d1c3f


HDFS-10879. TestEncryptionZonesWithKMS#testReadWrite fails intermittently. 
Contributed by Xiao Chen.

(cherry picked from commit d31bef575478fe78c0600fb602e117e4b81c9887)
(cherry picked from commit 11ed4f5d40effcdb26461a393379c6bddaa29bed)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d053d1c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d053d1c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d053d1c3

Branch: refs/heads/branch-2.7
Commit: d053d1c3ffb76122f55d98894631676edaf18660
Parents: f27cf17
Author: Xiao Chen 
Authored: Tue Sep 20 16:52:05 2016 -0700
Committer: Zhe Zhang 
Committed: Tue Oct 18 10:16:30 2016 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../apache/hadoop/hdfs/TestEncryptionZones.java | 22 +++-
 2 files changed, 24 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d053d1c3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a13a566..1776a49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -157,6 +157,9 @@ Release 2.7.4 - UNRELEASED
 HDFS-10512. VolumeScanner may terminate due to NPE in 
 DataNode.reportBadBlocks. Contributed by Wei-Chiu Chuang and Yiqun Lin.
 
+HDFS-10879. TestEncryptionZonesWithKMS#testReadWrite fails intermittently.
+(xiaochen)
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d053d1c3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index a30f396..39f76bd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import 
org.apache.hadoop.crypto.key.kms.server.EagerKeyGeneratorKeyProviderCryptoExtension;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -635,14 +636,33 @@ public class TestEncryptionZones {
 // Roll the key of the encryption zone
 assertNumZones(1);
 String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
+FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
 cluster.getNamesystem().getProvider().rollNewVersion(keyName);
+/**
+ * due to the cache on the server side, client may get old keys.
+ * @see EagerKeyGeneratorKeyProviderCryptoExtension#rollNewVersion(String)
+ */
+boolean rollSucceeded = false;
+for (int i = 0; i <= EagerKeyGeneratorKeyProviderCryptoExtension
+.KMS_KEY_CACHE_SIZE_DEFAULT + CommonConfigurationKeysPublic.
+KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT; ++i) {
+  KeyProviderCryptoExtension.EncryptedKeyVersion ekv2 =
+  cluster.getNamesystem().getProvider().generateEncryptedKey(TEST_KEY);
+  if (!(feInfo1.getEzKeyVersionName()
+  .equals(ekv2.getEncryptionKeyVersionName()))) {
+rollSucceeded = true;
+break;
+  }
+}
+Assert.assertTrue("rollover did not generate a new key even after"
++ " queue is drained", rollSucceeded);
+
 // Read them back in and compare byte-by-byte
 verifyFilesEqual(fs, baseFile, encFile1, len);
 // Write a new enc file and validate
 final Path encFile2 = new Path(zone, "myfile2");
 DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
 // FEInfos should be different
-FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
 FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
 assertFalse("EDEKs should be different", Arrays
 .equals(feInfo1.getEncryptedDataEncryptionKey(),




hadoop git commit: HADOOP-12259. Utility to Dynamic port allocation (brahmareddy via rkanter)

2016-10-18 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 d053d1c3f -> bd1676359


HADOOP-12259. Utility to Dynamic port allocation (brahmareddy via rkanter)

(cherry picked from commit ee233ec95ce8cfc8309d3adc072d926cd85eba08)
(cherry picked from commit 731ed9cad81f91e9da18148f0c4757a38ca406f6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd167635
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd167635
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd167635

Branch: refs/heads/branch-2.7
Commit: bd16763591ec24c7f180353990d1644655968e66
Parents: d053d1c
Author: Robert Kanter 
Authored: Fri Jul 24 09:41:53 2015 -0700
Committer: Zhe Zhang 
Committed: Tue Oct 18 10:25:52 2016 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  1 +
 .../org/apache/hadoop/net/ServerSocketUtil.java | 63 
 2 files changed, 64 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd167635/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4f16aa6..ffbc311 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -7,6 +7,7 @@ Release 2.7.4 - UNRELEASED
   NEW FEATURES
 
   IMPROVEMENTS
+HADOOP-12259. Utility to Dynamic port allocation (brahmareddy via rkanter)
 
   OPTIMIZATIONS
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd167635/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
new file mode 100644
index 000..0ce835f
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.net;
+
+import java.io.IOException;
+import java.net.ServerSocket;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class ServerSocketUtil {
+
+  private static final Log LOG = LogFactory.getLog(ServerSocketUtil.class);
+
+  /**
+   * Port scan & allocate is how most other apps find ports
+   * 
+   * @param port given port
+   * @param retries number of retires
+   * @return
+   * @throws IOException
+   */
+  public static int getPort(int port, int retries) throws IOException {
+Random rand = new Random();
+int tryPort = port;
+int tries = 0;
+while (true) {
+  if (tries > 0) {
+tryPort = port + rand.nextInt(65535 - port);
+  }
+  LOG.info("Using port " + tryPort);
+  try (ServerSocket s = new ServerSocket(tryPort)) {
+return tryPort;
+  } catch (IOException e) {
+tries++;
+if (tries >= retries) {
+  LOG.info("Port is already in use; giving up");
+  throw e;
+} else {
+  LOG.info("Port is already in use; trying again");
+}
+  }
+}
+  }
+
+}
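A usage sketch for the helper above: prefer a well-known port but fall back to a random free one within the retry budget. ServerSocketUtil lives under src/test, so this is for test code; the configuration key is only an example of where the chosen port might go.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.ServerSocketUtil;

public class PortExample {
  public static void main(String[] args) throws Exception {
    // Prefer 50070, but accept any free port found within 10 tries.
    int port = ServerSocketUtil.getPort(50070, 10);
    Configuration conf = new Configuration();
    conf.set("dfs.namenode.http-address", "127.0.0.1:" + port);
    System.out.println("Picked port " + port);
  }
}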





hadoop git commit: HDFS-9444. Add utility to find set of available ephemeral ports to ServerSocketUtil. Contributed by Masatake Iwasaki

2016-10-18 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 4aea76067 -> 1e5c40b5b


HDFS-9444. Add utility to find set of available ephemeral ports to 
ServerSocketUtil. Contributed by Masatake Iwasaki

(cherry picked from commit e9a34ae29c7390f3ffcbeee02dc5faa26fca482a)
(cherry picked from commit 5f754e8638d5a35ab12765edec6561228312f71c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e5c40b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e5c40b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e5c40b5

Branch: refs/heads/branch-2.7
Commit: 1e5c40b5b4b013bd63e3c0e060cb4c31e5e8ba82
Parents: 4aea760
Author: Brahma Reddy Battula 
Authored: Wed Sep 28 10:50:50 2016 +0530
Committer: Zhe Zhang 
Committed: Tue Oct 18 10:50:19 2016 -0700

--
 .../org/apache/hadoop/net/ServerSocketUtil.java | 22 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/namenode/ha/TestEditLogTailer.java   | 39 +++-
 3 files changed, 54 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e5c40b5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
index 0ce835f..b9e2c62 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
@@ -60,4 +60,26 @@ public class ServerSocketUtil {
 }
   }
 
+  /**
+   * Find the specified number of unique ports available.
+   * The ports are all closed afterwards,
+   * so other network services started may grab those same ports.
+   *
+   * @param numPorts number of required port nubmers
+   * @return array of available port numbers
+   * @throws IOException
+   */
+  public static int[] getPorts(int numPorts) throws IOException {
+ServerSocket[] sockets = new ServerSocket[numPorts];
+int[] ports = new int[numPorts];
+for (int i = 0; i < numPorts; i++) {
+  ServerSocket sock = new ServerSocket(0);
+  sockets[i] = sock;
+  ports[i] = sock.getLocalPort();
+}
+for (ServerSocket sock : sockets) {
+  sock.close();
+}
+return ports;
+  }
 }
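A sketch of using getPorts to pre-allocate distinct ports for a test, for example an HA pair of NameNodes; as the javadoc notes, the sockets are closed again, so another process can still grab the ports before the test binds them. Key names below are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.ServerSocketUtil;

public class PortsExample {
  public static void main(String[] args) throws Exception {
    int[] ports = ServerSocketUtil.getPorts(2);
    Configuration conf = new Configuration();
    conf.set("dfs.namenode.rpc-address.ns1.nn1", "127.0.0.1:" + ports[0]);
    conf.set("dfs.namenode.rpc-address.ns1.nn2", "127.0.0.1:" + ports[1]);
  }
}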

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e5c40b5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c9333a1..b19863b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -169,6 +169,9 @@ Release 2.7.4 - UNRELEASED
 HDFS-10301. Remove FBR tracking state to fix false zombie storage
 detection for interleaving block reports. (Vinitha Gankidi via shv)
 
+HDFS-9444. Add utility to find set of available ephemeral ports to
+ServerSocketUtil. (Masatake Iwasaki via Brahma Reddy Battula)
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e5c40b5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
index 8c61c92..ea7b00a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
@@ -19,8 +19,11 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static org.junit.Assert.assertTrue;
 
+import static org.junit.Assert.fail;
+
 import java.io.File;
 import java.io.IOException;
+import java.net.BindException;
 import java.net.URI;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -37,6 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import 

hadoop git commit: YARN-5743. [Atsv2] Publish queue name and RMAppMetrics to ATS (Rohith Sharma K S via Varun Saxena)

2016-10-18 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/trunk d26a1bb9d -> b154d3edc


YARN-5743. [Atsv2] Publish queue name and RMAppMetrics to ATS (Rohith Sharma K 
S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b154d3ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b154d3ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b154d3ed

Branch: refs/heads/trunk
Commit: b154d3edcee95254d41c237142870f39e826a519
Parents: d26a1bb
Author: Varun Saxena 
Authored: Tue Oct 18 23:32:52 2016 +0530
Committer: Varun Saxena 
Committed: Tue Oct 18 23:32:52 2016 +0530

--
 .../metrics/ApplicationMetricsConstants.java| 16 ++-
 .../metrics/TimelineServiceV2Publisher.java | 49 ++--
 .../TestSystemMetricsPublisherForV2.java| 18 ---
 3 files changed, 70 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b154d3ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
index 1774208..521e0af 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
@@ -71,10 +71,22 @@ public class ApplicationMetricsConstants {
   "YARN_APPLICATION_STATE";
   
   public static final String APP_CPU_METRICS =
-  "YARN_APPLICATION_CPU_METRIC";
+  "YARN_APPLICATION_CPU";
   
   public static final String APP_MEM_METRICS =
-  "YARN_APPLICATION_MEM_METRIC";
+  "YARN_APPLICATION_MEMORY";
+
+  public static final String APP_RESOURCE_PREEMPTED_CPU =
+  "YARN_APPLICATION_RESOURCE_PREEMPTED_CPU";
+
+  public static final String APP_RESOURCE_PREEMPTED_MEM =
+  "YARN_APPLICATION_RESOURCE_PREEMPTED_MEMORY";
+
+  public static final String APP_NON_AM_CONTAINER_PREEMPTED =
+  "YARN_APPLICATION_NON_AM_CONTAINER_PREEMPTED";
+
+  public static final String APP_AM_CONTAINER_PREEMPTED =
+  "YARN_APPLICATION_AM_CONTAINER_PREEMPTED";
 
   public static final String LATEST_APP_ATTEMPT_EVENT_INFO =
   "YARN_APPLICATION_LATEST_APP_ATTEMPT";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b154d3ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index dbdc1a8..f039ebe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.yarn.server.resourcemanager.metrics;
 
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -40,6 +42,7 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import 

hadoop git commit: YARN-5743. [Atsv2] Publish queue name and RMAppMetrics to ATS (Rohith Sharma K S via Varun Saxena)

2016-10-18 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355-branch-2 059fac44e -> fb2090571


YARN-5743. [Atsv2] Publish queue name and RMAppMetrics to ATS (Rohith Sharma K 
S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb209057
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb209057
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb209057

Branch: refs/heads/YARN-5355-branch-2
Commit: fb20905714268a96ef1e21cdfb90612182f885f9
Parents: 059fac4
Author: Varun Saxena 
Authored: Tue Oct 18 23:45:48 2016 +0530
Committer: Varun Saxena 
Committed: Tue Oct 18 23:45:48 2016 +0530

--
 .../metrics/ApplicationMetricsConstants.java| 16 ++-
 .../metrics/TimelineServiceV2Publisher.java | 49 ++--
 .../TestSystemMetricsPublisherForV2.java| 18 ---
 3 files changed, 70 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb209057/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
index d06b7cb..a21256d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
@@ -71,10 +71,22 @@ public class ApplicationMetricsConstants {
   "YARN_APPLICATION_STATE";
   
   public static final String APP_CPU_METRICS =
-  "YARN_APPLICATION_CPU_METRIC";
+  "YARN_APPLICATION_CPU";
   
   public static final String APP_MEM_METRICS =
-  "YARN_APPLICATION_MEM_METRIC";
+  "YARN_APPLICATION_MEMORY";
+
+  public static final String APP_RESOURCE_PREEMPTED_CPU =
+  "YARN_APPLICATION_RESOURCE_PREEMPTED_CPU";
+
+  public static final String APP_RESOURCE_PREEMPTED_MEM =
+  "YARN_APPLICATION_RESOURCE_PREEMPTED_MEMORY";
+
+  public static final String APP_NON_AM_CONTAINER_PREEMPTED =
+  "YARN_APPLICATION_NON_AM_CONTAINER_PREEMPTED";
+
+  public static final String APP_AM_CONTAINER_PREEMPTED =
+  "YARN_APPLICATION_AM_CONTAINER_PREEMPTED";
 
   public static final String LATEST_APP_ATTEMPT_EVENT_INFO =
   "YARN_APPLICATION_LATEST_APP_ATTEMPT";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb209057/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index 3809250..2b6db8f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.yarn.server.resourcemanager.metrics;
 
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,6 +41,7 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import 

hadoop git commit: YARN-5741. [YARN-3368] Update UI2 documentation for new UI2 path (Kai Sasaki and Wangda Tan via Sunil G)

2016-10-18 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3368 b133ccf44 -> 1d6895580


YARN-5741. [YARN-3368] Update UI2 documentation for new UI2 path (Kai Sasaki 
and Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d689558
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d689558
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d689558

Branch: refs/heads/YARN-3368
Commit: 1d6895580f8ddf728c236b7dbe79c436b53bc978
Parents: b133ccf
Author: sunilg 
Authored: Tue Oct 18 23:49:55 2016 +0530
Committer: sunilg 
Committed: Tue Oct 18 23:49:55 2016 +0530

--
 .../src/site/markdown/YarnUI2.md| 28 +---
 1 file changed, 12 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d689558/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
index ff48183..f646d3d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
@@ -18,30 +18,26 @@
 Hadoop: YARN-UI V2
 =
 
-Prerequisites
--
-
-If you run RM locally in your computer for test purpose, you need the 
following things properly installed.
-
-- Install Node.js with NPM: https://nodejs.org/download
-- After Node.js installed, install `corsproxy`: `npm install -g corsproxy`.
-
-
 Configurations
 -
 
-*In yarn-site.xml*
+*In `yarn-site.xml`*
 
 | Configuration Property | Description |
 |: |: |
-| `yarn.resourcemanager.webapp.ui2.enable` | In the server side it indicates 
whether the new YARN-UI v2 is enabled or not. Defaults to `false`. |
-| `yarn.resourcemanager.webapp.ui2.address` | Specify the address of 
ResourceManager and port which host YARN-UI v2, defaults to `localhost:8288`. |
+| `yarn.webapp.ui2.enable` | *(Required)* In the server side it indicates 
whether the new YARN-UI v2 is enabled or not. Defaults to `false`. |
+| `yarn.webapp.ui2.war-file-path` | *(Optional)* WAR file path for launching 
yarn UI2 web application. By default this is empty and YARN will lookup 
required war file from classpath |
 
-*In $HADOOP_PREFIX/share/hadoop/yarn/webapps/rm/config/configs.env*
+Please note that, If you run YARN daemons locally in your machine for test 
purpose,
+you need the following configurations added to `yarn-site.xml` to enable cross
+origin (CORS) support.
 
-- Update timelineWebAddress and rmWebAddress to the actual addresses run 
resource manager and timeline server
-- If you run RM locally in you computer just for test purpose, you need to 
keep `corsproxy` running. Otherwise, you need to set `localBaseAddress` to 
empty.
+| Configuration Property | Value | Description |
+|: |: |: |
+| `yarn.timeline-service.http-cross-origin.enabled` | true | Enable CORS 
support for Timeline Server  |
+| `yarn.resourcemanager.webapp.cross-origin.enabled` | true | Enable CORS 
support for Resource Manager  |
+| `yarn.nodemanager.webapp.cross-origin.enabled` | true | Enable CORS support 
for Node Manager  |
 
 Use it
 -
-Open your browser, go to `rm-address:8288` and try it!
+Open your browser, go to `rm-address:8088/ui2` and try it!
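For a local test setup, the same properties from the tables above can also be set programmatically; a hedged sketch using the key names from this documentation change (the values and the use of YarnConfiguration are illustrative):

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class Ui2Config {
  public static YarnConfiguration withUi2() {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean("yarn.webapp.ui2.enable", true);
    // CORS support is needed when the UI is served from a different origin.
    conf.setBoolean("yarn.timeline-service.http-cross-origin.enabled", true);
    conf.setBoolean("yarn.resourcemanager.webapp.cross-origin.enabled", true);
    conf.setBoolean("yarn.nodemanager.webapp.cross-origin.enabled", true);
    return conf;
  }
}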





hadoop git commit: HDFS-9333. Some tests using MiniDFSCluster errored complaining port in use. (iwasakims)

2016-10-18 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 bd1676359 -> fd3df4bb0


HDFS-9333. Some tests using MiniDFSCluster errored complaining port in use. 
(iwasakims)

(cherry picked from commit 964e546ab1dba5f5d53b209ec6c9a70a85654765)

 Conflicts:
  
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java

(cherry picked from commit 1b91ebb7145860ecda394d764da5fc7063406fee)
(cherry picked from commit 1524e6320659df7d066ac96fd2c190d7a7c5b932)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd3df4bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd3df4bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd3df4bb

Branch: refs/heads/branch-2.7
Commit: fd3df4bb041ddc8ac3602c426c924254367f0952
Parents: bd16763
Author: Masatake Iwasaki 
Authored: Wed Sep 21 10:35:25 2016 +0900
Committer: Zhe Zhang 
Committed: Tue Oct 18 10:32:16 2016 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../blockmanagement/TestBlockTokenWithDFS.java|  8 +++-
 .../namenode/ha/TestDFSZKFailoverController.java  | 18 +-
 3 files changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd3df4bb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1776a49..03eca7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -160,6 +160,9 @@ Release 2.7.4 - UNRELEASED
 HDFS-10879. TestEncryptionZonesWithKMS#testReadWrite fails intermittently.
 (xiaochen)
 
+HDFS-9333. Some tests using MiniDFSCluster errored complaining port in
+use. (iwasakims)
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd3df4bb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index b15cb38..0de54ec 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.token.Token;
 import org.apache.log4j.Level;
 import org.junit.Assert;
@@ -341,7 +342,12 @@ public class TestBlockTokenWithDFS {
 Configuration conf = getConf(numDataNodes);
 
 try {
-  cluster = new 
MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
+  // prefer non-ephemeral port to avoid port collision on restartNameNode
+  cluster = new MiniDFSCluster.Builder(conf)
+  .nameNodePort(ServerSocketUtil.getPort(19820, 100))
+  .nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100))
+  .numDataNodes(numDataNodes)
+  .build();
   cluster.waitActive();
   assertEquals(numDataNodes, cluster.getDataNodes().size());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd3df4bb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
index bcbd543..782f80c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
@@ -39,6 +39,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
 import 
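
The pattern applied above generalizes to other MiniDFSCluster tests; a minimal sketch assuming a standard JUnit test class (the port numbers are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.net.ServerSocketUtil;
import org.junit.Test;

public class TestStableNameNodePorts {
  @Test
  public void testRestartWithoutPortCollision() throws Exception {
    Configuration conf = new Configuration();
    // Probe for free non-ephemeral ports so a NameNode restart does not race
    // with the OS handing the same ephemeral port to another process.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nameNodePort(ServerSocketUtil.getPort(19820, 100))
        .nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100))
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      cluster.restartNameNode();
    } finally {
      cluster.shutdown();
    }
  }
}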

hadoop git commit: YARN-5718. TimelineClient (and other places in YARN) shouldn't over-write HDFS client retry settings which could cause unexpected behavior. Contributed by Junping Du.

2016-10-18 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk b154d3edc -> b733a6f86


YARN-5718. TimelineClient (and other places in YARN) shouldn't over-write HDFS 
client retry settings which could cause unexpected behavior. Contributed by 
Junping Du.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b733a6f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b733a6f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b733a6f8

Branch: refs/heads/trunk
Commit: b733a6f86262522e535cebc972baecbe6a6eab50
Parents: b154d3e
Author: Xuan 
Authored: Tue Oct 18 11:04:49 2016 -0700
Committer: Xuan 
Committed: Tue Oct 18 11:06:47 2016 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 17 +
 .../api/impl/FileSystemTimelineWriter.java  |  7 ---
 .../nodelabels/FileSystemNodeLabelsStore.java   |  7 +--
 .../src/main/resources/yarn-default.xml | 20 
 .../recovery/FileSystemRMStateStore.java|  5 -
 .../recovery/TestFSRMStateStore.java|  4 
 6 files changed, 2 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b733a6f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 3bd0dcc..1a30c32 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -695,10 +695,6 @@ public class YarnConfiguration extends Configuration {
   /** URI for FileSystemRMStateStore */
   public static final String FS_RM_STATE_STORE_URI = RM_PREFIX
   + "fs.state-store.uri";
-  public static final String FS_RM_STATE_STORE_RETRY_POLICY_SPEC = RM_PREFIX
-  + "fs.state-store.retry-policy-spec";
-  public static final String DEFAULT_FS_RM_STATE_STORE_RETRY_POLICY_SPEC =
-  "2000, 500";
 
   public static final String FS_RM_STATE_STORE_NUM_RETRIES =
   RM_PREFIX + "fs.state-store.num-retries";
@@ -1974,13 +1970,6 @@ public class YarnConfiguration extends Configuration {
   TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_UNKNOWN_ACTIVE_SECONDS_DEFAULT
   = 24 * 60 * 60;
 
-  public static final String
-  TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC =
-  TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX + "retry-policy-spec";
-  public static final String
-  DEFAULT_TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC =
-  "2000, 500";
-
   public static final String TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE =
   TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX
   + "leveldb-cache-read-cache-size";
@@ -2600,11 +2589,7 @@ public class YarnConfiguration extends Configuration {
   /** URI for NodeLabelManager */
   public static final String FS_NODE_LABELS_STORE_ROOT_DIR = NODE_LABELS_PREFIX
   + "fs-store.root-dir";
-  public static final String FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC =
-  NODE_LABELS_PREFIX + "fs-store.retry-policy-spec";
-  public static final String DEFAULT_FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC =
-  "2000, 500";
-  
+
   /**
* Flag to indicate if the node labels feature enabled, by default it's
* disabled

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b733a6f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
index 55d6bd2..b1284e1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
@@ -106,13 +106,6 @@ public class FileSystemTimelineWriter extends 
TimelineWriter{
 super(authUgi, client, resURI);
 
 Configuration fsConf = new Configuration(conf);
-fsConf.setBoolean("dfs.client.retry.policy.enabled", true);
-String retryPolicy =
-
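
The effect of the change, sketched for illustration only (this is not the writer's exact code): the copied configuration no longer forces any dfs.client.* retry keys, so whatever the cluster administrator configures for the HDFS client is what takes effect.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class RetryPassThroughSketch {
  public static FileSystem openFs(Configuration conf, URI uri) throws Exception {
    Configuration fsConf = new Configuration(conf);
    // Before YARN-5718 the writer set dfs.client.retry.policy.enabled=true and
    // its own retry-policy-spec here; after it, no HDFS retry keys are touched.
    return FileSystem.newInstance(uri, fsConf);
  }
}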

hadoop git commit: HADOOP-13535. Add jetty6 acceptor startup issue workaround to branch-2. Contributed by Min Shen.

2016-10-18 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 b2dfab432 -> f27cf17f7


HADOOP-13535. Add jetty6 acceptor startup issue workaround to branch-2. 
Contributed by Min Shen.

(cherry picked from commit 23984e17870bcff6fffd8e1ca185fd37e2352b29)
(cherry picked from commit 6ca5ffe4b52c13e71027c465c9a757724f6b091d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f27cf17f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f27cf17f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f27cf17f

Branch: refs/heads/branch-2.7
Commit: f27cf17f7e82f875e49d2e55f70c24cb941b4716
Parents: b2dfab4
Author: Wei-Chiu Chuang 
Authored: Tue Oct 18 10:03:46 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Oct 18 10:03:46 2016 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../ssl/SslSelectChannelConnectorSecure.java| 29 
 2 files changed, 32 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f27cf17f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 556b9dc..4f16aa6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -67,6 +67,9 @@ Release 2.7.4 - UNRELEASED
 HADOOP-13670. Update CHANGES.txt to reflect all the changes in branch-2.7.
 (Brahma Reddy Battula)
 
+HADOOP-13535. Add jetty6 acceptor startup issue workaround to branch-2.
+(Min Shen).
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f27cf17f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SslSelectChannelConnectorSecure.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SslSelectChannelConnectorSecure.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SslSelectChannelConnectorSecure.java
index 7de689b..7ff2292 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SslSelectChannelConnectorSecure.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SslSelectChannelConnectorSecure.java
@@ -23,6 +23,8 @@ import java.util.ArrayList;
 
 import javax.net.ssl.SSLEngine;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.mortbay.jetty.security.SslSelectChannelConnector;
 
@@ -34,6 +36,8 @@ import org.mortbay.jetty.security.SslSelectChannelConnector;
  */
 @InterfaceAudience.Private
 public class SslSelectChannelConnectorSecure extends SslSelectChannelConnector 
{
+  public static final Log LOG =
+  LogFactory.getLog(SslSelectChannelConnectorSecure.class);
 
   public SslSelectChannelConnectorSecure() {
 super();
@@ -55,4 +59,29 @@ public class SslSelectChannelConnectorSecure extends 
SslSelectChannelConnector {
 new String[nonSSLProtocols.size()]));
 return engine;
   }
+
+  /* Override the broken isRunning() method (JETTY-1316). This bug is present
+   * in 6.1.26. For the versions without this bug, it adds insignificant
+   * overhead.
+   */
+  @Override
+  public boolean isRunning() {
+if (super.isRunning()) {
+  return true;
+}
+// We might be hitting JETTY-1316. If the internal state changed from
+// STARTING to STARTED in the middle of the check, the above call may
+// return false.  Check it one more time.
+LOG.warn("HttpServer Acceptor: isRunning is false. Rechecking.");
+try {
+  Thread.sleep(10);
+} catch (InterruptedException ie) {
+  // Mark this thread as interrupted. Someone up in the call chain
+  // might care.
+  Thread.currentThread().interrupt();
+}
+boolean runState = super.isRunning();
+LOG.warn("HttpServer Acceptor: isRunning is " + runState);
+return runState;
+  }
 }





hadoop git commit: Update CHANGES.txt for HDFS-10712 and HDFS-10301. (shv)

2016-10-18 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 fd3df4bb0 -> 4aea76067


Update CHANGES.txt for HDFS-10712 and HDFS-10301. (shv)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4aea7606
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4aea7606
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4aea7606

Branch: refs/heads/branch-2.7
Commit: 4aea760676f740c93de74ef510256ee5a02fdd7c
Parents: fd3df4b
Author: Konstantin V Shvachko 
Authored: Tue Oct 18 10:33:58 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Tue Oct 18 10:37:01 2016 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aea7606/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 03eca7a..c9333a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -52,6 +52,9 @@ Release 2.7.4 - UNRELEASED
 
 HDFS-8824. Do not use small blocks for balancing the cluster.  (szetszwo)
 
+HDFS-10712. TestDataNodeVolumeFailure should pass not null
+BlockReportContext. (Vinitha Gankidi via shv)
+
   OPTIMIZATIONS
 
 HDFS-10896. Move lock logging logic from FSNamesystem into 
FSNamesystemLock.
@@ -163,6 +166,9 @@ Release 2.7.4 - UNRELEASED
 HDFS-9333. Some tests using MiniDFSCluster errored complaining port in
 use. (iwasakims)
 
+HDFS-10301. Remove FBR tracking state to fix false zombie storage
+detection for interleaving block reports. (Vinitha Gankidi via shv)
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES





hadoop git commit: YARN-5743. [Atsv2] Publish queue name and RMAppMetrics to ATS (Rohith Sharma K S via Varun Saxena)

2016-10-18 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 787d59420 -> 70979018e


YARN-5743. [Atsv2] Publish queue name and RMAppMetrics to ATS (Rohith Sharma K 
S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/70979018
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/70979018
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/70979018

Branch: refs/heads/YARN-5355
Commit: 70979018e125a417e376cf746c79620711cc12b6
Parents: 787d594
Author: Varun Saxena 
Authored: Tue Oct 18 23:44:34 2016 +0530
Committer: Varun Saxena 
Committed: Tue Oct 18 23:44:34 2016 +0530

--
 .../metrics/ApplicationMetricsConstants.java| 16 ++-
 .../metrics/TimelineServiceV2Publisher.java | 49 ++--
 .../TestSystemMetricsPublisherForV2.java| 18 ---
 3 files changed, 70 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/70979018/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
index d06b7cb..a21256d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
@@ -71,10 +71,22 @@ public class ApplicationMetricsConstants {
   "YARN_APPLICATION_STATE";
   
   public static final String APP_CPU_METRICS =
-  "YARN_APPLICATION_CPU_METRIC";
+  "YARN_APPLICATION_CPU";
   
   public static final String APP_MEM_METRICS =
-  "YARN_APPLICATION_MEM_METRIC";
+  "YARN_APPLICATION_MEMORY";
+
+  public static final String APP_RESOURCE_PREEMPTED_CPU =
+  "YARN_APPLICATION_RESOURCE_PREEMPTED_CPU";
+
+  public static final String APP_RESOURCE_PREEMPTED_MEM =
+  "YARN_APPLICATION_RESOURCE_PREEMPTED_MEMORY";
+
+  public static final String APP_NON_AM_CONTAINER_PREEMPTED =
+  "YARN_APPLICATION_NON_AM_CONTAINER_PREEMPTED";
+
+  public static final String APP_AM_CONTAINER_PREEMPTED =
+  "YARN_APPLICATION_AM_CONTAINER_PREEMPTED";
 
   public static final String LATEST_APP_ATTEMPT_EVENT_INFO =
   "YARN_APPLICATION_LATEST_APP_ATTEMPT";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70979018/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index 3809250..2b6db8f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.yarn.server.resourcemanager.metrics;
 
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,6 +41,7 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import 
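
A rough sketch of what publishing the renamed metrics looks like on the ATSv2 API (assumed usage, not the publisher's exact code; appId and vcoreSeconds are placeholders):

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;

public class AppMetricsSketch {
  public static TimelineEntity toEntity(ApplicationId appId, long vcoreSeconds) {
    TimelineEntity entity = new TimelineEntity();
    entity.setType(TimelineEntityType.YARN_APPLICATION.toString());
    entity.setId(appId.toString());

    TimelineMetric cpu = new TimelineMetric();
    cpu.setId(ApplicationMetricsConstants.APP_CPU_METRICS); // now "YARN_APPLICATION_CPU"
    cpu.addValue(System.currentTimeMillis(), vcoreSeconds);

    Set<TimelineMetric> metrics = new HashSet<>();
    metrics.add(cpu);
    entity.setMetrics(metrics);
    return entity;
  }
}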

hadoop git commit: HADOOP-13522. Add %A and %a formats for fs -stat command to print permissions. Contributed by Alex Garbarini.

2016-10-18 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0bc6d37f3 -> bedfec0c1


HADOOP-13522. Add %A and %a formats for fs -stat command to print permissions. 
Contributed by Alex Garbarini.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bedfec0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bedfec0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bedfec0c

Branch: refs/heads/trunk
Commit: bedfec0c10144087168bc79501ffd5ab4fa52606
Parents: 0bc6d37
Author: Akira Ajisaka 
Authored: Tue Oct 18 14:37:32 2016 +0900
Committer: Akira Ajisaka 
Committed: Tue Oct 18 15:00:44 2016 +0900

--
 .../hadoop/fs/permission/FsPermission.java  | 12 
 .../java/org/apache/hadoop/fs/shell/Stat.java   | 11 ++-
 .../src/site/markdown/FileSystemShell.md|  4 ++--
 .../src/test/resources/testConf.xml |  6 +-
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 20 +---
 5 files changed, 46 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bedfec0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index 48a5b1c..fabfc12 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -183,6 +183,18 @@ public class FsPermission implements Writable {
 return toShort();
   }
 
+  /**
+   * Returns the FsPermission in an octal format.
+   *
+   * @return short Unlike {@link #toShort()} which provides a binary
+   * representation, this method returns the standard octal style permission.
+   */
+  public short toOctal() {
+int n = this.toShort();
+int octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
+return (short)octal;
+  }
+
   @Override
   public boolean equals(Object obj) {
 if (obj instanceof FsPermission) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bedfec0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
index 458d3ee..42f7843 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.fs.FileStatus;
 /**
  * Print statistics about path in specified format.
  * Format sequences:
+ *   %a: Permissions in octal
+ *   %A: Permissions in symbolic style
  *   %b: Size of file in blocks
  *   %F: Type
  *   %g: Group name of owner
@@ -56,7 +58,8 @@ class Stat extends FsCommand {
   public static final String USAGE = "[format]  ...";
   public static final String DESCRIPTION =
 "Print statistics about the file/directory at " + NEWLINE +
-"in the specified format. Format accepts filesize in" + NEWLINE +
+"in the specified format. Format accepts permissions in" + NEWLINE +
+"octal (%a) and symbolic (%A), filesize in" + NEWLINE +
 "blocks (%b), type (%F), group name of owner (%g)," + NEWLINE +
 "name (%n), block size (%o), replication (%r), user name" + NEWLINE +
 "of owner (%u), modification date (%y, %Y)." + NEWLINE +
@@ -95,6 +98,12 @@ class Stat extends FsCommand {
 // this silently drops a trailing %?
 if (i + 1 == fmt.length) break;
 switch (fmt[++i]) {
+  case 'a':
+buf.append(stat.getPermission().toOctal());
+break;
+  case 'A':
+buf.append(stat.getPermission());
+break;
   case 'b':
 buf.append(stat.getLen());
 break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bedfec0c/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index ee7bc28..060c775 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ 
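
A small, self-contained illustration of what the two new format sequences print (hedged example, values chosen for illustration): %a maps to FsPermission#toOctal() and %A to the symbolic toString() form.

import org.apache.hadoop.fs.permission.FsPermission;

public class StatFormatExample {
  public static void main(String[] args) {
    FsPermission perm = new FsPermission((short) 0644);
    System.out.println(perm.toOctal()); // 644        -> what "%a" produces
    System.out.println(perm);           // rw-r--r--  -> what "%A" produces
  }
}

On the command line this corresponds to something like `hadoop fs -stat "%a %A %n" /some/path`, where the path is illustrative.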

hadoop git commit: HADOOP-13522. Add %A and %a formats for fs -stat command to print permissions. Contributed by Alex Garbarini.

2016-10-18 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 278eeb105 -> b2618685f


HADOOP-13522. Add %A and %a formats for fs -stat command to print permissions. 
Contributed by Alex Garbarini.

(cherry picked from commit bedfec0c10144087168bc79501ffd5ab4fa52606)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2618685
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2618685
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2618685

Branch: refs/heads/branch-2
Commit: b2618685fb8defd158ab06656191f958fba09043
Parents: 278eeb1
Author: Akira Ajisaka 
Authored: Tue Oct 18 14:37:32 2016 +0900
Committer: Akira Ajisaka 
Committed: Tue Oct 18 15:03:33 2016 +0900

--
 .../hadoop/fs/permission/FsPermission.java  | 12 
 .../java/org/apache/hadoop/fs/shell/Stat.java   | 11 ++-
 .../src/site/markdown/FileSystemShell.md|  4 ++--
 .../src/test/resources/testConf.xml |  6 +-
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 20 +---
 5 files changed, 46 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2618685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index 0258293..c4e377a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -169,6 +169,18 @@ public class FsPermission implements Writable {
 return toShort();
   }
 
+  /**
+   * Returns the FsPermission in an octal format.
+   *
+   * @return short Unlike {@link #toShort()} which provides a binary
+   * representation, this method returns the standard octal style permission.
+   */
+  public short toOctal() {
+int n = this.toShort();
+int octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
+return (short)octal;
+  }
+
   @Override
   public boolean equals(Object obj) {
 if (obj instanceof FsPermission) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2618685/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
index 458d3ee..42f7843 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.fs.FileStatus;
 /**
  * Print statistics about path in specified format.
  * Format sequences:
+ *   %a: Permissions in octal
+ *   %A: Permissions in symbolic style
  *   %b: Size of file in blocks
  *   %F: Type
  *   %g: Group name of owner
@@ -56,7 +58,8 @@ class Stat extends FsCommand {
   public static final String USAGE = "[format]  ...";
   public static final String DESCRIPTION =
 "Print statistics about the file/directory at " + NEWLINE +
-"in the specified format. Format accepts filesize in" + NEWLINE +
+"in the specified format. Format accepts permissions in" + NEWLINE +
+"octal (%a) and symbolic (%A), filesize in" + NEWLINE +
 "blocks (%b), type (%F), group name of owner (%g)," + NEWLINE +
 "name (%n), block size (%o), replication (%r), user name" + NEWLINE +
 "of owner (%u), modification date (%y, %Y)." + NEWLINE +
@@ -95,6 +98,12 @@ class Stat extends FsCommand {
 // this silently drops a trailing %?
 if (i + 1 == fmt.length) break;
 switch (fmt[++i]) {
+  case 'a':
+buf.append(stat.getPermission().toOctal());
+break;
+  case 'A':
+buf.append(stat.getPermission());
+break;
   case 'b':
 buf.append(stat.getLen());
 break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2618685/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 1d4174c..df0bb6c 100644
--- 

[1/2] hadoop git commit: HADOOP-13061. Refactor erasure coders. Contributed by Kai Sasaki

2016-10-18 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/trunk bedfec0c1 -> c023c7488


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c023c748/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
index afaaf24..6e679c3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
@@ -32,15 +31,11 @@ import 
org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
  * It implements {@link ErasureCoder}.
  */
 @InterfaceAudience.Private
-public class RSErasureDecoder extends AbstractErasureDecoder {
+public class RSErasureDecoder extends ErasureDecoder {
   private RawErasureDecoder rsRawDecoder;
 
-  public RSErasureDecoder(int numDataUnits, int numParityUnits) {
-super(numDataUnits, numParityUnits);
-  }
-
-  public RSErasureDecoder(ECSchema schema) {
-super(schema);
+  public RSErasureDecoder(ErasureCoderOptions options) {
+super(options);
   }
 
   @Override
@@ -56,11 +51,8 @@ public class RSErasureDecoder extends AbstractErasureDecoder 
{
 
   private RawErasureDecoder checkCreateRSRawDecoder() {
 if (rsRawDecoder == null) {
-  // TODO: we should create the raw coder according to codec.
-  ErasureCoderOptions coderOptions = new ErasureCoderOptions(
-  getNumDataUnits(), getNumParityUnits());
   rsRawDecoder = CodecUtil.createRawDecoder(getConf(),
-  ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
+  ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
 }
 return rsRawDecoder;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c023c748/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
index 2139113..7a09b92 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
@@ -32,15 +31,11 @@ import 
org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
  * It implements {@link ErasureCoder}.
  */
 @InterfaceAudience.Private
-public class RSErasureEncoder extends AbstractErasureEncoder {
+public class RSErasureEncoder extends ErasureEncoder {
   private RawErasureEncoder rawEncoder;
 
-  public RSErasureEncoder(int numDataUnits, int numParityUnits) {
-super(numDataUnits, numParityUnits);
-  }
-
-  public RSErasureEncoder(ECSchema schema) {
-super(schema);
+  public RSErasureEncoder(ErasureCoderOptions options) {
+super(options);
   }
 
   @Override
@@ -57,10 +52,8 @@ public class RSErasureEncoder extends AbstractErasureEncoder 
{
   private RawErasureEncoder checkCreateRSRawEncoder() {
 if (rawEncoder == null) {
   // TODO: we should create the raw coder according to codec.
-  ErasureCoderOptions coderOptions = new ErasureCoderOptions(
-  getNumDataUnits(), getNumParityUnits());
   rawEncoder = CodecUtil.createRawEncoder(getConf(),
-  ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
+  ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
 }
 return rawEncoder;
   }
@@ -71,4 +64,9 @@ public class RSErasureEncoder extends 

[2/2] hadoop git commit: HADOOP-13061. Refactor erasure coders. Contributed by Kai Sasaki

2016-10-18 Thread drankye
HADOOP-13061. Refactor erasure coders. Contributed by Kai Sasaki


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c023c748
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c023c748
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c023c748

Branch: refs/heads/trunk
Commit: c023c748869063fb67d14ea996569c42578d1cea
Parents: bedfec0
Author: Kai Zheng 
Authored: Tue Oct 18 12:02:53 2016 +0600
Committer: Kai Zheng 
Committed: Tue Oct 18 12:02:53 2016 +0600

--
 .../hadoop/fs/CommonConfigurationKeys.java  |  26 ---
 .../apache/hadoop/io/erasurecode/CodecUtil.java | 168 ++--
 .../io/erasurecode/ErasureCodeConstants.java|   3 +-
 .../io/erasurecode/ErasureCodecOptions.java |  37 
 .../erasurecode/codec/AbstractErasureCodec.java |  53 -
 .../io/erasurecode/codec/DummyErasureCodec.java |  45 +
 .../io/erasurecode/codec/ErasureCodec.java  |  76 +--
 .../io/erasurecode/codec/HHXORErasureCodec.java |  20 +-
 .../io/erasurecode/codec/RSErasureCodec.java|  20 +-
 .../io/erasurecode/codec/XORErasureCodec.java   |  22 ++-
 .../io/erasurecode/codec/package-info.java  |  28 +++
 .../erasurecode/coder/AbstractErasureCoder.java |  64 --
 .../coder/AbstractErasureCodingStep.java|  61 --
 .../coder/AbstractErasureDecoder.java   | 170 
 .../coder/AbstractErasureEncoder.java   |  62 --
 .../coder/AbstractHHErasureCodingStep.java  |  49 -
 .../erasurecode/coder/DummyErasureDecoder.java  |  46 +
 .../erasurecode/coder/DummyErasureEncoder.java  |  45 +
 .../io/erasurecode/coder/ErasureCoder.java  |  25 ++-
 .../io/erasurecode/coder/ErasureCodingStep.java |   8 +-
 .../io/erasurecode/coder/ErasureDecoder.java| 198 +++
 .../erasurecode/coder/ErasureDecodingStep.java  |  21 +-
 .../io/erasurecode/coder/ErasureEncoder.java|  91 +
 .../erasurecode/coder/ErasureEncodingStep.java  |  22 ++-
 .../erasurecode/coder/HHErasureCodingStep.java  |  68 +++
 .../erasurecode/coder/HHXORErasureDecoder.java  |  24 +--
 .../coder/HHXORErasureDecodingStep.java |   2 +-
 .../erasurecode/coder/HHXORErasureEncoder.java  |  19 +-
 .../coder/HHXORErasureEncodingStep.java |   2 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  |  16 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |  20 +-
 .../io/erasurecode/coder/XORErasureDecoder.java |  15 +-
 .../io/erasurecode/coder/XORErasureEncoder.java |  16 +-
 .../io/erasurecode/coder/package-info.java  |  28 +++
 .../io/erasurecode/rawcoder/CoderUtil.java  |   2 +-
 .../conf/TestCommonConfigurationFields.java |   5 +-
 .../erasurecode/TestCodecRawCoderMapping.java   |   3 +-
 .../codec/TestHHXORErasureCodec.java|   6 +-
 .../erasurecode/coder/TestErasureCoderBase.java |  13 +-
 .../coder/TestHHXORErasureCoder.java|   4 +-
 .../erasurecode/coder/TestRSErasureCoder.java   |   4 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |   3 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   6 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |   4 +-
 .../hadoop/hdfs/TestReconstructStripedFile.java |   4 +-
 45 files changed, 964 insertions(+), 660 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c023c748/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 2b530f0..fe522b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -21,9 +21,6 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.http.lib.StaticUserWebFilter;
-import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory;
-import 
org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactoryLegacy;
-import org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory;
 
 /** 
  * This class contains constants for configuration keys used
@@ -160,30 +157,7 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   public static final boolean IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT =
   false;
 
-  /**
-   * Erasure Coding configuration family
-   */
 
-  /** Supported erasure codec 
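
For downstream code, the visible API shift in this refactor is the coder constructors; a hedged migration sketch (the 6+3 RS layout is only an example):

import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
import org.apache.hadoop.io.erasurecode.coder.RSErasureDecoder;
import org.apache.hadoop.io.erasurecode.coder.RSErasureEncoder;

public class CoderConstructionSketch {
  public static void build() {
    // Before: new RSErasureEncoder(6, 3) or new RSErasureEncoder(schema).
    // After:  data/parity unit counts travel in ErasureCoderOptions.
    ErasureCoderOptions options = new ErasureCoderOptions(6, 3);
    RSErasureEncoder encoder = new RSErasureEncoder(options);
    RSErasureDecoder decoder = new RSErasureDecoder(options);
    // encoder.calculateCoding(blockGroup) / decoder.calculateCoding(blockGroup)
    // are then driven the same way as before the refactor.
  }
}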

[04/12] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread stevel
HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. 
Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc176961
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc176961
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc176961

Branch: refs/heads/branch-2
Commit: bc176961e674c8a770d96164602fb565fdeb1fb0
Parents: 3972bb3
Author: Steve Loughran 
Authored: Tue Oct 18 19:33:38 2016 +0100
Committer: Steve Loughran 
Committed: Tue Oct 18 19:33:38 2016 +0100

--
 .../src/main/resources/core-default.xml |  74 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |  16 +-
 hadoop-tools/hadoop-aws/pom.xml |  58 +-
 .../s3a/BlockingThreadPoolExecutorService.java  | 184 +
 .../org/apache/hadoop/fs/s3a/Constants.java |  71 +-
 .../hadoop/fs/s3a/S3ABlockOutputStream.java | 703 
 .../org/apache/hadoop/fs/s3a/S3ADataBlocks.java | 821 +++
 .../hadoop/fs/s3a/S3AFastOutputStream.java  | 410 -
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 418 --
 .../hadoop/fs/s3a/S3AInstrumentation.java   | 248 +-
 .../apache/hadoop/fs/s3a/S3AOutputStream.java   |  57 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  39 +
 .../fs/s3a/SemaphoredDelegatingExecutor.java| 230 ++
 .../org/apache/hadoop/fs/s3a/Statistic.java |  32 +-
 .../src/site/markdown/tools/hadoop-aws/index.md | 668 +--
 .../fs/contract/s3a/ITestS3AContractDistCp.java |  10 +-
 .../hadoop/fs/s3a/AbstractS3ATestBase.java  |   1 +
 .../ITestBlockingThreadPoolExecutorService.java |  48 +-
 .../hadoop/fs/s3a/ITestS3ABlockOutputArray.java |  90 ++
 .../fs/s3a/ITestS3ABlockOutputByteBuffer.java   |  30 +
 .../hadoop/fs/s3a/ITestS3ABlockOutputDisk.java  |  30 +
 .../fs/s3a/ITestS3ABlockingThreadPool.java  |   2 +
 .../hadoop/fs/s3a/ITestS3AConfiguration.java|  29 +
 .../ITestS3AEncryptionBlockOutputStream.java|  36 +
 .../s3a/ITestS3AEncryptionFastOutputStream.java |  35 -
 .../hadoop/fs/s3a/ITestS3AFastOutputStream.java |  74 --
 .../apache/hadoop/fs/s3a/ITestS3ATestUtils.java |  98 +++
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  |  75 +-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  | 148 +++-
 .../apache/hadoop/fs/s3a/TestDataBlocks.java| 124 +++
 .../ITestS3AFileContextStatistics.java  |   1 +
 .../fs/s3a/scale/AbstractSTestS3AHugeFiles.java | 412 ++
 .../fs/s3a/scale/ITestS3ADeleteManyFiles.java   |  19 +-
 .../s3a/scale/ITestS3AHugeFilesArrayBlocks.java |  31 +
 .../ITestS3AHugeFilesByteBufferBlocks.java  |  34 +
 .../scale/ITestS3AHugeFilesClassicOutput.java   |  41 +
 .../s3a/scale/ITestS3AHugeFilesDiskBlocks.java  |  31 +
 .../hadoop/fs/s3a/scale/S3AScaleTestBase.java   | 151 ++--
 38 files changed, 4655 insertions(+), 924 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc176961/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index e8db5d7..b4d019b 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1003,8 +1003,8 @@
 
   fs.s3a.threads.max
   10
-   Maximum number of concurrent active (part)uploads,
-which each use a thread from the threadpool.
+  The total number of threads available in the filesystem for data
+uploads *or any other queued filesystem operation*.
 
 
 
@@ -1017,8 +1017,7 @@
 
   fs.s3a.max.total.tasks
   5
-  Number of (part)uploads allowed to the queue before
-blocking additional uploads.
+  The number of operations which can be queued for 
execution
 
 
 
@@ -1056,13 +1055,21 @@
   fs.s3a.multipart.purge
   false
   True if you want to purge existing multipart uploads that may 
not have been
- completed/aborted correctly
+completed/aborted correctly. The corresponding purge age is defined in
+fs.s3a.multipart.purge.age.
+If set, when the filesystem is instantiated then all outstanding uploads
+older than the purge age will be terminated -across the entire bucket.
+This will impact multipart uploads by other applications and users. so 
should
+be used sparingly, with an age value chosen to stop failed uploads, without
+breaking ongoing operations.
+  
 
 
 
   fs.s3a.multipart.purge.age
   86400
-  Minimum age in seconds of multipart uploads to 
purge
+  Minimum age in seconds of multipart uploads to purge.
+  
 
 
 
@@ -1095,10 +1102,50 @@
 
  

[10/12] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c348c56/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index cf785d5..c23e782 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -1,3 +1,4 @@
+
 

[11/12] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c348c56/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
deleted file mode 100644
index c25d0fb..000
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
+++ /dev/null
@@ -1,410 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import com.amazonaws.AmazonClientException;
-import com.amazonaws.event.ProgressEvent;
-import com.amazonaws.event.ProgressListener;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
-import com.amazonaws.services.s3.model.CannedAccessControlList;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
-import com.amazonaws.services.s3.model.ObjectMetadata;
-import com.amazonaws.services.s3.model.PartETag;
-import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.UploadPartRequest;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.util.Progressable;
-import org.slf4j.Logger;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.List;
-
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-
-import static org.apache.hadoop.fs.s3a.S3AUtils.*;
-import static org.apache.hadoop.fs.s3a.Statistic.*;
-
-/**
- * Upload files/parts asap directly from a memory buffer (instead of buffering
- * to a file).
- * 
- * Uploads are managed low-level rather than through the AWS TransferManager.
- * This allows for uploading each part of a multi-part upload as soon as
- * the bytes are in memory, rather than waiting until the file is closed.
- * 
- * Unstable: statistics and error handling might evolve
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class S3AFastOutputStream extends OutputStream {
-
-  private static final Logger LOG = S3AFileSystem.LOG;
-  private final String key;
-  private final String bucket;
-  private final AmazonS3 client;
-  private final int partSize;
-  private final int multiPartThreshold;
-  private final S3AFileSystem fs;
-  private final CannedAccessControlList cannedACL;
-  private final ProgressListener progressListener;
-  private final ListeningExecutorService executorService;
-  private MultiPartUpload multiPartUpload;
-  private boolean closed;
-  private ByteArrayOutputStream buffer;
-  private int bufferLimit;
-
-
-  /**
-   * Creates a fast OutputStream that uploads to S3 from memory.
-   * For MultiPartUploads, as soon as sufficient bytes have been written to
-   * the stream a part is uploaded immediately (by using the low-level
-   * multi-part upload API on the AmazonS3Client).
-   *
-   * @param client AmazonS3Client used for S3 calls
-   * @param fs S3AFilesystem
-   * @param bucket S3 bucket name
-   * @param key S3 key name
-   * @param progress report progress in order to prevent timeouts
-   * @param cannedACL used CannedAccessControlList
-   * @param partSize size of a single part in a multi-part upload (except
-   * last part)
-   * @param multiPartThreshold files at least this size use multi-part upload
-   * @param threadPoolExecutor thread factory
-   * @throws IOException on any problem
-   */
-  public S3AFastOutputStream(AmazonS3 client,
- 

[03/12] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc176961/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
deleted file mode 100644
index c25d0fb..000
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
+++ /dev/null
@@ -1,410 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import com.amazonaws.AmazonClientException;
-import com.amazonaws.event.ProgressEvent;
-import com.amazonaws.event.ProgressListener;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
-import com.amazonaws.services.s3.model.CannedAccessControlList;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
-import com.amazonaws.services.s3.model.ObjectMetadata;
-import com.amazonaws.services.s3.model.PartETag;
-import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.UploadPartRequest;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.util.Progressable;
-import org.slf4j.Logger;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.List;
-
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-
-import static org.apache.hadoop.fs.s3a.S3AUtils.*;
-import static org.apache.hadoop.fs.s3a.Statistic.*;
-
-/**
- * Upload files/parts asap directly from a memory buffer (instead of buffering
- * to a file).
- * 
- * Uploads are managed low-level rather than through the AWS TransferManager.
- * This allows for uploading each part of a multi-part upload as soon as
- * the bytes are in memory, rather than waiting until the file is closed.
- * 
- * Unstable: statistics and error handling might evolve
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class S3AFastOutputStream extends OutputStream {
-
-  private static final Logger LOG = S3AFileSystem.LOG;
-  private final String key;
-  private final String bucket;
-  private final AmazonS3 client;
-  private final int partSize;
-  private final int multiPartThreshold;
-  private final S3AFileSystem fs;
-  private final CannedAccessControlList cannedACL;
-  private final ProgressListener progressListener;
-  private final ListeningExecutorService executorService;
-  private MultiPartUpload multiPartUpload;
-  private boolean closed;
-  private ByteArrayOutputStream buffer;
-  private int bufferLimit;
-
-
-  /**
-   * Creates a fast OutputStream that uploads to S3 from memory.
-   * For MultiPartUploads, as soon as sufficient bytes have been written to
-   * the stream a part is uploaded immediately (by using the low-level
-   * multi-part upload API on the AmazonS3Client).
-   *
-   * @param client AmazonS3Client used for S3 calls
-   * @param fs S3AFilesystem
-   * @param bucket S3 bucket name
-   * @param key S3 key name
-   * @param progress report progress in order to prevent timeouts
-   * @param cannedACL used CannedAccessControlList
-   * @param partSize size of a single part in a multi-part upload (except
-   * last part)
-   * @param multiPartThreshold files at least this size use multi-part upload
-   * @param threadPoolExecutor thread factory
-   * @throws IOException on any problem
-   */
-  public S3AFastOutputStream(AmazonS3 client,
- 

[07/12] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ecbf323/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
deleted file mode 100644
index c25d0fb..000
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
+++ /dev/null
@@ -1,410 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import com.amazonaws.AmazonClientException;
-import com.amazonaws.event.ProgressEvent;
-import com.amazonaws.event.ProgressListener;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
-import com.amazonaws.services.s3.model.CannedAccessControlList;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
-import com.amazonaws.services.s3.model.ObjectMetadata;
-import com.amazonaws.services.s3.model.PartETag;
-import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.UploadPartRequest;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.util.Progressable;
-import org.slf4j.Logger;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.List;
-
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-
-import static org.apache.hadoop.fs.s3a.S3AUtils.*;
-import static org.apache.hadoop.fs.s3a.Statistic.*;
-
-/**
- * Upload files/parts asap directly from a memory buffer (instead of buffering
- * to a file).
- * 
- * Uploads are managed low-level rather than through the AWS TransferManager.
- * This allows for uploading each part of a multi-part upload as soon as
- * the bytes are in memory, rather than waiting until the file is closed.
- * 
- * Unstable: statistics and error handling might evolve
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class S3AFastOutputStream extends OutputStream {
-
-  private static final Logger LOG = S3AFileSystem.LOG;
-  private final String key;
-  private final String bucket;
-  private final AmazonS3 client;
-  private final int partSize;
-  private final int multiPartThreshold;
-  private final S3AFileSystem fs;
-  private final CannedAccessControlList cannedACL;
-  private final ProgressListener progressListener;
-  private final ListeningExecutorService executorService;
-  private MultiPartUpload multiPartUpload;
-  private boolean closed;
-  private ByteArrayOutputStream buffer;
-  private int bufferLimit;
-
-
-  /**
-   * Creates a fast OutputStream that uploads to S3 from memory.
-   * For MultiPartUploads, as soon as sufficient bytes have been written to
-   * the stream a part is uploaded immediately (by using the low-level
-   * multi-part upload API on the AmazonS3Client).
-   *
-   * @param client AmazonS3Client used for S3 calls
-   * @param fs S3AFilesystem
-   * @param bucket S3 bucket name
-   * @param key S3 key name
-   * @param progress report progress in order to prevent timeouts
-   * @param cannedACL used CannedAccessControlList
-   * @param partSize size of a single part in a multi-part upload (except
-   * last part)
-   * @param multiPartThreshold files at least this size use multi-part upload
-   * @param threadPoolExecutor thread factory
-   * @throws IOException on any problem
-   */
-  public S3AFastOutputStream(AmazonS3 client,
- 
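
The javadoc of the removed S3AFastOutputStream above describes the idea this series generalises: buffer writes in memory and upload each part of a multi-part upload as soon as enough bytes have accumulated, instead of waiting for close(). The following stand-alone sketch (not the removed class, and not the new S3ABlockOutputStream) illustrates that pattern with the same AWS SDK classes the file imports; the class and field names are illustrative only, and the real streams hand the part upload off to an executor so uploads overlap with further writes, a detail omitted here for brevity.

  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.util.ArrayList;
  import java.util.List;
  import com.amazonaws.services.s3.AmazonS3;
  import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
  import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
  import com.amazonaws.services.s3.model.PartETag;
  import com.amazonaws.services.s3.model.UploadPartRequest;

  class InMemoryPartUploader {
    private final AmazonS3 s3;
    private final String bucket;
    private final String key;
    private final String uploadId;
    private final int partSize;
    private final List<PartETag> etags = new ArrayList<>();
    private ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    private int partNumber = 1;

    InMemoryPartUploader(AmazonS3 s3, String bucket, String key, int partSize) {
      this.s3 = s3;
      this.bucket = bucket;
      this.key = key;
      this.partSize = partSize;
      // Start the multipart upload up front so parts can be sent eagerly.
      this.uploadId = s3.initiateMultipartUpload(
          new InitiateMultipartUploadRequest(bucket, key)).getUploadId();
    }

    void write(byte[] data, int off, int len) {
      buffer.write(data, off, len);
      if (buffer.size() >= partSize) {
        flushPart();   // upload as soon as sufficient bytes are in memory
      }
    }

    private void flushPart() {
      byte[] bytes = buffer.toByteArray();
      UploadPartRequest part = new UploadPartRequest()
          .withBucketName(bucket).withKey(key)
          .withUploadId(uploadId)
          .withPartNumber(partNumber++)
          .withInputStream(new ByteArrayInputStream(bytes))
          .withPartSize(bytes.length);
      etags.add(s3.uploadPart(part).getPartETag());
      buffer = new ByteArrayOutputStream();
    }

    void close() {
      if (buffer.size() > 0) {
        flushPart();   // final, possibly short, part
      }
      s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
          bucket, key, uploadId, etags));
    }
  }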

[02/12] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc176961/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 67972ca..166fd73 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -1,3 +1,4 @@
+
 

[06/12] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ecbf323/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 67972ca..166fd73 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -1,3 +1,4 @@
+
 

[01/12] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3972bb3bb -> bc176961e
  refs/heads/branch-2.8 d26e11f30 -> 1ecbf323b
  refs/heads/trunk b733a6f86 -> 6c348c569


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc176961/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
new file mode 100644
index 000..a60d084
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
@@ -0,0 +1,412 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.scale;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.amazonaws.event.ProgressEvent;
+import com.amazonaws.event.ProgressEventType;
+import com.amazonaws.event.ProgressListener;
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.s3a.S3AFileStatus;
+import org.apache.hadoop.fs.s3a.Statistic;
+import org.apache.hadoop.util.Progressable;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+import static org.apache.hadoop.fs.s3a.Constants.*;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
+
+/**
+ * Scale test which creates a huge file.
+ *
+ * Important: the order in which these tests execute is fixed to
+ * alphabetical order. Test cases are numbered {@code test_123_} to impose
+ * an ordering based on the numbers.
+ *
+ * Having this ordering allows the tests to assume that the huge file
+ * exists. Even so: they should all have a {@link #assumeHugeFileExists()}
+ * check at the start, in case an individual test is executed.
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
+  private static final Logger LOG = LoggerFactory.getLogger(
+  AbstractSTestS3AHugeFiles.class);
+  public static final int DEFAULT_UPLOAD_BLOCKSIZE = 64 * _1KB;
+  public static final String DEFAULT_PARTITION_SIZE = "8M";
+  private Path scaleTestDir;
+  private Path hugefile;
+  private Path hugefileRenamed;
+
+  private int uploadBlockSize = DEFAULT_UPLOAD_BLOCKSIZE;
+  private int partitionSize;
+
+  @Override
+  public void setUp() throws Exception {
+super.setUp();
+
+final Path testPath = getTestPath();
+scaleTestDir = new Path(testPath, "scale");
+hugefile = new Path(scaleTestDir, "hugefile");
+hugefileRenamed = new Path(scaleTestDir, "hugefileRenamed");
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+// do nothing. Specifically: do not delete the test dir
+  }
+
+  /**
+   * Note that this can get called before test setup.
+   * @return the configuration to use.
+   */
+  @Override
+  protected Configuration createConfiguration() {
+Configuration conf = super.createConfiguration();
+partitionSize = (int)getTestPropertyBytes(conf,
+KEY_HUGE_PARTITION_SIZE,
+DEFAULT_PARTITION_SIZE);
+assertTrue("Partition size too small: " + partitionSize,
+partitionSize > MULTIPART_MIN_SIZE);
+conf.setLong(SOCKET_SEND_BUFFER, _1MB);
+conf.setLong(SOCKET_RECV_BUFFER, _1MB);
+conf.setLong(MIN_MULTIPART_THRESHOLD, partitionSize);
+conf.setInt(MULTIPART_SIZE, partitionSize);
+conf.set(USER_AGENT_PREFIX, "STestS3AHugeFileCreate");
+conf.setBoolean(FAST_UPLOAD, true);
+conf.set(FAST_UPLOAD_BUFFER, getBlockOutputBufferName());
+return conf;
+  }
+
+  /**
+   * The name of the buffering mechanism to 

[08/12] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread stevel
HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. 
Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ecbf323
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ecbf323
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ecbf323

Branch: refs/heads/branch-2.8
Commit: 1ecbf323ba50bec91fc661a972754cd944888d55
Parents: d26e11f
Author: Steve Loughran 
Authored: Tue Oct 18 19:33:38 2016 +0100
Committer: Steve Loughran 
Committed: Tue Oct 18 19:34:07 2016 +0100

--
 .../src/main/resources/core-default.xml |  74 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |  16 +-
 hadoop-tools/hadoop-aws/pom.xml |  58 +-
 .../s3a/BlockingThreadPoolExecutorService.java  | 184 +
 .../org/apache/hadoop/fs/s3a/Constants.java |  71 +-
 .../hadoop/fs/s3a/S3ABlockOutputStream.java | 703 
 .../org/apache/hadoop/fs/s3a/S3ADataBlocks.java | 821 +++
 .../hadoop/fs/s3a/S3AFastOutputStream.java  | 410 -
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 418 --
 .../hadoop/fs/s3a/S3AInstrumentation.java   | 248 +-
 .../apache/hadoop/fs/s3a/S3AOutputStream.java   |  57 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  39 +
 .../fs/s3a/SemaphoredDelegatingExecutor.java| 230 ++
 .../org/apache/hadoop/fs/s3a/Statistic.java |  32 +-
 .../src/site/markdown/tools/hadoop-aws/index.md | 668 +--
 .../fs/contract/s3a/ITestS3AContractDistCp.java |  10 +-
 .../hadoop/fs/s3a/AbstractS3ATestBase.java  |   1 +
 .../ITestBlockingThreadPoolExecutorService.java |  48 +-
 .../hadoop/fs/s3a/ITestS3ABlockOutputArray.java |  90 ++
 .../fs/s3a/ITestS3ABlockOutputByteBuffer.java   |  30 +
 .../hadoop/fs/s3a/ITestS3ABlockOutputDisk.java  |  30 +
 .../fs/s3a/ITestS3ABlockingThreadPool.java  |   2 +
 .../hadoop/fs/s3a/ITestS3AConfiguration.java|  29 +
 .../ITestS3AEncryptionBlockOutputStream.java|  36 +
 .../s3a/ITestS3AEncryptionFastOutputStream.java |  35 -
 .../hadoop/fs/s3a/ITestS3AFastOutputStream.java |  74 --
 .../apache/hadoop/fs/s3a/ITestS3ATestUtils.java |  98 +++
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  |  75 +-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  | 148 +++-
 .../apache/hadoop/fs/s3a/TestDataBlocks.java| 124 +++
 .../ITestS3AFileContextStatistics.java  |   1 +
 .../fs/s3a/scale/AbstractSTestS3AHugeFiles.java | 412 ++
 .../fs/s3a/scale/ITestS3ADeleteManyFiles.java   |  19 +-
 .../s3a/scale/ITestS3AHugeFilesArrayBlocks.java |  31 +
 .../ITestS3AHugeFilesByteBufferBlocks.java  |  34 +
 .../scale/ITestS3AHugeFilesClassicOutput.java   |  41 +
 .../s3a/scale/ITestS3AHugeFilesDiskBlocks.java  |  31 +
 .../hadoop/fs/s3a/scale/S3AScaleTestBase.java   | 151 ++--
 38 files changed, 4655 insertions(+), 924 deletions(-)
--
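
Before the per-file hunks, a hedged sketch of how a user might opt in to the feature this series adds, expressed with the Hadoop Configuration API. The fs.s3a.fast.upload and fs.s3a.fast.upload.buffer names are assumed to correspond to the FAST_UPLOAD and FAST_UPLOAD_BUFFER constants used by the new tests, and "disk" is assumed to be an accepted buffer value; the remaining properties are the ones whose descriptions change in core-default.xml below.

  import org.apache.hadoop.conf.Configuration;

  public class S3ABlockUploadConfigExample {
    public static Configuration configure() {
      Configuration conf = new Configuration();
      // Opt in to the block output stream introduced by HADOOP-13560.
      conf.setBoolean("fs.s3a.fast.upload", true);
      // Buffering mechanism for pending blocks; "disk" is an assumed legal value.
      conf.set("fs.s3a.fast.upload.buffer", "disk");
      // Concurrency limits described in the updated property descriptions below.
      conf.setInt("fs.s3a.threads.max", 10);
      conf.setInt("fs.s3a.max.total.tasks", 5);
      // Optionally purge abandoned multipart uploads older than one day.
      conf.setBoolean("fs.s3a.multipart.purge", true);
      conf.setLong("fs.s3a.multipart.purge.age", 86400);
      return conf;
    }
  }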


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ecbf323/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 8d913ee..ba40a83 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -896,8 +896,8 @@
 <property>
   <name>fs.s3a.threads.max</name>
   <value>10</value>
-  <description> Maximum number of concurrent active (part)uploads,
-    which each use a thread from the threadpool.</description>
+  <description>The total number of threads available in the filesystem for data
+    uploads *or any other queued filesystem operation*.</description>
 </property>
 
 <property>
@@ -910,8 +910,7 @@
 <property>
   <name>fs.s3a.max.total.tasks</name>
   <value>5</value>
-  <description>Number of (part)uploads allowed to the queue before
-    blocking additional uploads.</description>
+  <description>The number of operations which can be queued for execution</description>
 </property>
 
 <property>
@@ -949,13 +948,21 @@
   <name>fs.s3a.multipart.purge</name>
   <value>false</value>
   <description>True if you want to purge existing multipart uploads that may not have been
-    completed/aborted correctly</description>
+    completed/aborted correctly. The corresponding purge age is defined in
+    fs.s3a.multipart.purge.age.
+    If set, when the filesystem is instantiated then all outstanding uploads
+    older than the purge age will be terminated -across the entire bucket.
+    This will impact multipart uploads by other applications and users. so should
+    be used sparingly, with an age value chosen to stop failed uploads, without
+    breaking ongoing operations.
+  </description>
 </property>
 
 <property>
   <name>fs.s3a.multipart.purge.age</name>
   <value>86400</value>
-  <description>Minimum age in seconds of multipart uploads to purge</description>
+  <description>Minimum age in seconds of multipart uploads to purge.
+  </description>
 </property>
 
 <property>
@@ -988,10 +995,50 @@
 
   

[09/12] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c348c56/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
new file mode 100644
index 000..a60d084
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
@@ -0,0 +1,412 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.scale;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.amazonaws.event.ProgressEvent;
+import com.amazonaws.event.ProgressEventType;
+import com.amazonaws.event.ProgressListener;
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.s3a.S3AFileStatus;
+import org.apache.hadoop.fs.s3a.Statistic;
+import org.apache.hadoop.util.Progressable;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+import static org.apache.hadoop.fs.s3a.Constants.*;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
+
+/**
+ * Scale test which creates a huge file.
+ *
+ * Important: the order in which these tests execute is fixed to
+ * alphabetical order. Test cases are numbered {@code test_123_} to impose
+ * an ordering based on the numbers.
+ *
+ * Having this ordering allows the tests to assume that the huge file
+ * exists. Even so: they should all have a {@link #assumeHugeFileExists()}
+ * check at the start, in case an individual test is executed.
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
+  private static final Logger LOG = LoggerFactory.getLogger(
+  AbstractSTestS3AHugeFiles.class);
+  public static final int DEFAULT_UPLOAD_BLOCKSIZE = 64 * _1KB;
+  public static final String DEFAULT_PARTITION_SIZE = "8M";
+  private Path scaleTestDir;
+  private Path hugefile;
+  private Path hugefileRenamed;
+
+  private int uploadBlockSize = DEFAULT_UPLOAD_BLOCKSIZE;
+  private int partitionSize;
+
+  @Override
+  public void setUp() throws Exception {
+super.setUp();
+
+final Path testPath = getTestPath();
+scaleTestDir = new Path(testPath, "scale");
+hugefile = new Path(scaleTestDir, "hugefile");
+hugefileRenamed = new Path(scaleTestDir, "hugefileRenamed");
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+// do nothing. Specifically: do not delete the test dir
+  }
+
+  /**
+   * Note that this can get called before test setup.
+   * @return the configuration to use.
+   */
+  @Override
+  protected Configuration createConfiguration() {
+Configuration conf = super.createConfiguration();
+partitionSize = (int)getTestPropertyBytes(conf,
+KEY_HUGE_PARTITION_SIZE,
+DEFAULT_PARTITION_SIZE);
+assertTrue("Partition size too small: " + partitionSize,
+partitionSize > MULTIPART_MIN_SIZE);
+conf.setLong(SOCKET_SEND_BUFFER, _1MB);
+conf.setLong(SOCKET_RECV_BUFFER, _1MB);
+conf.setLong(MIN_MULTIPART_THRESHOLD, partitionSize);
+conf.setInt(MULTIPART_SIZE, partitionSize);
+conf.set(USER_AGENT_PREFIX, "STestS3AHugeFileCreate");
+conf.setBoolean(FAST_UPLOAD, true);
+conf.set(FAST_UPLOAD_BUFFER, getBlockOutputBufferName());
+return conf;
+  }
+
+  /**
+   * The name of the buffering mechanism to use.
+   * @return a buffering mechanism
+   */
+  protected abstract String getBlockOutputBufferName();
+
+  @Test
+  public void test_010_CreateHugeFile() throws 
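
The abstract getBlockOutputBufferName() hook shown above is all a concrete huge-files test has to implement; the changed-files list includes disk, array and byte-buffer variants. A hedged sketch of such a subclass follows (the class name and the "disk" literal are illustrative; the real tests return the corresponding Constants value):

  package org.apache.hadoop.fs.s3a.scale;

  public class ITestS3AHugeFilesDiskBlocksExample extends AbstractSTestS3AHugeFiles {
    @Override
    protected String getBlockOutputBufferName() {
      // Assumed to match one of the fs.s3a.fast.upload.buffer options ("disk").
      return "disk";
    }
  }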

[05/12] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ecbf323/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
new file mode 100644
index 000..a60d084
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
@@ -0,0 +1,412 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.scale;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.amazonaws.event.ProgressEvent;
+import com.amazonaws.event.ProgressEventType;
+import com.amazonaws.event.ProgressListener;
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.s3a.S3AFileStatus;
+import org.apache.hadoop.fs.s3a.Statistic;
+import org.apache.hadoop.util.Progressable;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+import static org.apache.hadoop.fs.s3a.Constants.*;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
+
+/**
+ * Scale test which creates a huge file.
+ *
+ * Important: the order in which these tests execute is fixed to
+ * alphabetical order. Test cases are numbered {@code test_123_} to impose
+ * an ordering based on the numbers.
+ *
+ * Having this ordering allows the tests to assume that the huge file
+ * exists. Even so: they should all have a {@link #assumeHugeFileExists()}
+ * check at the start, in case an individual test is executed.
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
+  private static final Logger LOG = LoggerFactory.getLogger(
+  AbstractSTestS3AHugeFiles.class);
+  public static final int DEFAULT_UPLOAD_BLOCKSIZE = 64 * _1KB;
+  public static final String DEFAULT_PARTITION_SIZE = "8M";
+  private Path scaleTestDir;
+  private Path hugefile;
+  private Path hugefileRenamed;
+
+  private int uploadBlockSize = DEFAULT_UPLOAD_BLOCKSIZE;
+  private int partitionSize;
+
+  @Override
+  public void setUp() throws Exception {
+super.setUp();
+
+final Path testPath = getTestPath();
+scaleTestDir = new Path(testPath, "scale");
+hugefile = new Path(scaleTestDir, "hugefile");
+hugefileRenamed = new Path(scaleTestDir, "hugefileRenamed");
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+// do nothing. Specifically: do not delete the test dir
+  }
+
+  /**
+   * Note that this can get called before test setup.
+   * @return the configuration to use.
+   */
+  @Override
+  protected Configuration createConfiguration() {
+Configuration conf = super.createConfiguration();
+partitionSize = (int)getTestPropertyBytes(conf,
+KEY_HUGE_PARTITION_SIZE,
+DEFAULT_PARTITION_SIZE);
+assertTrue("Partition size too small: " + partitionSize,
+partitionSize > MULTIPART_MIN_SIZE);
+conf.setLong(SOCKET_SEND_BUFFER, _1MB);
+conf.setLong(SOCKET_RECV_BUFFER, _1MB);
+conf.setLong(MIN_MULTIPART_THRESHOLD, partitionSize);
+conf.setInt(MULTIPART_SIZE, partitionSize);
+conf.set(USER_AGENT_PREFIX, "STestS3AHugeFileCreate");
+conf.setBoolean(FAST_UPLOAD, true);
+conf.set(FAST_UPLOAD_BUFFER, getBlockOutputBufferName());
+return conf;
+  }
+
+  /**
+   * The name of the buffering mechanism to use.
+   * @return a buffering mechanism
+   */
+  protected abstract String getBlockOutputBufferName();
+
+  @Test
+  public void test_010_CreateHugeFile() throws 

[12/12] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread stevel
HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. 
Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c348c56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c348c56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c348c56

Branch: refs/heads/trunk
Commit: 6c348c56918973fd988b110e79231324a8befe12
Parents: b733a6f
Author: Steve Loughran 
Authored: Tue Oct 18 19:33:38 2016 +0100
Committer: Steve Loughran 
Committed: Tue Oct 18 21:16:02 2016 +0100

--
 .../src/main/resources/core-default.xml |  74 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |  16 +-
 hadoop-tools/hadoop-aws/pom.xml |  58 +-
 .../s3a/BlockingThreadPoolExecutorService.java  | 168 +---
 .../org/apache/hadoop/fs/s3a/Constants.java |  71 +-
 .../hadoop/fs/s3a/S3ABlockOutputStream.java | 703 
 .../org/apache/hadoop/fs/s3a/S3ADataBlocks.java | 821 +++
 .../hadoop/fs/s3a/S3AFastOutputStream.java  | 410 -
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 408 +++--
 .../hadoop/fs/s3a/S3AInstrumentation.java   | 248 +-
 .../apache/hadoop/fs/s3a/S3AOutputStream.java   |  57 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  39 +
 .../fs/s3a/SemaphoredDelegatingExecutor.java| 230 ++
 .../org/apache/hadoop/fs/s3a/Statistic.java |  32 +-
 .../src/site/markdown/tools/hadoop-aws/index.md | 668 +--
 .../fs/contract/s3a/ITestS3AContractDistCp.java |  10 +-
 .../hadoop/fs/s3a/AbstractS3ATestBase.java  |   1 +
 .../ITestBlockingThreadPoolExecutorService.java |  48 +-
 .../hadoop/fs/s3a/ITestS3ABlockOutputArray.java |  90 ++
 .../fs/s3a/ITestS3ABlockOutputByteBuffer.java   |  30 +
 .../hadoop/fs/s3a/ITestS3ABlockOutputDisk.java  |  30 +
 .../fs/s3a/ITestS3ABlockingThreadPool.java  |   2 +
 .../hadoop/fs/s3a/ITestS3AConfiguration.java|  29 +
 .../ITestS3AEncryptionBlockOutputStream.java|  36 +
 .../s3a/ITestS3AEncryptionFastOutputStream.java |  35 -
 .../hadoop/fs/s3a/ITestS3AFastOutputStream.java |  74 --
 .../apache/hadoop/fs/s3a/ITestS3ATestUtils.java |  98 +++
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  |  75 +-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  | 148 +++-
 .../apache/hadoop/fs/s3a/TestDataBlocks.java| 124 +++
 .../ITestS3AFileContextStatistics.java  |   1 +
 .../fs/s3a/scale/AbstractSTestS3AHugeFiles.java | 412 ++
 .../fs/s3a/scale/ITestS3ADeleteManyFiles.java   |  19 +-
 .../s3a/scale/ITestS3AHugeFilesArrayBlocks.java |  31 +
 .../ITestS3AHugeFilesByteBufferBlocks.java  |  34 +
 .../scale/ITestS3AHugeFilesClassicOutput.java   |  41 +
 .../s3a/scale/ITestS3AHugeFilesDiskBlocks.java  |  31 +
 .../hadoop/fs/s3a/scale/S3AScaleTestBase.java   | 151 ++--
 38 files changed, 4647 insertions(+), 906 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c348c56/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 4882728..daa421c 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -994,8 +994,8 @@
 <property>
   <name>fs.s3a.threads.max</name>
   <value>10</value>
-  <description> Maximum number of concurrent active (part)uploads,
-    which each use a thread from the threadpool.</description>
+  <description>The total number of threads available in the filesystem for data
+    uploads *or any other queued filesystem operation*.</description>
 </property>
 
 <property>
@@ -1008,8 +1008,7 @@
 <property>
   <name>fs.s3a.max.total.tasks</name>
   <value>5</value>
-  <description>Number of (part)uploads allowed to the queue before
-    blocking additional uploads.</description>
+  <description>The number of operations which can be queued for execution</description>
 </property>
 
 <property>
@@ -1047,13 +1046,21 @@
   <name>fs.s3a.multipart.purge</name>
   <value>false</value>
   <description>True if you want to purge existing multipart uploads that may not have been
-    completed/aborted correctly</description>
+    completed/aborted correctly. The corresponding purge age is defined in
+    fs.s3a.multipart.purge.age.
+    If set, when the filesystem is instantiated then all outstanding uploads
+    older than the purge age will be terminated -across the entire bucket.
+    This will impact multipart uploads by other applications and users. so should
+    be used sparingly, with an age value chosen to stop failed uploads, without
+    breaking ongoing operations.
+  </description>
 </property>
 
 <property>
   <name>fs.s3a.multipart.purge.age</name>
   <value>86400</value>
-  <description>Minimum age in seconds of multipart uploads to purge</description>
+  <description>Minimum age in seconds of multipart uploads to purge.
+  </description>
 </property>
 
 <property>
@@ -1086,10 +1093,50 @@
 
   

hadoop git commit: HDFS-10906. Add unit tests for Trash with HDFS encryption zones. Contributed by Hanisha Koneru.

2016-10-18 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6c348c569 -> c62ae7107


HDFS-10906. Add unit tests for Trash with HDFS encryption zones. Contributed by 
Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c62ae710
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c62ae710
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c62ae710

Branch: refs/heads/trunk
Commit: c62ae7107f025091652e79db3edfca5c4dc84e4a
Parents: 6c348c5
Author: Xiaoyu Yao 
Authored: Mon Oct 17 15:25:24 2016 -0700
Committer: Xiaoyu Yao 
Committed: Tue Oct 18 14:05:43 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  32 +-
 .../hdfs/TestTrashWithEncryptionZones.java  | 188 
 .../TestTrashWithSecureEncryptionZones.java | 443 +++
 3 files changed, 662 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c62ae710/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 963aaa6..7f26b03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -27,6 +27,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.BufferedOutputStream;
@@ -114,7 +115,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -167,6 +167,7 @@ import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Level;
 import org.junit.Assume;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.annotations.VisibleForTesting;
 import static 
org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
@@ -2054,4 +2055,33 @@ public class DFSTestUtil {
   }
 }
   }
+
+  public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
+  boolean shouldExistInTrash) throws Exception {
+Path trashPath = Path.mergePaths(shell.getCurrentTrashDir(path), path);
+
+verifyDelete(shell, fs, path, trashPath, shouldExistInTrash);
+  }
+
+  public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
+  Path trashPath, boolean shouldExistInTrash) throws Exception {
+assertTrue(path + " file does not exist", fs.exists(path));
+
+// Verify that trashPath has a path component named ".Trash"
+Path checkTrash = trashPath;
+while (!checkTrash.isRoot() && !checkTrash.getName().equals(".Trash")) {
+  checkTrash = checkTrash.getParent();
+}
+assertEquals("No .Trash component found in trash path " + trashPath,
+".Trash", checkTrash.getName());
+
+String[] argv = new String[]{"-rm", "-r", path.toString()};
+int res = ToolRunner.run(shell, argv);
+assertEquals("rm failed", 0, res);
+if (shouldExistInTrash) {
+  assertTrue("File not in trash : " + trashPath, fs.exists(trashPath));
+} else {
+  assertFalse("File in trash : " + trashPath, fs.exists(trashPath));
+}
+  }
 }
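
A hedged usage sketch for the new verifyDelete helper, in the spirit of the trash-with-encryption-zone tests this patch adds. The zone path, file size and seed are made up, and fs, conf and the encryption zone itself are assumed to be set up by the surrounding test:

  FsShell shell = new FsShell(conf);
  Path zoneFile = new Path("/zone1/file1");
  DFSTestUtil.createFile(fs, zoneFile, 1024, (short) 1, 0xBEEFL);
  // Files removed from an encryption zone are expected to land in the trash.
  DFSTestUtil.verifyDelete(shell, fs, zoneFile, true);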

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c62ae710/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
new file mode 100644
index 000..2a8d493
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more 

hadoop git commit: HDFS-10906. Add unit tests for Trash with HDFS encryption zones. Contributed by Hanisha Koneru.

2016-10-18 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bc176961e -> 74f1c49d7


HDFS-10906. Add unit tests for Trash with HDFS encryption zones. Contributed by 
Hanisha Koneru.

(cherry picked from commit 0c79d5af74944363f3d04f7e1702411db564bbfd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74f1c49d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74f1c49d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74f1c49d

Branch: refs/heads/branch-2
Commit: 74f1c49d76f24694b827f33f492718abb56a902b
Parents: bc17696
Author: Xiaoyu Yao 
Authored: Mon Oct 17 15:25:24 2016 -0700
Committer: Xiaoyu Yao 
Committed: Tue Oct 18 14:07:57 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  31 ++
 .../hdfs/TestTrashWithEncryptionZones.java  | 188 
 .../TestTrashWithSecureEncryptionZones.java | 443 +++
 3 files changed, 662 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74f1c49d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index bb8b2d3..3eda008 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -27,6 +27,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.BufferedOutputStream;
@@ -157,6 +158,7 @@ import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Level;
 import org.junit.Assume;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
@@ -1973,4 +1975,33 @@ public class DFSTestUtil {
   }
 }
   }
+
+  public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
+  boolean shouldExistInTrash) throws Exception {
+Path trashPath = Path.mergePaths(shell.getCurrentTrashDir(path), path);
+
+verifyDelete(shell, fs, path, trashPath, shouldExistInTrash);
+  }
+
+  public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
+  Path trashPath, boolean shouldExistInTrash) throws Exception {
+assertTrue(path + " file does not exist", fs.exists(path));
+
+// Verify that trashPath has a path component named ".Trash"
+Path checkTrash = trashPath;
+while (!checkTrash.isRoot() && !checkTrash.getName().equals(".Trash")) {
+  checkTrash = checkTrash.getParent();
+}
+assertEquals("No .Trash component found in trash path " + trashPath,
+".Trash", checkTrash.getName());
+
+String[] argv = new String[]{"-rm", "-r", path.toString()};
+int res = ToolRunner.run(shell, argv);
+assertEquals("rm failed", 0, res);
+if (shouldExistInTrash) {
+  assertTrue("File not in trash : " + trashPath, fs.exists(trashPath));
+} else {
+  assertFalse("File in trash : " + trashPath, fs.exists(trashPath));
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74f1c49d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
new file mode 100644
index 000..2a8d493
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed 

hadoop git commit: HDFS-10906. Add unit tests for Trash with HDFS encryption zones. Contributed by Hanisha Koneru.

2016-10-18 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 1ecbf323b -> 190a53b89


HDFS-10906. Add unit tests for Trash with HDFS encryption zones. Contributed by 
Hanisha Koneru.

(cherry picked from commit 0c79d5af74944363f3d04f7e1702411db564bbfd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/190a53b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/190a53b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/190a53b8

Branch: refs/heads/branch-2.8
Commit: 190a53b898cb9432cb70a915b30ec47049107433
Parents: 1ecbf32
Author: Xiaoyu Yao 
Authored: Mon Oct 17 15:25:24 2016 -0700
Committer: Xiaoyu Yao 
Committed: Tue Oct 18 14:09:03 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  31 ++
 .../hdfs/TestTrashWithEncryptionZones.java  | 188 
 .../TestTrashWithSecureEncryptionZones.java | 443 +++
 3 files changed, 662 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/190a53b8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 67e9e54..b9aa1e7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -27,6 +27,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.BufferedOutputStream;
@@ -157,6 +158,7 @@ import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Level;
 import org.junit.Assume;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
@@ -1970,4 +1972,33 @@ public class DFSTestUtil {
   }
 }
   }
+
+  public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
+  boolean shouldExistInTrash) throws Exception {
+Path trashPath = Path.mergePaths(shell.getCurrentTrashDir(path), path);
+
+verifyDelete(shell, fs, path, trashPath, shouldExistInTrash);
+  }
+
+  public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
+  Path trashPath, boolean shouldExistInTrash) throws Exception {
+assertTrue(path + " file does not exist", fs.exists(path));
+
+// Verify that trashPath has a path component named ".Trash"
+Path checkTrash = trashPath;
+while (!checkTrash.isRoot() && !checkTrash.getName().equals(".Trash")) {
+  checkTrash = checkTrash.getParent();
+}
+assertEquals("No .Trash component found in trash path " + trashPath,
+".Trash", checkTrash.getName());
+
+String[] argv = new String[]{"-rm", "-r", path.toString()};
+int res = ToolRunner.run(shell, argv);
+assertEquals("rm failed", 0, res);
+if (shouldExistInTrash) {
+  assertTrue("File not in trash : " + trashPath, fs.exists(trashPath));
+} else {
+  assertFalse("File in trash : " + trashPath, fs.exists(trashPath));
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/190a53b8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
new file mode 100644
index 000..2a8d493
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * 

hadoop git commit: MAPREDUCE-6791. remove unnecessary dependency from hadoop-mapreduce-client-jobclient to hadoop-mapreduce-client-shuffle (haibochen via rkanter)

2016-10-18 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk c62ae7107 -> 29caf6d7d


MAPREDUCE-6791. remove unnecessary dependency from 
hadoop-mapreduce-client-jobclient to hadoop-mapreduce-client-shuffle (haibochen 
via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29caf6d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29caf6d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29caf6d7

Branch: refs/heads/trunk
Commit: 29caf6d7df54a77b11399bb5f73d62b3b38ae912
Parents: c62ae71
Author: Robert Kanter 
Authored: Tue Oct 18 18:00:29 2016 -0700
Committer: Robert Kanter 
Committed: Tue Oct 18 18:00:29 2016 -0700

--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29caf6d7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index c4fef7e..cd181fe 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -42,10 +42,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-app</artifactId>
       <scope>test</scope>
     </dependency>





hadoop git commit: HADOOP-7352. FileSystem#listStatus should throw IOE upon access error. Contributed by John Zhuge.

2016-10-18 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 29caf6d7d -> efdf810cf


HADOOP-7352. FileSystem#listStatus should throw IOE upon access error. 
Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/efdf810c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/efdf810c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/efdf810c

Branch: refs/heads/trunk
Commit: efdf810cf9f72d78e97e860576c64a382ece437c
Parents: 29caf6d
Author: Xiao Chen 
Authored: Tue Oct 18 18:18:43 2016 -0700
Committer: Xiao Chen 
Committed: Tue Oct 18 18:18:43 2016 -0700

--
 .../java/org/apache/hadoop/fs/FileSystem.java   | 14 +---
 .../apache/hadoop/fs/RawLocalFileSystem.java|  5 +---
 .../src/site/markdown/filesystem/filesystem.md  |  3 +++
 .../hadoop/fs/FSMainOperationsBaseTest.java | 24 +---
 .../apache/hadoop/fs/shell/TestPathData.java| 19 
 .../apache/hadoop/hdfs/web/TestTokenAspect.java |  6 ++---
 .../apache/hadoop/tools/TestDistCpWithAcls.java |  2 +-
 .../hadoop/tools/TestDistCpWithXAttrs.java  |  2 +-
 8 files changed, 54 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/efdf810c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index cc062c4..39b5b95 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1524,13 +1524,14 @@ public abstract class FileSystem extends Configured implements Closeable {
    * <p>
    * Does not guarantee to return the List of files/directories status in a
    * sorted order.
+   * <p>
+   * Will not return null. Expect IOException upon access error.
* @param f given path
* @return the statuses of the files/directories in the given patch
-   * @throws FileNotFoundException when the path does not exist;
-   * IOException see specific implementation
+   * @throws FileNotFoundException when the path does not exist
+   * @throws IOException see specific implementation
*/
-  public abstract FileStatus[] listStatus(Path f) throws FileNotFoundException, 
-                                                         IOException;
+  public abstract FileStatus[] listStatus(Path f) throws IOException;
 
   /**
* Represents a batch of directory entries when iteratively listing a
@@ -1600,10 +1601,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   private void listStatus(ArrayList<FileStatus> results, Path f,
       PathFilter filter) throws FileNotFoundException, IOException {
 FileStatus listing[] = listStatus(f);
-if (listing == null) {
-  throw new IOException("Error accessing " + f);
-}
-
+Preconditions.checkNotNull(listing, "listStatus should not return NULL");
 for (int i = 0; i < listing.length; i++) {
   if (filter.accept(listing[i].getPath())) {
 results.add(listing[i]);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/efdf810c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 0fcddcf..5e6cb05 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -466,10 +466,7 @@ public class RawLocalFileSystem extends FileSystem {
 }
 
 if (localf.isDirectory()) {
-  String[] names = localf.list();
-  if (names == null) {
-return null;
-  }
+  String[] names = FileUtil.list(localf);
   results = new FileStatus[names.length];
   int j = 0;
   for (int i = 0; i < names.length; i++) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/efdf810c/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index d927b8b..063bd97 100644
--- 

[27/50] [abbrv] hadoop git commit: HDFS-11003. Expose XmitsInProgress through DataNodeMXBean. Contributed By Brahma Reddy Battula

2016-10-18 Thread sjlee
HDFS-11003. Expose XmitsInProgress through DataNodeMXBean. Contributed By 
Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f4ae85b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f4ae85b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f4ae85b

Branch: refs/heads/HADOOP-13070
Commit: 5f4ae85bd8a20510948696467873498723b06477
Parents: 5ad037d
Author: Brahma Reddy Battula 
Authored: Sat Oct 15 22:28:33 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Sat Oct 15 22:28:33 2016 +0530

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java  | 5 +++--
 .../org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java | 6 ++
 .../apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java | 6 +-
 3 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4ae85b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index cb8e308..8f65efe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2101,8 +2101,9 @@ public class DataNode extends ReconfigurableBase
   }
 }
   }
-  
-  int getXmitsInProgress() {
+
+  @Override //DataNodeMXBean
+  public int getXmitsInProgress() {
 return xmitsInProgress.get();
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4ae85b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
index 5ec4cda..5d4c218 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
@@ -101,6 +101,12 @@ public interface DataNodeMXBean {
   public int getXceiverCount();
 
   /**
+   * Returns an estimate of the number of data replication/reconstruction tasks
+   * running currently.
+   */
+  public int getXmitsInProgress();
+
+  /**
* Gets the network error counts on a per-Datanode basis.
*/
  public Map<String, Map<String, Long>> getDatanodeNetworkCounts();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4ae85b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index 8b0d5cb..a77c943 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -96,7 +96,11 @@ public class TestDataNodeMXBean {
   int xceiverCount = (Integer)mbs.getAttribute(mxbeanName,
   "XceiverCount");
   Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
-
+  // Ensure mxbean's XmitsInProgress is same as the DataNode's
+  // live value.
+  int xmitsInProgress =
+  (Integer) mbs.getAttribute(mxbeanName, "XmitsInProgress");
+  Assert.assertEquals(datanode.getXmitsInProgress(), xmitsInProgress);
   String bpActorInfo = (String)mbs.getAttribute(mxbeanName,
   "BPServiceActorInfo");
   Assert.assertEquals(datanode.getBPServiceActorInfo(), bpActorInfo);
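
A hedged way to read the newly exposed attribute outside of the unit test, via the platform MBean server of a JVM hosting a DataNode; the object name follows the usual Hadoop MXBean naming convention but should be verified against the build.

  import java.lang.management.ManagementFactory;
  import javax.management.MBeanServer;
  import javax.management.ObjectName;

  public class XmitsInProgressProbe {
    public static void main(String[] args) throws Exception {
      // Assumes this runs inside (or attached to) a JVM hosting a DataNode.
      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
      ObjectName dnBean =
          new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
      int xmits = (Integer) mbs.getAttribute(dnBean, "XmitsInProgress");
      System.out.println("replication/reconstruction tasks in flight: " + xmits);
    }
  }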





[42/50] [abbrv] hadoop git commit: HDFS-10920. TestStorageMover#testNoSpaceDisk is failing intermittently. Contributed by Rakesh R

2016-10-18 Thread sjlee
HDFS-10920. TestStorageMover#testNoSpaceDisk is failing intermittently. 
Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d26a1bb9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d26a1bb9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d26a1bb9

Branch: refs/heads/HADOOP-13070
Commit: d26a1bb9d60f50763887d66399579bac7ca81982
Parents: c023c74
Author: Kai Zheng 
Authored: Tue Oct 18 14:51:08 2016 +0600
Committer: Kai Zheng 
Committed: Tue Oct 18 14:51:08 2016 +0600

--
 .../hadoop/hdfs/server/mover/TestStorageMover.java | 17 ++---
 1 file changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d26a1bb9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index 92a70a0..1b5bd81 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -613,8 +613,10 @@ public class TestStorageMover {
   }
 
   private void waitForAllReplicas(int expectedReplicaNum, Path file,
-  DistributedFileSystem dfs) throws Exception {
-for (int i = 0; i < 5; i++) {
+  DistributedFileSystem dfs, int retryCount) throws Exception {
+LOG.info("Waiting for replicas count " + expectedReplicaNum
++ ", file name: " + file);
+for (int i = 0; i < retryCount; i++) {
   LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0,
   BLOCK_SIZE);
   LocatedBlock lb = lbs.get(0);
@@ -664,7 +666,7 @@ public class TestStorageMover {
   for (int i = 0; i < 2; i++) {
 final Path p = new Path(pathPolicyMap.hot, "file" + i);
 DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
-waitForAllReplicas(replication, p, test.dfs);
+waitForAllReplicas(replication, p, test.dfs, 10);
   }
 
   // set all the DISK volume to full
@@ -679,16 +681,17 @@ public class TestStorageMover {
   final Replication r = test.getReplication(file0);
   final short newReplication = (short) 5;
   test.dfs.setReplication(file0, newReplication);
-  Thread.sleep(1);
+  waitForAllReplicas(newReplication, file0, test.dfs, 10);
   test.verifyReplication(file0, r.disk, newReplication - r.disk);
 
   // test creating a cold file and then increase replication
   final Path p = new Path(pathPolicyMap.cold, "foo");
   DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
+  waitForAllReplicas(replication, p, test.dfs, 10);
   test.verifyReplication(p, 0, replication);
 
   test.dfs.setReplication(p, newReplication);
-  Thread.sleep(1);
+  waitForAllReplicas(newReplication, p, test.dfs, 10);
   test.verifyReplication(p, 0, newReplication);
 
   //test move a hot file to warm
@@ -722,7 +725,7 @@ public class TestStorageMover {
   for (int i = 0; i < 2; i++) {
 final Path p = new Path(pathPolicyMap.cold, "file" + i);
 DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
-waitForAllReplicas(replication, p, test.dfs);
+waitForAllReplicas(replication, p, test.dfs, 10);
   }
 
   // set all the ARCHIVE volume to full
@@ -739,7 +742,7 @@ public class TestStorageMover {
 
 final short newReplication = (short) 5;
 test.dfs.setReplication(file0, newReplication);
-Thread.sleep(1);
+waitForAllReplicas(r.archive, file0, test.dfs, 10);
 
 test.verifyReplication(file0, 0, r.archive);
   }
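
The patch above replaces fixed Thread.sleep() calls with a bounded polling loop on the block's reported replica count. A minimal sketch of that loop, mirroring the waitForAllReplicas() hunk earlier in this diff, is below; the one-second back-off between attempts is an assumption, since the excerpt does not show the tail of the method.

    private void waitForAllReplicas(int expectedReplicaNum, Path file,
        DistributedFileSystem dfs, int retryCount) throws Exception {
      // Poll the NameNode for the first block's locations until enough
      // replicas are reported, or give up after retryCount attempts.
      for (int i = 0; i < retryCount; i++) {
        LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0,
            BLOCK_SIZE);
        LocatedBlock lb = lbs.get(0);
        if (lb.getLocations().length >= expectedReplicaNum) {
          return;
        }
        Thread.sleep(1000);  // assumed back-off; not shown in the hunk above
      }
    }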





[21/50] [abbrv] hadoop git commit: HDFS-10960. TestDataNodeHotSwapVolumes#testRemoveVolumeBeingWritten fails at disk error verification after volume remove. (Manoj Govindassamy via lei)

2016-10-18 Thread sjlee
HDFS-10960. TestDataNodeHotSwapVolumes#testRemoveVolumeBeingWritten fails at 
disk error verification after volume remove. (Manoj Govindassamy via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c520a27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c520a27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c520a27

Branch: refs/heads/HADOOP-13070
Commit: 8c520a27cbd9daba05367d3a83017a2eab5258eb
Parents: adb96e1
Author: Lei Xu 
Authored: Fri Oct 14 13:41:59 2016 -0700
Committer: Lei Xu 
Committed: Fri Oct 14 13:41:59 2016 -0700

--
 .../datanode/TestDataNodeHotSwapVolumes.java | 19 ---
 1 file changed, 12 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c520a27/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 06387c5..83c231d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -642,8 +642,6 @@ public class TestDataNodeHotSwapVolumes {
 final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
 final FileSystem fs = cluster.getFileSystem();
 final Path testFile = new Path("/test");
-final long lastTimeDiskErrorCheck = dn.getLastDiskErrorCheck();
-
 FSDataOutputStream out = fs.create(testFile, REPLICATION);
 
 Random rb = new Random(0);
@@ -699,17 +697,24 @@ public class TestDataNodeHotSwapVolumes {
 
 reconfigThread.join();
 
+// Verify if the data directory reconfigure was successful
+FsDatasetSpi fsDatasetSpi = dn.getFSDataset();
+try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+.getFsVolumeReferences()) {
+  for (int i =0; i < fsVolumeReferences.size(); i++) {
+System.out.println("Vol: " +
+fsVolumeReferences.get(i).getBaseURI().toString());
+  }
+  assertEquals("Volume remove wasn't successful.",
+  1, fsVolumeReferences.size());
+}
+
 // Verify the file has sufficient replications.
 DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
 // Read the content back
 byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
 assertEquals(BLOCK_SIZE, content.length);
 
-// If an IOException thrown from BlockReceiver#run, it triggers
-// DataNode#checkDiskError(). So we can test whether checkDiskError() is 
called,
-// to see whether there is IOException in BlockReceiver#run().
-assertEquals(lastTimeDiskErrorCheck, dn.getLastDiskErrorCheck());
-
 if (!exceptions.isEmpty()) {
   throw new IOException(exceptions.get(0).getCause());
 }
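
The added block above verifies the reconfiguration outcome directly by counting the volumes still referenced by the DataNode, instead of comparing disk-error-check timestamps. The same check, sketched as a stand-alone helper (the helper name is illustrative; it assumes the test class already imports DataNode, FsDatasetSpi and JUnit's assertEquals):

    private static void assertVolumeCount(DataNode dn, int expectedVolumes)
        throws IOException {
      FsDatasetSpi<?> dataset = dn.getFSDataset();
      // FsVolumeReferences is Closeable, so inspect it in try-with-resources
      // to make sure the references are released afterwards.
      try (FsDatasetSpi.FsVolumeReferences volumes =
          dataset.getFsVolumeReferences()) {
        assertEquals("Unexpected number of volumes", expectedVolumes,
            volumes.size());
      }
    }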





[41/50] [abbrv] hadoop git commit: HADOOP-13061. Refactor erasure coders. Contributed by Kai Sasaki

2016-10-18 Thread sjlee
HADOOP-13061. Refactor erasure coders. Contributed by Kai Sasaki


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c023c748
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c023c748
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c023c748

Branch: refs/heads/HADOOP-13070
Commit: c023c748869063fb67d14ea996569c42578d1cea
Parents: bedfec0
Author: Kai Zheng 
Authored: Tue Oct 18 12:02:53 2016 +0600
Committer: Kai Zheng 
Committed: Tue Oct 18 12:02:53 2016 +0600

--
 .../hadoop/fs/CommonConfigurationKeys.java  |  26 ---
 .../apache/hadoop/io/erasurecode/CodecUtil.java | 168 ++--
 .../io/erasurecode/ErasureCodeConstants.java|   3 +-
 .../io/erasurecode/ErasureCodecOptions.java |  37 
 .../erasurecode/codec/AbstractErasureCodec.java |  53 -
 .../io/erasurecode/codec/DummyErasureCodec.java |  45 +
 .../io/erasurecode/codec/ErasureCodec.java  |  76 +--
 .../io/erasurecode/codec/HHXORErasureCodec.java |  20 +-
 .../io/erasurecode/codec/RSErasureCodec.java|  20 +-
 .../io/erasurecode/codec/XORErasureCodec.java   |  22 ++-
 .../io/erasurecode/codec/package-info.java  |  28 +++
 .../erasurecode/coder/AbstractErasureCoder.java |  64 --
 .../coder/AbstractErasureCodingStep.java|  61 --
 .../coder/AbstractErasureDecoder.java   | 170 
 .../coder/AbstractErasureEncoder.java   |  62 --
 .../coder/AbstractHHErasureCodingStep.java  |  49 -
 .../erasurecode/coder/DummyErasureDecoder.java  |  46 +
 .../erasurecode/coder/DummyErasureEncoder.java  |  45 +
 .../io/erasurecode/coder/ErasureCoder.java  |  25 ++-
 .../io/erasurecode/coder/ErasureCodingStep.java |   8 +-
 .../io/erasurecode/coder/ErasureDecoder.java| 198 +++
 .../erasurecode/coder/ErasureDecodingStep.java  |  21 +-
 .../io/erasurecode/coder/ErasureEncoder.java|  91 +
 .../erasurecode/coder/ErasureEncodingStep.java  |  22 ++-
 .../erasurecode/coder/HHErasureCodingStep.java  |  68 +++
 .../erasurecode/coder/HHXORErasureDecoder.java  |  24 +--
 .../coder/HHXORErasureDecodingStep.java |   2 +-
 .../erasurecode/coder/HHXORErasureEncoder.java  |  19 +-
 .../coder/HHXORErasureEncodingStep.java |   2 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  |  16 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |  20 +-
 .../io/erasurecode/coder/XORErasureDecoder.java |  15 +-
 .../io/erasurecode/coder/XORErasureEncoder.java |  16 +-
 .../io/erasurecode/coder/package-info.java  |  28 +++
 .../io/erasurecode/rawcoder/CoderUtil.java  |   2 +-
 .../conf/TestCommonConfigurationFields.java |   5 +-
 .../erasurecode/TestCodecRawCoderMapping.java   |   3 +-
 .../codec/TestHHXORErasureCodec.java|   6 +-
 .../erasurecode/coder/TestErasureCoderBase.java |  13 +-
 .../coder/TestHHXORErasureCoder.java|   4 +-
 .../erasurecode/coder/TestRSErasureCoder.java   |   4 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |   3 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   6 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |   4 +-
 .../hadoop/hdfs/TestReconstructStripedFile.java |   4 +-
 45 files changed, 964 insertions(+), 660 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c023c748/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 2b530f0..fe522b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -21,9 +21,6 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.http.lib.StaticUserWebFilter;
-import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory;
-import 
org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactoryLegacy;
-import org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory;
 
 /** 
  * This class contains constants for configuration keys used
@@ -160,30 +157,7 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   public static final boolean IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT =
   false;
 
-  /**
-   * Erasure Coding configuration family
-   */
 
-  /** Supported erasure 

[38/50] [abbrv] hadoop git commit: Revert "HDFS-9820. Improve distcp to support efficient restore to an earlier snapshot. Contributed by Yongjun Zhang."

2016-10-18 Thread sjlee
Revert "HDFS-9820. Improve distcp to support efficient restore to an earlier 
snapshot. Contributed by Yongjun Zhang."

This reverts commit 412c4c9a342b73bf1c1a7f43ea91245cbf94d02d.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bc6d37f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bc6d37f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bc6d37f

Branch: refs/heads/HADOOP-13070
Commit: 0bc6d37f3c1e7c2a8682dffa95461a884bd6ba17
Parents: b61fb26
Author: Yongjun Zhang 
Authored: Mon Oct 17 22:47:37 2016 -0700
Committer: Yongjun Zhang 
Committed: Mon Oct 17 22:47:37 2016 -0700

--
 .../java/org/apache/hadoop/tools/DiffInfo.java  |  47 +-
 .../java/org/apache/hadoop/tools/DistCp.java|  34 +-
 .../apache/hadoop/tools/DistCpConstants.java|   1 -
 .../apache/hadoop/tools/DistCpOptionSwitch.java |   5 -
 .../org/apache/hadoop/tools/DistCpOptions.java  |  79 +-
 .../org/apache/hadoop/tools/DistCpSync.java | 256 ++
 .../org/apache/hadoop/tools/OptionsParser.java  |  27 +-
 .../apache/hadoop/tools/SimpleCopyListing.java  |  17 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java |   4 +-
 .../hadoop/tools/TestDistCpSyncReverseBase.java | 868 ---
 .../tools/TestDistCpSyncReverseFromSource.java  |  36 -
 .../tools/TestDistCpSyncReverseFromTarget.java  |  36 -
 .../apache/hadoop/tools/TestOptionsParser.java  |  85 +-
 13 files changed, 155 insertions(+), 1340 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bc6d37f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
index 7e56301..79bb7fe 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
@@ -44,49 +44,28 @@ class DiffInfo {
   };
 
   /** The source file/dir of the rename or deletion op */
-  private Path source;
-  /** The target file/dir of the rename op. Null means the op is deletion. */
-  private Path target;
-
-  private SnapshotDiffReport.DiffType type;
+  final Path source;
   /**
* The intermediate file/dir for the op. For a rename or a delete op,
* we first rename the source to this tmp file/dir.
*/
   private Path tmp;
+  /** The target file/dir of the rename op. Null means the op is deletion. */
+  Path target;
 
-  DiffInfo(final Path source, final Path target,
-  SnapshotDiffReport.DiffType type) {
-assert source != null;
-this.source = source;
-this.target= target;
-this.type = type;
-  }
-
-  void setSource(final Path source) {
-this.source = source;
-  }
-
-  Path getSource() {
-return source;
-  }
-
-  void setTarget(final Path target) {
-this.target = target;
-  }
+  private final SnapshotDiffReport.DiffType type;
 
-  Path getTarget() {
-return target;
+  public SnapshotDiffReport.DiffType getType(){
+return this.type;
   }
 
-  public void setType(final SnapshotDiffReport.DiffType type){
+  DiffInfo(Path source, Path target, SnapshotDiffReport.DiffType type) {
+assert source != null;
+this.source = source;
+this.target= target;
 this.type = type;
   }
 
-  public SnapshotDiffReport.DiffType getType(){
-return type;
-  }
-
   void setTmp(Path tmp) {
 this.tmp = tmp;
   }
@@ -94,10 +73,4 @@ class DiffInfo {
   Path getTmp() {
 return tmp;
   }
-
-  @Override
-  public String toString() {
-return type + ": src=" + String.valueOf(source) + " tgt="
-+ String.valueOf(target) + " tmp=" + String.valueOf(tmp);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bc6d37f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
index e9decd2..be58f13 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
@@ -77,21 +77,6 @@ public class DistCp extends Configured implements Tool {
   private boolean submitted;
   private FileSystem jobFS;
 
-  private void prepareFileListing(Job job) throws Exception {
-if (inputOptions.shouldUseSnapshotDiff()) {
-  try {
-DistCpSync distCpSync = new DistCpSync(inputOptions, getConf());
-distCpSync.sync();
-

hadoop git commit: HADOOP-13693. Remove the message about HTTP OPTIONS in SPNEGO initialization message from kms audit log.

2016-10-18 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk efdf810cf -> d75cbc574


HADOOP-13693. Remove the message about HTTP OPTIONS in SPNEGO initialization 
message from kms audit log.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d75cbc57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d75cbc57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d75cbc57

Branch: refs/heads/trunk
Commit: d75cbc5749808491d2b06f80506d95b6fb1b9e9c
Parents: efdf810
Author: Xiao Chen 
Authored: Tue Oct 18 18:24:37 2016 -0700
Committer: Xiao Chen 
Committed: Tue Oct 18 18:24:59 2016 -0700

--
 .../crypto/key/kms/server/KMSAuthenticationFilter.java| 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d75cbc57/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
index 60f1918..928a8aa 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
@@ -145,9 +145,13 @@ public class KMSAuthenticationFilter
 requestURL.append("?").append(queryString);
   }
 
-  KMSWebApp.getKMSAudit().unauthenticated(
-  request.getRemoteHost(), method, requestURL.toString(),
-  kmsResponse.msg);
+  if (!method.equals("OPTIONS")) {
+// an HTTP OPTIONS request is made as part of the SPNEGO authentication
+// sequence. We do not need to audit log it, since it doesn't belong
+// to KMS context. KMS server doesn't handle OPTIONS either.
+KMSWebApp.getKMSAudit().unauthenticated(request.getRemoteHost(), 
method,
+requestURL.toString(), kmsResponse.msg);
+  }
 }
   }
 





hadoop git commit: HADOOP-12082 Support multiple authentication schemes via AuthenticationFilter

2016-10-18 Thread benoy
Repository: hadoop
Updated Branches:
  refs/heads/trunk d75cbc574 -> 4bca38524


HADOOP-12082 Support multiple authentication schemes via AuthenticationFilter


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bca3852
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bca3852
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bca3852

Branch: refs/heads/trunk
Commit: 4bca385241c0fc8ff168c7b0f2984a7aed2c7492
Parents: d75cbc5
Author: Benoy Antony 
Authored: Tue Oct 18 18:32:01 2016 -0700
Committer: Benoy Antony 
Committed: Tue Oct 18 18:32:01 2016 -0700

--
 hadoop-common-project/hadoop-auth/pom.xml   |  42 +++
 .../client/KerberosAuthenticator.java   |   8 +-
 .../server/AuthenticationFilter.java|  47 ++-
 .../server/AuthenticationHandler.java   |   2 +-
 .../server/AuthenticationHandlerUtil.java   | 105 ++
 .../server/CompositeAuthenticationHandler.java  |  30 ++
 .../authentication/server/HttpConstants.java|  55 +++
 .../server/LdapAuthenticationHandler.java   | 339 +++
 .../MultiSchemeAuthenticationHandler.java   | 209 
 .../authentication/server/package-info.java |  27 ++
 .../src/site/markdown/Configuration.md  | 137 
 .../client/TestKerberosAuthenticator.java   |  71 +++-
 .../authentication/server/LdapConstants.java|  31 ++
 .../server/TestLdapAuthenticationHandler.java   | 159 +
 .../TestMultiSchemeAuthenticationHandler.java   | 189 +++
 .../DelegationTokenAuthenticationFilter.java|   9 +-
 .../DelegationTokenAuthenticationHandler.java   |  25 +-
 ...emeDelegationTokenAuthenticationHandler.java | 182 ++
 hadoop-project/pom.xml  |   4 +
 19 files changed, 1649 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bca3852/hadoop-common-project/hadoop-auth/pom.xml
--
diff --git a/hadoop-common-project/hadoop-auth/pom.xml 
b/hadoop-common-project/hadoop-auth/pom.xml
index 4cbdc49..0b37715 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -135,6 +135,48 @@
   org.apache.kerby
   kerb-simplekdc
 
+
+  org.apache.directory.server
+  apacheds-core
+  ${apacheds.version}
+  test
+
+
+  org.apache.directory.server
+  apacheds-protocol-ldap
+  ${apacheds.version}
+  test
+
+
+  org.apache.directory.server
+  apacheds-ldif-partition
+  ${apacheds.version}
+  test
+
+
+  org.apache.directory.api
+  api-ldap-codec-core
+  ${ldap-api.version}
+  test
+
+
+  org.apache.directory.api
+  api-ldap-model
+  ${ldap-api.version}
+  test
+
+
+  org.apache.directory.server
+  apacheds-server-integ
+  ${apacheds.version}
+  test
+
+
+  org.apache.directory.server
+  apacheds-core-integ
+  ${apacheds.version}
+  test
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bca3852/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index a69ee46..ceec927 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -14,6 +14,7 @@
 package org.apache.hadoop.security.authentication.client;
 
 import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.security.authentication.server.HttpConstants;
 import org.apache.hadoop.security.authentication.util.AuthToken;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.ietf.jgss.GSSContext;
@@ -57,17 +58,18 @@ public class KerberosAuthenticator implements Authenticator 
{
   /**
* HTTP header used by the SPNEGO server endpoint during an authentication 
sequence.
*/
-  public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
+  public static final String WWW_AUTHENTICATE =
+  HttpConstants.WWW_AUTHENTICATE_HEADER;
 
   /**
* HTTP header used by the SPNEGO client endpoint during an authentication 
sequence.
*/
-  public static final String AUTHORIZATION = 

[49/50] [abbrv] hadoop git commit: HDFS-10906. Add unit tests for Trash with HDFS encryption zones. Contributed by Hanisha Koneru.

2016-10-18 Thread sjlee
HDFS-10906. Add unit tests for Trash with HDFS encryption zones. Contributed by 
Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c62ae710
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c62ae710
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c62ae710

Branch: refs/heads/HADOOP-13070
Commit: c62ae7107f025091652e79db3edfca5c4dc84e4a
Parents: 6c348c5
Author: Xiaoyu Yao 
Authored: Mon Oct 17 15:25:24 2016 -0700
Committer: Xiaoyu Yao 
Committed: Tue Oct 18 14:05:43 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  32 +-
 .../hdfs/TestTrashWithEncryptionZones.java  | 188 
 .../TestTrashWithSecureEncryptionZones.java | 443 +++
 3 files changed, 662 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c62ae710/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 963aaa6..7f26b03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -27,6 +27,7 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.BufferedOutputStream;
@@ -114,7 +115,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -167,6 +167,7 @@ import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Level;
 import org.junit.Assume;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.annotations.VisibleForTesting;
 import static 
org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
@@ -2054,4 +2055,33 @@ public class DFSTestUtil {
   }
 }
   }
+
+  public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
+  boolean shouldExistInTrash) throws Exception {
+Path trashPath = Path.mergePaths(shell.getCurrentTrashDir(path), path);
+
+verifyDelete(shell, fs, path, trashPath, shouldExistInTrash);
+  }
+
+  public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
+  Path trashPath, boolean shouldExistInTrash) throws Exception {
+assertTrue(path + " file does not exist", fs.exists(path));
+
+// Verify that trashPath has a path component named ".Trash"
+Path checkTrash = trashPath;
+while (!checkTrash.isRoot() && !checkTrash.getName().equals(".Trash")) {
+  checkTrash = checkTrash.getParent();
+}
+assertEquals("No .Trash component found in trash path " + trashPath,
+".Trash", checkTrash.getName());
+
+String[] argv = new String[]{"-rm", "-r", path.toString()};
+int res = ToolRunner.run(shell, argv);
+assertEquals("rm failed", 0, res);
+if (shouldExistInTrash) {
+  assertTrue("File not in trash : " + trashPath, fs.exists(trashPath));
+} else {
+  assertFalse("File in trash : " + trashPath, fs.exists(trashPath));
+}
+  }
 }
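
A hypothetical caller of the new verifyDelete() helper, assuming a test that already holds a Configuration (conf) and a FileSystem (fs) from a running MiniDFSCluster; the path is illustrative and not taken from the patch:

    // Delete the file through FsShell ("-rm -r") and assert it was moved
    // into the user's .Trash directory; pass false to assert the opposite.
    FsShell shell = new FsShell(conf);
    Path fileInZone = new Path("/zones/zone1/file1");  // illustrative path
    DFSTestUtil.verifyDelete(shell, fs, fileInZone, true);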

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c62ae710/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
new file mode 100644
index 000..2a8d493
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with 

[50/50] [abbrv] hadoop git commit: HADOOP-13400. Update the ApplicationClassLoader implementation in line with latest Java ClassLoader implementation. Contributed by Vrushali C.

2016-10-18 Thread sjlee
HADOOP-13400. Update the ApplicationClassLoader implementation in line with 
latest Java ClassLoader implementation. Contributed by Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0e56e36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0e56e36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0e56e36

Branch: refs/heads/HADOOP-13070
Commit: f0e56e36498a3bb793b882c55efe118d355eeae3
Parents: c62ae71
Author: Sangjin Lee 
Authored: Tue Oct 18 16:44:06 2016 -0700
Committer: Sangjin Lee 
Committed: Tue Oct 18 16:44:06 2016 -0700

--
 .../hadoop/util/ApplicationClassLoader.java | 66 ++--
 1 file changed, 33 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0e56e36/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
index 2f46e1f..9b89889 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
@@ -158,49 +158,49 @@ public class ApplicationClassLoader extends 
URLClassLoader {
   }
 
   @Override
-  protected synchronized Class loadClass(String name, boolean resolve)
+  protected Class loadClass(String name, boolean resolve)
   throws ClassNotFoundException {
-
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Loading class: " + name);
-}
+synchronized (getClassLoadingLock(name)) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Loading class: " + name);
+  }
 
-Class c = findLoadedClass(name);
-ClassNotFoundException ex = null;
+  Class c = findLoadedClass(name);
+  ClassNotFoundException ex = null;
+
+  if (c == null && !isSystemClass(name, systemClasses)) {
+// Try to load class from this classloader's URLs. Note that this is
+// like the servlet spec, not the usual Java 2 behaviour where we ask
+// the parent to attempt to load first.
+try {
+  c = findClass(name);
+  if (LOG.isDebugEnabled() && c != null) {
+LOG.debug("Loaded class: " + name + " ");
+  }
+} catch (ClassNotFoundException e) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug(e);
+  }
+  ex = e;
+}
+  }
 
-if (c == null && !isSystemClass(name, systemClasses)) {
-  // Try to load class from this classloader's URLs. Note that this is like
-  // the servlet spec, not the usual Java 2 behaviour where we ask the
-  // parent to attempt to load first.
-  try {
-c = findClass(name);
+  if (c == null) { // try parent
+c = parent.loadClass(name);
 if (LOG.isDebugEnabled() && c != null) {
-  LOG.debug("Loaded class: " + name + " ");
+  LOG.debug("Loaded class from parent: " + name + " ");
 }
-  } catch (ClassNotFoundException e) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug(e);
-}
-ex = e;
   }
-}
 
-if (c == null) { // try parent
-  c = parent.loadClass(name);
-  if (LOG.isDebugEnabled() && c != null) {
-LOG.debug("Loaded class from parent: " + name + " ");
+  if (c == null) {
+throw ex != null ? ex : new ClassNotFoundException(name);
   }
-}
 
-if (c == null) {
-  throw ex != null ? ex : new ClassNotFoundException(name);
-}
-
-if (resolve) {
-  resolveClass(c);
+  if (resolve) {
+resolveClass(c);
+  }
+  return c;
 }
-
-return c;
   }
 
   /**
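
The change above narrows the synchronization from the whole loadClass() call to the per-name lock returned by getClassLoadingLock(), matching the pattern used by the JDK's ClassLoader/URLClassLoader. A generic sketch of that pattern follows; the class name is illustrative, and whether ApplicationClassLoader also registers itself as parallel capable is not visible in this excerpt (without registerAsParallelCapable(), getClassLoadingLock() simply returns the loader instance itself).

    import java.net.URL;
    import java.net.URLClassLoader;

    public class ParallelCapableLoader extends URLClassLoader {
      static {
        // Opt in to per-class-name locking; otherwise getClassLoadingLock(name)
        // falls back to locking on the class loader instance.
        registerAsParallelCapable();
      }

      public ParallelCapableLoader(URL[] urls, ClassLoader parent) {
        super(urls, parent);
      }

      @Override
      protected Class<?> loadClass(String name, boolean resolve)
          throws ClassNotFoundException {
        synchronized (getClassLoadingLock(name)) {
          Class<?> c = findLoadedClass(name);
          if (c == null) {
            c = super.loadClass(name, false);  // usual parent-first delegation
          }
          if (resolve) {
            resolveClass(c);
          }
          return c;
        }
      }
    }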





[48/50] [abbrv] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread sjlee
HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. 
Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c348c56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c348c56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c348c56

Branch: refs/heads/HADOOP-13070
Commit: 6c348c56918973fd988b110e79231324a8befe12
Parents: b733a6f
Author: Steve Loughran 
Authored: Tue Oct 18 19:33:38 2016 +0100
Committer: Steve Loughran 
Committed: Tue Oct 18 21:16:02 2016 +0100

--
 .../src/main/resources/core-default.xml |  74 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |  16 +-
 hadoop-tools/hadoop-aws/pom.xml |  58 +-
 .../s3a/BlockingThreadPoolExecutorService.java  | 168 +---
 .../org/apache/hadoop/fs/s3a/Constants.java |  71 +-
 .../hadoop/fs/s3a/S3ABlockOutputStream.java | 703 
 .../org/apache/hadoop/fs/s3a/S3ADataBlocks.java | 821 +++
 .../hadoop/fs/s3a/S3AFastOutputStream.java  | 410 -
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 408 +++--
 .../hadoop/fs/s3a/S3AInstrumentation.java   | 248 +-
 .../apache/hadoop/fs/s3a/S3AOutputStream.java   |  57 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  39 +
 .../fs/s3a/SemaphoredDelegatingExecutor.java| 230 ++
 .../org/apache/hadoop/fs/s3a/Statistic.java |  32 +-
 .../src/site/markdown/tools/hadoop-aws/index.md | 668 +--
 .../fs/contract/s3a/ITestS3AContractDistCp.java |  10 +-
 .../hadoop/fs/s3a/AbstractS3ATestBase.java  |   1 +
 .../ITestBlockingThreadPoolExecutorService.java |  48 +-
 .../hadoop/fs/s3a/ITestS3ABlockOutputArray.java |  90 ++
 .../fs/s3a/ITestS3ABlockOutputByteBuffer.java   |  30 +
 .../hadoop/fs/s3a/ITestS3ABlockOutputDisk.java  |  30 +
 .../fs/s3a/ITestS3ABlockingThreadPool.java  |   2 +
 .../hadoop/fs/s3a/ITestS3AConfiguration.java|  29 +
 .../ITestS3AEncryptionBlockOutputStream.java|  36 +
 .../s3a/ITestS3AEncryptionFastOutputStream.java |  35 -
 .../hadoop/fs/s3a/ITestS3AFastOutputStream.java |  74 --
 .../apache/hadoop/fs/s3a/ITestS3ATestUtils.java |  98 +++
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  |  75 +-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  | 148 +++-
 .../apache/hadoop/fs/s3a/TestDataBlocks.java| 124 +++
 .../ITestS3AFileContextStatistics.java  |   1 +
 .../fs/s3a/scale/AbstractSTestS3AHugeFiles.java | 412 ++
 .../fs/s3a/scale/ITestS3ADeleteManyFiles.java   |  19 +-
 .../s3a/scale/ITestS3AHugeFilesArrayBlocks.java |  31 +
 .../ITestS3AHugeFilesByteBufferBlocks.java  |  34 +
 .../scale/ITestS3AHugeFilesClassicOutput.java   |  41 +
 .../s3a/scale/ITestS3AHugeFilesDiskBlocks.java  |  31 +
 .../hadoop/fs/s3a/scale/S3AScaleTestBase.java   | 151 ++--
 38 files changed, 4647 insertions(+), 906 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c348c56/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 4882728..daa421c 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -994,8 +994,8 @@
 
   fs.s3a.threads.max
   10
-   Maximum number of concurrent active (part)uploads,
-which each use a thread from the threadpool.
+  The total number of threads available in the filesystem for data
+uploads *or any other queued filesystem operation*.
 
 
 
@@ -1008,8 +1008,7 @@
 
   fs.s3a.max.total.tasks
   5
-  Number of (part)uploads allowed to the queue before
-blocking additional uploads.
+  The number of operations which can be queued for 
execution
 
 
 
@@ -1047,13 +1046,21 @@
   fs.s3a.multipart.purge
   false
   True if you want to purge existing multipart uploads that may 
not have been
- completed/aborted correctly
+completed/aborted correctly. The corresponding purge age is defined in
+fs.s3a.multipart.purge.age.
+If set, when the filesystem is instantiated then all outstanding uploads
+older than the purge age will be terminated -across the entire bucket.
+This will impact multipart uploads by other applications and users. so 
should
+be used sparingly, with an age value chosen to stop failed uploads, without
+breaking ongoing operations.
+  
 
 
 
   fs.s3a.multipart.purge.age
   86400
-  Minimum age in seconds of multipart uploads to 
purge
+  Minimum age in seconds of multipart uploads to purge.
+  
 
 
 
@@ -1086,10 +1093,50 @@
 
  

[31/50] [abbrv] hadoop git commit: HADOOP-13722. Code cleanup -- ViewFileSystem and InodeTree. Contributed by Manoj Govindassamy.

2016-10-18 Thread sjlee
HADOOP-13722. Code cleanup -- ViewFileSystem and InodeTree. Contributed by 
Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f4afc81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f4afc81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f4afc81

Branch: refs/heads/HADOOP-13070
Commit: 0f4afc81009129bbee89d5b6cf22c8dda612d223
Parents: 412c4c9
Author: Andrew Wang 
Authored: Mon Oct 17 13:15:11 2016 -0700
Committer: Andrew Wang 
Committed: Mon Oct 17 13:15:11 2016 -0700

--
 .../org/apache/hadoop/fs/viewfs/InodeTree.java  | 206 +--
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  91 
 .../hadoop/fs/viewfs/TestViewFsConfig.java  |  42 ++--
 3 files changed, 155 insertions(+), 184 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f4afc81/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 8c42cdf..a485a3b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -36,47 +36,45 @@ import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
-
 /**
  * InodeTree implements a mount-table as a tree of inodes.
  * It is used to implement ViewFs and ViewFileSystem.
  * In order to use it the caller must subclass it and implement
  * the abstract methods {@link #getTargetFileSystem(INodeDir)}, etc.
- * 
+ *
  * The mountable is initialized from the config variables as 
  * specified in {@link ViewFs}
  *
  * @param  is AbstractFileSystem or FileSystem
- * 
- * The three main methods are
- * {@link #InodeTreel(Configuration)} // constructor
+ *
+ * The two main methods are
  * {@link #InodeTree(Configuration, String)} // constructor
  * {@link #resolve(String, boolean)} 
  */
 
 @InterfaceAudience.Private
-@InterfaceStability.Unstable 
+@InterfaceStability.Unstable
 abstract class InodeTree {
-  static enum ResultKind {isInternalDir, isExternalDir;};
+  enum ResultKind {
+INTERNAL_DIR,
+EXTERNAL_DIR
+  }
+
   static final Path SlashPath = new Path("/");
-  
-  final INodeDir root; // the root of the mount table
-  
-  final String homedirPrefix; // the homedir config value for this mount table
-  
-  List mountPoints = new ArrayList();
-  
-  
+  private final INodeDir root; // the root of the mount table
+  private final String homedirPrefix; // the homedir for this mount table
+  private List mountPoints = new ArrayList();
+
   static class MountPoint {
 String src;
 INodeLink target;
+
 MountPoint(String srcPath, INodeLink mountLink) {
   src = srcPath;
   target = mountLink;
 }
-
   }
-  
+
   /**
* Breaks file path into component names.
* @param path
@@ -84,18 +82,19 @@ abstract class InodeTree {
*/
   static String[] breakIntoPathComponents(final String path) {
 return path == null ? null : path.split(Path.SEPARATOR);
-  } 
-  
+  }
+
   /**
* Internal class for inode tree
* @param 
*/
   abstract static class INode {
 final String fullPath; // the full path to the root
+
 public INode(String pathToNode, UserGroupInformation aUgi) {
   fullPath = pathToNode;
 }
-  };
+  }
 
   /**
* Internal class to represent an internal dir of the mount table
@@ -105,37 +104,28 @@ abstract class InodeTree {
 final Map children = new HashMap();
 T InodeDirFs =  null; // file system of this internal directory of mountT
 boolean isRoot = false;
-
+
 INodeDir(final String pathToNode, final UserGroupInformation aUgi) {
   super(pathToNode, aUgi);
 }
 
-INode resolve(final String pathComponent) throws FileNotFoundException {
- 

[45/50] [abbrv] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c348c56/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
new file mode 100644
index 000..a60d084
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
@@ -0,0 +1,412 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.scale;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.amazonaws.event.ProgressEvent;
+import com.amazonaws.event.ProgressEventType;
+import com.amazonaws.event.ProgressListener;
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.s3a.S3AFileStatus;
+import org.apache.hadoop.fs.s3a.Statistic;
+import org.apache.hadoop.util.Progressable;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+import static org.apache.hadoop.fs.s3a.Constants.*;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
+
+/**
+ * Scale test which creates a huge file.
+ *
+ * Important: the order in which these tests execute is fixed to
+ * alphabetical order. Test cases are numbered {@code test_123_} to impose
+ * an ordering based on the numbers.
+ *
+ * Having this ordering allows the tests to assume that the huge file
+ * exists. Even so: they should all have a {@link #assumeHugeFileExists()}
+ * check at the start, in case an individual test is executed.
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
+  private static final Logger LOG = LoggerFactory.getLogger(
+  AbstractSTestS3AHugeFiles.class);
+  public static final int DEFAULT_UPLOAD_BLOCKSIZE = 64 * _1KB;
+  public static final String DEFAULT_PARTITION_SIZE = "8M";
+  private Path scaleTestDir;
+  private Path hugefile;
+  private Path hugefileRenamed;
+
+  private int uploadBlockSize = DEFAULT_UPLOAD_BLOCKSIZE;
+  private int partitionSize;
+
+  @Override
+  public void setUp() throws Exception {
+super.setUp();
+
+final Path testPath = getTestPath();
+scaleTestDir = new Path(testPath, "scale");
+hugefile = new Path(scaleTestDir, "hugefile");
+hugefileRenamed = new Path(scaleTestDir, "hugefileRenamed");
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+// do nothing. Specifically: do not delete the test dir
+  }
+
+  /**
+   * Note that this can get called before test setup.
+   * @return the configuration to use.
+   */
+  @Override
+  protected Configuration createConfiguration() {
+Configuration conf = super.createConfiguration();
+partitionSize = (int)getTestPropertyBytes(conf,
+KEY_HUGE_PARTITION_SIZE,
+DEFAULT_PARTITION_SIZE);
+assertTrue("Partition size too small: " + partitionSize,
+partitionSize > MULTIPART_MIN_SIZE);
+conf.setLong(SOCKET_SEND_BUFFER, _1MB);
+conf.setLong(SOCKET_RECV_BUFFER, _1MB);
+conf.setLong(MIN_MULTIPART_THRESHOLD, partitionSize);
+conf.setInt(MULTIPART_SIZE, partitionSize);
+conf.set(USER_AGENT_PREFIX, "STestS3AHugeFileCreate");
+conf.setBoolean(FAST_UPLOAD, true);
+conf.set(FAST_UPLOAD_BUFFER, getBlockOutputBufferName());
+return conf;
+  }
+
+  /**
+   * The name of the buffering mechanism to use.
+   * @return a buffering mechanism
+   */
+  protected abstract String getBlockOutputBufferName();
+
+  @Test
+  public void test_010_CreateHugeFile() throws 
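
For reference, the createConfiguration() hunk above drives the new block output stream through the FAST_UPLOAD and FAST_UPLOAD_BUFFER constants. A hedged sketch of the same switches set programmatically outside the test; the "disk" buffer name is an assumption inferred from the new ITestS3ABlockOutputDisk test in this patch series, and the part size is just an example value.

    import org.apache.hadoop.conf.Configuration;
    import static org.apache.hadoop.fs.s3a.Constants.*;

    public class S3ABlockOutputConfig {
      public static Configuration blockOutputConf() {
        Configuration conf = new Configuration();
        conf.setBoolean(FAST_UPLOAD, true);            // use the block output stream
        conf.set(FAST_UPLOAD_BUFFER, "disk");          // assumed buffer mechanism name
        conf.setInt(MULTIPART_SIZE, 8 * 1024 * 1024);  // 8 MB parts (example value)
        return conf;
      }
    }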

[40/50] [abbrv] hadoop git commit: HADOOP-13061. Refactor erasure coders. Contributed by Kai Sasaki

2016-10-18 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c023c748/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
index afaaf24..6e679c3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
@@ -32,15 +31,11 @@ import 
org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
  * It implements {@link ErasureCoder}.
  */
 @InterfaceAudience.Private
-public class RSErasureDecoder extends AbstractErasureDecoder {
+public class RSErasureDecoder extends ErasureDecoder {
   private RawErasureDecoder rsRawDecoder;
 
-  public RSErasureDecoder(int numDataUnits, int numParityUnits) {
-super(numDataUnits, numParityUnits);
-  }
-
-  public RSErasureDecoder(ECSchema schema) {
-super(schema);
+  public RSErasureDecoder(ErasureCoderOptions options) {
+super(options);
   }
 
   @Override
@@ -56,11 +51,8 @@ public class RSErasureDecoder extends AbstractErasureDecoder 
{
 
   private RawErasureDecoder checkCreateRSRawDecoder() {
 if (rsRawDecoder == null) {
-  // TODO: we should create the raw coder according to codec.
-  ErasureCoderOptions coderOptions = new ErasureCoderOptions(
-  getNumDataUnits(), getNumParityUnits());
   rsRawDecoder = CodecUtil.createRawDecoder(getConf(),
-  ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
+  ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
 }
 return rsRawDecoder;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c023c748/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
index 2139113..7a09b92 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
@@ -32,15 +31,11 @@ import 
org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
  * It implements {@link ErasureCoder}.
  */
 @InterfaceAudience.Private
-public class RSErasureEncoder extends AbstractErasureEncoder {
+public class RSErasureEncoder extends ErasureEncoder {
   private RawErasureEncoder rawEncoder;
 
-  public RSErasureEncoder(int numDataUnits, int numParityUnits) {
-super(numDataUnits, numParityUnits);
-  }
-
-  public RSErasureEncoder(ECSchema schema) {
-super(schema);
+  public RSErasureEncoder(ErasureCoderOptions options) {
+super(options);
   }
 
   @Override
@@ -57,10 +52,8 @@ public class RSErasureEncoder extends AbstractErasureEncoder 
{
   private RawErasureEncoder checkCreateRSRawEncoder() {
 if (rawEncoder == null) {
   // TODO: we should create the raw coder according to codec.
-  ErasureCoderOptions coderOptions = new ErasureCoderOptions(
-  getNumDataUnits(), getNumParityUnits());
   rawEncoder = CodecUtil.createRawEncoder(getConf(),
-  ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
+  ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
 }
 return rawEncoder;
   }
@@ -71,4 +64,9 @@ public class RSErasureEncoder extends AbstractErasureEncoder {
   rawEncoder.release();
 }
   }
+
+  @Override
+  
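
After this refactor the coders are built from an ErasureCoderOptions object rather than raw unit counts or an ECSchema, as the changed constructors above show. A minimal sketch of constructing a matched encoder/decoder pair under that API; the 6 data / 3 parity layout is an illustrative choice, not taken from the patch.

    import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
    import org.apache.hadoop.io.erasurecode.coder.RSErasureDecoder;
    import org.apache.hadoop.io.erasurecode.coder.RSErasureEncoder;

    public class RsCoderConstruction {
      public static void main(String[] args) {
        // One options object carries the layout shared by encoder and decoder.
        ErasureCoderOptions options = new ErasureCoderOptions(6, 3);
        RSErasureEncoder encoder = new RSErasureEncoder(options);
        RSErasureDecoder decoder = new RSErasureDecoder(options);
        // Coding work is driven through ECBlockGroup/coding steps (not shown).
        encoder.release();  // frees the underlying raw coder, per the hunk above
        decoder.release();
      }
    }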

[47/50] [abbrv] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c348c56/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
deleted file mode 100644
index c25d0fb..000
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
+++ /dev/null
@@ -1,410 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import com.amazonaws.AmazonClientException;
-import com.amazonaws.event.ProgressEvent;
-import com.amazonaws.event.ProgressListener;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
-import com.amazonaws.services.s3.model.CannedAccessControlList;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
-import com.amazonaws.services.s3.model.ObjectMetadata;
-import com.amazonaws.services.s3.model.PartETag;
-import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.UploadPartRequest;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.util.Progressable;
-import org.slf4j.Logger;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.List;
-
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-
-import static org.apache.hadoop.fs.s3a.S3AUtils.*;
-import static org.apache.hadoop.fs.s3a.Statistic.*;
-
-/**
- * Upload files/parts asap directly from a memory buffer (instead of buffering
- * to a file).
- * 
- * Uploads are managed low-level rather than through the AWS TransferManager.
- * This allows for uploading each part of a multi-part upload as soon as
- * the bytes are in memory, rather than waiting until the file is closed.
- * 
- * Unstable: statistics and error handling might evolve
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class S3AFastOutputStream extends OutputStream {
-
-  private static final Logger LOG = S3AFileSystem.LOG;
-  private final String key;
-  private final String bucket;
-  private final AmazonS3 client;
-  private final int partSize;
-  private final int multiPartThreshold;
-  private final S3AFileSystem fs;
-  private final CannedAccessControlList cannedACL;
-  private final ProgressListener progressListener;
-  private final ListeningExecutorService executorService;
-  private MultiPartUpload multiPartUpload;
-  private boolean closed;
-  private ByteArrayOutputStream buffer;
-  private int bufferLimit;
-
-
-  /**
-   * Creates a fast OutputStream that uploads to S3 from memory.
-   * For MultiPartUploads, as soon as sufficient bytes have been written to
-   * the stream a part is uploaded immediately (by using the low-level
-   * multi-part upload API on the AmazonS3Client).
-   *
-   * @param client AmazonS3Client used for S3 calls
-   * @param fs S3AFilesystem
-   * @param bucket S3 bucket name
-   * @param key S3 key name
-   * @param progress report progress in order to prevent timeouts
-   * @param cannedACL used CannedAccessControlList
-   * @param partSize size of a single part in a multi-part upload (except
-   * last part)
-   * @param multiPartThreshold files at least this size use multi-part upload
-   * @param threadPoolExecutor thread factory
-   * @throws IOException on any problem
-   */
-  public S3AFastOutputStream(AmazonS3 client,
- 

[46/50] [abbrv] hadoop git commit: HADOOP-13560. S3ABlockOutputStream to support huge (many GB) file writes. Contributed by Steve Loughran

2016-10-18 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c348c56/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index cf785d5..c23e782 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -1,3 +1,4 @@
+
 

[16/50] [abbrv] hadoop git commit: HADOOP-13686. Adding additional unit test for Trash (I). Contributed by Weiwei Yang.

2016-10-18 Thread sjlee
HADOOP-13686. Adding additional unit test for Trash (I). Contributed by Weiwei 
Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dbe663d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dbe663d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dbe663d5

Branch: refs/heads/HADOOP-13070
Commit: dbe663d5241feea0c88a3a9391ad48a029001d94
Parents: 5a5a724
Author: Xiaoyu Yao 
Authored: Thu Oct 13 23:05:16 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Oct 13 23:05:16 2016 -0700

--
 .../apache/hadoop/fs/TrashPolicyDefault.java|  11 +-
 .../java/org/apache/hadoop/fs/TestTrash.java| 352 ++-
 2 files changed, 356 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbe663d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 7be..4f4c937 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -40,6 +40,8 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Time;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /** Provides a trash feature.  Files are moved to a user's trash
  * directory, a subdirectory of their home directory named ".Trash".  Files are
  * initially moved to a current sub-directory of the trash directory.
@@ -215,7 +217,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 return new Emptier(getConf(), emptierInterval);
   }
 
-  private class Emptier implements Runnable {
+  protected class Emptier implements Runnable {
 
 private Configuration conf;
 private long emptierInterval;
@@ -223,7 +225,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 Emptier(Configuration conf, long emptierInterval) throws IOException {
   this.conf = conf;
   this.emptierInterval = emptierInterval;
-  if (emptierInterval > deletionInterval || emptierInterval == 0) {
+  if (emptierInterval > deletionInterval || emptierInterval <= 0) {
 LOG.info("The configured checkpoint interval is " +
  (emptierInterval / MSECS_PER_MINUTE) + " minutes." +
  " Using an interval of " +
@@ -287,6 +289,11 @@ public class TrashPolicyDefault extends TrashPolicy {
 private long floor(long time, long interval) {
   return (time / interval) * interval;
 }
+
+@VisibleForTesting
+protected long getEmptierInterval() {
+  return this.emptierInterval/MSECS_PER_MINUTE;
+}
   }
 
   private void createCheckpoint(Path trashRoot, Date date) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbe663d5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 338aff6..7a5b25e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -29,13 +29,19 @@ import java.net.URI;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.HashSet;
+import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.TrashPolicyDefault.Emptier;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
+import org.junit.Before;
+import org.junit.Test;
 
 /**
  * This class tests commands from Trash.
@@ -45,6 +51,13 @@ public class TestTrash extends TestCase {
   private final static Path TEST_DIR = new Path(GenericTestUtils.getTempPath(
   "testTrash"));
 
+  @Before
+  public void setUp() throws IOException {
+// ensure each test initiates a FileSystem instance,
+// avoid getting an old instance from cache.
+FileSystem.closeAll();
+  }
+
   protected static Path mkdir(FileSystem fs, Path p) throws IOException {
 assertTrue(fs.mkdirs(p));
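
The new setUp() resets the FileSystem cache before every test. A minimal, self-contained sketch of why that matters, using only the default local file system and the standard fs.trash.interval key (the class name and printed values below are illustrative, not part of the patch):

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FsCacheSketch {
  public static void main(String[] args) throws IOException {
    // FileSystem.get() caches instances per (scheme, authority, user), so a
    // second get() with different trash settings can hand back the instance
    // created for the first configuration.
    Configuration first = new Configuration();
    first.setLong("fs.trash.interval", 10);
    FileSystem fs1 = FileSystem.get(first);

    Configuration second = new Configuration();
    second.setLong("fs.trash.interval", 0);
    FileSystem fs2 = FileSystem.get(second);
    System.out.println("cached instance reused: " + (fs1 == fs2)); // typically true

    FileSystem.closeAll();                   // what the new setUp() does
    FileSystem fs3 = FileSystem.get(second); // fresh instance afterwards
    System.out.println("fresh instance: " + (fs3 != fs1));
  }
}
```
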
 

[25/50] [abbrv] hadoop git commit: HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for interleaving block reports. Contributed by Vinitha Gankidi.

2016-10-18 Thread sjlee
HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for 
interleaving block reports. Contributed by Vinitha Gankidi.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/391ce535
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/391ce535
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/391ce535

Branch: refs/heads/HADOOP-13070
Commit: 391ce535a739dc92cb90017d759217265a4fd969
Parents: 30bb197
Author: Vinitha Reddy Gankidi 
Authored: Fri Oct 14 10:37:44 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Oct 14 18:13:54 2016 -0700

--
 .../server/blockmanagement/BlockManager.java| 75 ++--
 .../blockmanagement/DatanodeDescriptor.java | 48 -
 .../blockmanagement/DatanodeStorageInfo.java| 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  4 +-
 .../blockmanagement/TestBlockManager.java   | 19 +++--
 .../TestNameNodePrunesMissingStorages.java  | 70 +++---
 .../server/datanode/BlockReportTestBase.java| 50 +
 .../TestAddOverReplicatedStripedBlocks.java |  4 ++
 8 files changed, 147 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/391ce535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7949439..7b13add 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1347,6 +1347,8 @@ public class BlockManager implements BlockStatsMXBean {
   }
 }
 checkSafeMode();
+LOG.info("Removed blocks associated with storage {} from DataNode {}",
+storageInfo, node);
   }
 
   /**
@@ -2191,7 +2193,7 @@ public class BlockManager implements BlockStatsMXBean {
   public boolean processReport(final DatanodeID nodeID,
   final DatanodeStorage storage,
   final BlockListAsLongs newReport,
-  BlockReportContext context, boolean lastStorageInRpc) throws IOException 
{
+  BlockReportContext context) throws IOException {
 namesystem.writeLock();
 final long startTime = Time.monotonicNow(); //after acquiring write lock
 final long endTime;
@@ -2245,32 +2247,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
   
   storageInfo.receivedBlockReport();
-  if (context != null) {
-storageInfo.setLastBlockReportId(context.getReportId());
-if (lastStorageInRpc) {
-  int rpcsSeen = node.updateBlockReportContext(context);
-  if (rpcsSeen >= context.getTotalRpcs()) {
-long leaseId = blockReportLeaseManager.removeLease(node);
-BlockManagerFaultInjector.getInstance().
-removeBlockReportLease(node, leaseId);
-List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
-if (zombies.isEmpty()) {
-  LOG.debug("processReport 0x{}: no zombie storages found.",
-  Long.toHexString(context.getReportId()));
-} else {
-  for (DatanodeStorageInfo zombie : zombies) {
-removeZombieReplicas(context, zombie);
-  }
-}
-node.clearBlockReportContext();
-  } else {
-LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
-"report.", Long.toHexString(context.getReportId()),
-(context.getTotalRpcs() - rpcsSeen)
-);
-  }
-}
-  }
 } finally {
   endTime = Time.monotonicNow();
   namesystem.writeUnlock();
@@ -2295,36 +2271,25 @@ public class BlockManager implements BlockStatsMXBean {
 return !node.hasStaleStorages();
   }
 
-  private void removeZombieReplicas(BlockReportContext context,
-  DatanodeStorageInfo zombie) {
-LOG.warn("processReport 0x{}: removing zombie storage {}, which no " +
-"longer exists on the DataNode.",
-Long.toHexString(context.getReportId()), zombie.getStorageID());
-assert(namesystem.hasWriteLock());
-Iterator<BlockInfo> iter = zombie.getBlockIterator();
-int prevBlocks = zombie.numBlocks();
-while (iter.hasNext()) {
-  BlockInfo block = iter.next();
-  // We assume that a block can be on only one storage in a DataNode.
-  // That's why we pass in the DatanodeDescriptor 

[26/50] [abbrv] hadoop git commit: HDFS-11012. Unnecessary INFO logging on DFSClients for InvalidToken. Contributed by Harsh J.

2016-10-18 Thread sjlee
HDFS-11012. Unnecessary INFO logging on DFSClients for InvalidToken. 
Contributed by Harsh J.

This closes #142


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ad037df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ad037df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ad037df

Branch: refs/heads/HADOOP-13070
Commit: 5ad037df25ab3206509083276b7ef4ef001be48b
Parents: 391ce53
Author: Akira Ajisaka 
Authored: Sat Oct 15 22:14:24 2016 +0900
Committer: Akira Ajisaka 
Committed: Sat Oct 15 22:14:24 2016 +0900

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java| 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ad037df/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index dbffc64..5783f90 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1261,8 +1261,9 @@ public class DFSInputStream extends FSInputStream
  */
 if (ex instanceof InvalidBlockTokenException ||
 ex instanceof InvalidToken) {
-  DFSClient.LOG.info("Access token was invalid when connecting to "
-  + targetAddr + " : " + ex);
+  DFSClient.LOG.debug(
+  "Access token was invalid when connecting to {}: {}",
+  targetAddr, ex);
   return true;
 }
 return false;
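
For context, a small self-contained illustration (class name and the address value are made up) of why the patch switches to a parameterized debug call: with SLF4J-style logging the message is only assembled when DEBUG is enabled, so routine token refreshes no longer appear at INFO on every client.

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TokenLogSketch {
  private static final Logger LOG = LoggerFactory.getLogger(TokenLogSketch.class);

  public static void main(String[] args) {
    String targetAddr = "127.0.0.1:50010";                    // hypothetical datanode address
    Exception ex = new IllegalStateException("token expired");
    // The {} placeholders are filled only if DEBUG is enabled for this logger,
    // so no string building happens on the common, non-debug path.
    LOG.debug("Access token was invalid when connecting to {}: {}", targetAddr, ex);
  }
}
```
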





[33/50] [abbrv] hadoop git commit: HDFS-11013. Correct typos in native erasure coding dump code. Contributed by László Bence Nagy.

2016-10-18 Thread sjlee
HDFS-11013. Correct typos in native erasure coding dump code. Contributed by 
László Bence Nagy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b671ee68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b671ee68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b671ee68

Branch: refs/heads/HADOOP-13070
Commit: b671ee6846b79a6d106efed7cf7e1209b2cc408d
Parents: 987ee51
Author: Andrew Wang 
Authored: Mon Oct 17 14:14:50 2016 -0700
Committer: Andrew Wang 
Committed: Mon Oct 17 14:14:50 2016 -0700

--
 .../main/native/src/org/apache/hadoop/io/erasurecode/dump.c  | 8 
 .../native/src/org/apache/hadoop/io/erasurecode/isal_load.h  | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b671ee68/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
index 20bd189..e48032e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
@@ -57,11 +57,11 @@ void dumpCodingMatrix(unsigned char* buf, int n1, int n2) {
 
 void dumpEncoder(IsalEncoder* pCoder) {
   int numDataUnits = pCoder->coder.numDataUnits;
-  int numParityUnits = pCoder->coder.numDataUnits;
+  int numParityUnits = pCoder->coder.numParityUnits;
   int numAllUnits = pCoder->coder.numAllUnits;
 
-  printf("Encoding (numAlnumParityUnitslUnits = %d, numDataUnits = %d)\n",
-numParityUnits, numDataUnits);
+  printf("Encoding (numAllUnits = %d, numParityUnits = %d, numDataUnits = 
%d)\n",
+numAllUnits, numParityUnits, numDataUnits);
 
   printf("\n\nEncodeMatrix:\n");
   dumpCodingMatrix((unsigned char*) pCoder->encodeMatrix,
@@ -91,7 +91,7 @@ void dumpDecoder(IsalDecoder* pCoder) {
 
   printf("InvertMatrix:\n");
   dumpCodingMatrix((unsigned char*) pCoder->invertMatrix,
-   numDataUnits, numDataUnits);
+   numDataUnits, numAllUnits);
 
   printf("DecodeMatrix:\n");
   dumpCodingMatrix((unsigned char*) pCoder->decodeMatrix,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b671ee68/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
index 7cb7a6a..c46a531 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
@@ -57,7 +57,7 @@ typedef void (*__d_ec_encode_data_update)(int, int, int, int, 
unsigned char*,
 #endif
 
 #ifdef WINDOWS
-// For erasure_code.h
+// For gf_util.h
 typedef unsigned char (__cdecl *__d_gf_mul)(unsigned char, unsigned char);
 typedef unsigned char (__cdecl *__d_gf_inv)(unsigned char);
 typedef void (__cdecl *__d_gf_gen_rs_matrix)(unsigned char *, int, int);





[14/50] [abbrv] hadoop git commit: HADOOP-13723. AliyunOSSInputStream#read() should update read bytes stat correctly. Contributed by Mingliang Liu

2016-10-18 Thread sjlee
HADOOP-13723. AliyunOSSInputStream#read() should update read bytes stat 
correctly. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9f73f1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9f73f1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9f73f1b

Branch: refs/heads/HADOOP-13070
Commit: d9f73f1b7cd893a7d88baa9bfd1b809a5dec9e59
Parents: ae51b11
Author: Mingliang Liu 
Authored: Thu Oct 13 17:05:28 2016 -0700
Committer: Mingliang Liu 
Committed: Thu Oct 13 22:33:55 2016 -0700

--
 .../java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9f73f1b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
index b87a3a7..a3af7ce 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
@@ -123,7 +123,7 @@ public class AliyunOSSInputStream extends FSInputStream {
 }
 
 if (statistics != null && byteRead >= 0) {
-  statistics.incrementBytesRead(1);
+  statistics.incrementBytesRead(byteRead);
 }
 return byteRead;
   }





[23/50] [abbrv] hadoop git commit: HDFS-11008. Change unit test for testing parsing "-source" parameter in Balancer CLI. Contributed by Mingliang Liu

2016-10-18 Thread sjlee
HDFS-11008. Change unit test for testing parsing "-source" parameter in 
Balancer CLI. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76cc84e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76cc84e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76cc84e6

Branch: refs/heads/HADOOP-13070
Commit: 76cc84e6d41c2b02218c2c98d60481cd565e067c
Parents: aee538b
Author: Mingliang Liu 
Authored: Thu Oct 13 17:51:38 2016 -0700
Committer: Mingliang Liu 
Committed: Fri Oct 14 14:29:02 2016 -0700

--
 .../hdfs/server/balancer/TestBalancer.java  | 61 
 1 file changed, 38 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76cc84e6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 73a4cbc..f58a3ad 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -1282,6 +1282,14 @@ public class TestBalancer {
 } catch (IllegalArgumentException e) {
 
 }
+
+parameters = new String[] {"-source"};
+try {
+  Balancer.Cli.parse(parameters);
+  fail(reason + " for -source parameter");
+} catch (IllegalArgumentException ignored) {
+  // expected
+}
   }
 
   @Test
@@ -1800,11 +1808,12 @@ public class TestBalancer {
 final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
 
 { // run Balancer with min-block-size=50
-  BalancerParameters.Builder b =
-  new BalancerParameters.Builder();
-  b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
-  b.setThreshold(1);
-  final BalancerParameters p = b.build();
+  final BalancerParameters p = Balancer.Cli.parse(new String[] {
+  "-policy", BalancingPolicy.Node.INSTANCE.getName(),
+  "-threshold", "1"
+  });
+  assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
+  assertEquals(p.getThreshold(), 1.0, 0.001);
 
   conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 
50);
   final int r = Balancer.run(namenodes, p, conf);
@@ -1819,12 +1828,14 @@ public class TestBalancer {
   for(int i = capacities.length; i < datanodes.size(); i++) {
 sourceNodes.add(datanodes.get(i).getDisplayName());
   }
-  BalancerParameters.Builder b =
-  new BalancerParameters.Builder();
-  b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
-  b.setThreshold(1);
-  b.setSourceNodes(sourceNodes);
-  final BalancerParameters p = b.build();
+  final BalancerParameters p = Balancer.Cli.parse(new String[] {
+  "-policy", BalancingPolicy.Node.INSTANCE.getName(),
+  "-threshold", "1",
+  "-source", StringUtils.join(sourceNodes, ',')
+  });
+  assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
+  assertEquals(p.getThreshold(), 1.0, 0.001);
+  assertEquals(p.getSourceNodes(), sourceNodes);
 
   conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 
50);
   final int r = Balancer.run(namenodes, p, conf);
@@ -1835,12 +1846,14 @@ public class TestBalancer {
   final Set<String> sourceNodes = new HashSet<>();
   final List<DataNode> datanodes = cluster.getDataNodes();
   sourceNodes.add(datanodes.get(0).getDisplayName());
-  BalancerParameters.Builder b =
-  new BalancerParameters.Builder();
-  b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
-  b.setThreshold(1);
-  b.setSourceNodes(sourceNodes);
-  final BalancerParameters p = b.build();
+  final BalancerParameters p = Balancer.Cli.parse(new String[] {
+  "-policy", BalancingPolicy.Node.INSTANCE.getName(),
+  "-threshold", "1",
+  "-source", StringUtils.join(sourceNodes, ',')
+  });
+  assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
+  assertEquals(p.getThreshold(), 1.0, 0.001);
+  assertEquals(p.getSourceNodes(), sourceNodes);
 
   conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
   final int r = Balancer.run(namenodes, p, conf);
@@ -1853,12 +1866,14 @@ public class TestBalancer {
   for(int i = 0; i < capacities.length; i++) {
 sourceNodes.add(datanodes.get(i).getDisplayName());
   }
-  
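
For reference, the flags the updated test feeds to Balancer.Cli.parse correspond to a command-line invocation along the lines of `hdfs balancer -policy datanode -threshold 1 -source dn1.example.com:50010,dn2.example.com:50010` (host names and ports here are made up); the -source list is comma-separated, exactly as the test joins it with StringUtils.join.
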

[09/50] [abbrv] hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.

2016-10-18 Thread sjlee
HDFS-10987. Make Decommission less expensive when lot of blocks present. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/332a61fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/332a61fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/332a61fd

Branch: refs/heads/HADOOP-13070
Commit: 332a61fd74fd2a9874319232c583ab5d2c53ff03
Parents: fdce515
Author: Kihwal Lee 
Authored: Thu Oct 13 13:52:49 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 13:52:49 2016 -0500

--
 .../blockmanagement/DecommissionManager.java| 29 +++-
 1 file changed, 28 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/332a61fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 6436fab..87b36da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -389,6 +389,10 @@ public class DecommissionManager {
  */
 private int numBlocksChecked = 0;
 /**
+ * The number of blocks checked after (re)holding lock.
+ */
+private int numBlocksCheckedPerLock = 0;
+/**
  * The number of nodes that have been checked on this tick. Used for 
  * statistics.
  */
@@ -418,6 +422,7 @@ public class DecommissionManager {
   }
   // Reset the checked count at beginning of each iteration
   numBlocksChecked = 0;
+  numBlocksCheckedPerLock = 0;
   numNodesChecked = 0;
   // Check decom progress
   namesystem.writeLock();
@@ -451,7 +456,8 @@ public class DecommissionManager {
   iterkey).iterator();
   final LinkedList<DatanodeDescriptor> toRemove = new LinkedList<>();
 
-  while (it.hasNext() && !exceededNumBlocksPerCheck()) {
+  while (it.hasNext() && !exceededNumBlocksPerCheck() && namesystem
+  .isRunning()) {
 numNodesChecked++;
 final Map.Entry
 entry = it.next();
@@ -577,7 +583,28 @@ public class DecommissionManager {
   int decommissionOnlyReplicas = 0;
   int lowRedundancyInOpenFiles = 0;
   while (it.hasNext()) {
+if (insufficientList == null
+&& numBlocksCheckedPerLock >= numBlocksPerCheck) {
+  // During fullscan insufficientlyReplicated will NOT be null, 
iterator
+  // will be DN's iterator. So should not yield lock, otherwise
+  // ConcurrentModificationException could occur.
+  // Once the fullscan done, iterator will be a copy. So can yield the
+  // lock.
+  // Yielding is required in case of block number is greater than the
+  // configured per-iteration-limit.
+  namesystem.writeUnlock();
+  try {
+LOG.debug("Yielded lock during decommission check");
+Thread.sleep(0, 500);
+  } catch (InterruptedException ignored) {
+return;
+  }
+  // reset
+  numBlocksCheckedPerLock = 0;
+  namesystem.writeLock();
+}
 numBlocksChecked++;
+numBlocksCheckedPerLock++;
 final BlockInfo block = it.next();
 // Remove the block from the list if it's no longer in the block map,
 // e.g. the containing file has been deleted
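
The comment block above describes a periodic lock-yield during a long scan. A stripped-down sketch of that pattern, with invented names and a plain ReentrantReadWriteLock standing in for the namesystem lock (the real code also guards against yielding while iterating a live collection):

```java
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class YieldingScanSketch {
  // Check items under a write lock, but release and re-acquire the lock every
  // `itemsPerLock` items so other writers are not blocked for the whole scan.
  static void scan(List<Long> blocks, ReentrantReadWriteLock lock, int itemsPerLock)
      throws InterruptedException {
    int checkedSinceLock = 0;
    lock.writeLock().lock();
    try {
      for (Long block : blocks) {
        if (checkedSinceLock >= itemsPerLock) {
          lock.writeLock().unlock();     // yield briefly
          try {
            Thread.sleep(0, 500);
          } finally {
            lock.writeLock().lock();     // always re-acquire before continuing
          }
          checkedSinceLock = 0;
        }
        // ... examine `block` while holding the lock ...
        checkedSinceLock++;
      }
    } finally {
      lock.writeLock().unlock();
    }
  }
}
```
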





[04/50] [abbrv] hadoop git commit: HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by MingLiang Liu

2016-10-18 Thread sjlee
HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by 
MingLiang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12912540
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12912540
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12912540

Branch: refs/heads/HADOOP-13070
Commit: 129125404244f35ee63b8f0491a095371685e9ba
Parents: 9454dc5
Author: Brahma Reddy Battula 
Authored: Thu Oct 13 21:39:50 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Oct 13 22:05:00 2016 +0530

--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   8 +-
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 106 +--
 2 files changed, 51 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12912540/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 32401dc..a60f24b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -936,8 +936,7 @@ public class DFSAdmin extends FsShell {
   System.out.println("Balancer bandwidth is " + bandwidth
   + " bytes per second.");
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2207,7 +2206,7 @@ public class DFSAdmin extends FsShell {
   dnProxy.evictWriters();
   System.out.println("Requested writer eviction to datanode " + dn);
 } catch (IOException ioe) {
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2218,8 +2217,7 @@ public class DFSAdmin extends FsShell {
   DatanodeLocalInfo dnInfo = dnProxy.getDatanodeInfo();
   System.out.println(dnInfo.getDatanodeLocalReport());
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12912540/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index b49f73d..dca42ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
@@ -79,6 +80,7 @@ public class TestDFSAdmin {
   @Before
   public void setUp() throws Exception {
 conf = new Configuration();
+conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
 restartCluster();
 
 admin = new DFSAdmin();
@@ -116,7 +118,7 @@ public class TestDFSAdmin {
 if (cluster != null) {
   cluster.shutdown();
 }
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
 cluster.waitActive();
 datanode = cluster.getDataNodes().get(0);
 namenode = cluster.getNameNode();
@@ -171,70 +173,58 @@ public class TestDFSAdmin {
   @Test(timeout = 3)
   public void testGetDatanodeInfo() throws Exception {
 redirectStream();
-final Configuration dfsConf = new HdfsConfiguration();
-final int numDn = 2;
-
-/* init cluster */
-try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
-.numDataNodes(numDn).build()) {
-
-  miniCluster.waitActive();
-  assertEquals(numDn, miniCluster.getDataNodes().size());
-  final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+final DFSAdmin dfsAdmin = new DFSAdmin(conf);
 
-  /* init reused vars */
-  List<String> outs = null;
-  int ret;

[29/50] [abbrv] hadoop git commit: HADOOP-13661. Upgrade HTrace version. Contributed by Sean Mackrory.

2016-10-18 Thread sjlee
HADOOP-13661. Upgrade HTrace version. Contributed by Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed9fcbec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed9fcbec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed9fcbec

Branch: refs/heads/HADOOP-13070
Commit: ed9fcbec544df149d08d9ac31989a7291eff6507
Parents: 1f304b0
Author: Wei-Chiu Chuang 
Authored: Mon Oct 17 05:04:49 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Oct 17 05:04:49 2016 -0700

--
 hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md | 2 +-
 hadoop-project/pom.xml   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed9fcbec/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
index cbdee8a..9b7084d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
@@ -48,7 +48,7 @@ LocalFileSpanReceiver is included in the htrace-core4 jar 
which is bundled
 with Hadoop.)
 
 ```
-$ cp htrace-htraced/target/htrace-htraced-4.0.1-incubating.jar 
$HADOOP_HOME/share/hadoop/common/lib/
+$ cp htrace-htraced/target/htrace-htraced-4.1.0-incubating.jar 
$HADOOP_HOME/share/hadoop/common/lib/
 ```
 
 ### Dynamic update of tracing configuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed9fcbec/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 82adebf..5826cf6 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -882,7 +882,7 @@
   
 org.apache.htrace
 htrace-core4
-4.0.1-incubating
+4.1.0-incubating
   
   
 org.jdom





[19/50] [abbrv] hadoop git commit: HDFS-10883. getTrashRoot's behavior is not consistent in DFS after enabling EZ. Contributed by Yuanbo Liu.

2016-10-18 Thread sjlee
HDFS-10883. getTrashRoot's behavior is not consistent in DFS after enabling EZ. Contributed 
by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0007360c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0007360c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0007360c

Branch: refs/heads/HADOOP-13070
Commit: 0007360c3344b3485fa17de0fd2015a628de947c
Parents: 701c27a
Author: Andrew Wang 
Authored: Fri Oct 14 11:41:29 2016 -0700
Committer: Andrew Wang 
Committed: Fri Oct 14 11:41:29 2016 -0700

--
 .../hadoop/hdfs/DistributedFileSystem.java  |   5 +-
 .../src/site/markdown/TransparentEncryption.md  |   4 +-
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  10 +-
 .../namenode/TestNestedEncryptionZones.java | 175 +--
 4 files changed, 139 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0007360c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 548815f..18a29e8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2478,11 +2478,12 @@ public class DistributedFileSystem extends FileSystem {
*/
   @Override
   public Path getTrashRoot(Path path) {
-if ((path == null) || path.isRoot() || !dfs.isHDFSEncryptionEnabled()) {
+if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
   return super.getTrashRoot(path);
 }
 
-String parentSrc = path.getParent().toUri().getPath();
+String parentSrc = path.isRoot()?
+path.toUri().getPath():path.getParent().toUri().getPath();
 try {
   EncryptionZone ez = dfs.getEZForPath(parentSrc);
   if ((ez != null)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0007360c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
index e7d9f1d..b82b400 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
@@ -242,12 +242,14 @@ By default, distcp compares checksums provided by the 
filesystem to verify that
 Rename and Trash considerations
 -
 
-HDFS restricts file and directory renames across encryption zone boundaries. 
This includes renaming an encrypted file / directory into an unencrypted 
directory (e.g., `hdfs dfs mv /zone/encryptedFile /home/bob`), renaming an 
unencrypted file / directory into an encryption zone (e.g., `hdfs dfs mv 
/home/bob/unEncryptedFile /zone`), and renaming between two different 
encryption zones (e.g., `hdfs dfs mv /home/alice/zone1/foo /home/alice/zone2`). 
In these examples, `/zone`, `/home/alice/zone1`, and `/home/alice/zone2` are 
encryption zones, while `/home/bob` is not. A rename is only allowed if the 
source and destination paths are in the same encryption zone, or both paths are 
unencrypted (not in any encryption zone).
+HDFS restricts file and directory renames across encryption zone boundaries. 
This includes renaming an encrypted file / directory into an unencrypted 
directory (e.g., `hdfs dfs mv /zone/encryptedFile /home/bob`), renaming an 
unencrypted file or directory into an encryption zone (e.g., `hdfs dfs mv 
/home/bob/unEncryptedFile /zone`), and renaming between two different 
encryption zones (e.g., `hdfs dfs mv /home/alice/zone1/foo /home/alice/zone2`). 
In these examples, `/zone`, `/home/alice/zone1`, and `/home/alice/zone2` are 
encryption zones, while `/home/bob` is not. A rename is only allowed if the 
source and destination paths are in the same encryption zone, or both paths are 
unencrypted (not in any encryption zone).
 
 This restriction enhances security and eases system management significantly. 
All file EDEKs under an encryption zone are encrypted with the encryption zone 
key. Therefore, if the encryption zone key is compromised, it is important to 
identify all vulnerable files and re-encrypt them. This is fundamentally 
difficult if a file initially created in an encryption zone can be renamed to 
an arbitrary location in the filesystem.
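
A small illustration of the client-visible behavior the patch adjusts, assuming an HDFS cluster with a hypothetical encryption zone at /zone (paths and the expected output are examples, not output from the patch's tests): getTrashRoot for a path inside an encryption zone resolves to a trash directory inside that zone rather than the user's home trash, and after this change the zone root itself is treated the same way.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EzTrashRootSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();   // assumes fs.defaultFS points at the cluster
    FileSystem fs = FileSystem.get(conf);
    // For a file inside the zone, and (after this patch) for the zone root
    // itself, the trash root is expected to look like /zone/.Trash/<user>.
    System.out.println(fs.getTrashRoot(new Path("/zone/file")));
    System.out.println(fs.getTrashRoot(new Path("/zone")));
  }
}
```
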
 
 

[06/50] [abbrv] hadoop git commit: HADOOP-13565. KerberosAuthenticationHandler#authenticate should not rebuild SPN based on client request. Contributed by Xiaoyu Yao.

2016-10-18 Thread sjlee
HADOOP-13565. KerberosAuthenticationHandler#authenticate should not rebuild SPN 
based on client request. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9097e2ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9097e2ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9097e2ef

Branch: refs/heads/HADOOP-13070
Commit: 9097e2efe4c92d83c8fab88dc11be84505a6cab5
Parents: b371c56
Author: Xiaoyu Yao 
Authored: Thu Oct 13 10:52:13 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Oct 13 10:52:28 2016 -0700

--
 .../authentication/server/KerberosAuthenticationHandler.java  | 7 +--
 1 file changed, 1 insertion(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9097e2ef/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index c6d1881..07c2a31 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -343,8 +343,6 @@ public class KerberosAuthenticationHandler implements 
AuthenticationHandler {
   authorization = 
authorization.substring(KerberosAuthenticator.NEGOTIATE.length()).trim();
   final Base64 base64 = new Base64(0);
   final byte[] clientToken = base64.decode(authorization);
-  final String serverName = InetAddress.getByName(request.getServerName())
-   .getCanonicalHostName();
   try {
 token = Subject.doAs(serverSubject, new 
PrivilegedExceptionAction<AuthenticationToken>() {
 
@@ -354,10 +352,7 @@ public class KerberosAuthenticationHandler implements 
AuthenticationHandler {
 GSSContext gssContext = null;
 GSSCredential gssCreds = null;
 try {
-  gssCreds = gssManager.createCredential(
-  gssManager.createName(
-  KerberosUtil.getServicePrincipal("HTTP", serverName),
-  KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
+  gssCreds = gssManager.createCredential(null,
   GSSCredential.INDEFINITE_LIFETIME,
   new Oid[]{
 KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
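
A compact sketch of the server-side credential acquisition the patch moves to, written outside the handler and with literal OIDs instead of KerberosUtil (assumed to run inside a Subject.doAs for the keytab login, as the handler does): passing a null name asks JGSS to accept for whichever HTTP service principal in the login credentials the client actually targeted, instead of rebuilding the SPN from the client-supplied host name.

```java
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.Oid;

public class SpnegoAcceptorSketch {
  static GSSCredential acceptorCredential(GSSManager gssManager) throws GSSException {
    Oid spnegoOid = new Oid("1.3.6.1.5.5.2");       // SPNEGO mechanism
    Oid krb5Oid = new Oid("1.2.840.113554.1.2.2");  // Kerberos v5 mechanism
    return gssManager.createCredential(
        null,                                       // no fixed server principal
        GSSCredential.INDEFINITE_LIFETIME,
        new Oid[] {spnegoOid, krb5Oid},
        GSSCredential.ACCEPT_ONLY);                 // acceptor (server-side) usage
  }
}
```
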





[11/50] [abbrv] hadoop git commit: HADOOP-13024. Distcp with -delete feature on raw data not implemented. Contributed by Mavin Martin.

2016-10-18 Thread sjlee
HADOOP-13024. Distcp with -delete feature on raw data not implemented. 
Contributed by Mavin Martin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a85d079
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a85d079
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a85d079

Branch: refs/heads/HADOOP-13070
Commit: 0a85d079838f532a13ca237300386d1b3bc1b178
Parents: 8c721aa
Author: Jing Zhao 
Authored: Thu Oct 13 13:24:37 2016 -0700
Committer: Jing Zhao 
Committed: Thu Oct 13 13:24:54 2016 -0700

--
 .../apache/hadoop/tools/DistCpConstants.java| 12 +-
 .../hadoop/tools/mapred/CopyCommitter.java  |  5 ++-
 .../hadoop/tools/TestDistCpWithRawXAttrs.java   | 45 +---
 .../hadoop/tools/util/DistCpTestUtils.java  | 32 --
 4 files changed, 56 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
index 96f364c..6171aa9 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
@@ -18,6 +18,8 @@ package org.apache.hadoop.tools;
  * limitations under the License.
  */
 
+import org.apache.hadoop.fs.Path;
+
 /**
  * Utility class to hold commonly used constants.
  */
@@ -125,9 +127,17 @@ public class DistCpConstants {
   public static final int SPLIT_RATIO_DEFAULT  = 2;
 
   /**
+   * Constants for NONE file deletion
+   */
+  public static final String NONE_PATH_NAME = "/NONE";
+  public static final Path NONE_PATH = new Path(NONE_PATH_NAME);
+  public static final Path RAW_NONE_PATH = new Path(
+  DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME + NONE_PATH_NAME);
+
+  /**
* Value of reserved raw HDFS directory when copying raw.* xattrs.
*/
-  static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = "/.reserved/raw";
+  public static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = 
"/.reserved/raw";
 
   static final String HDFS_DISTCP_DIFF_DIRECTORY_NAME = ".distcp.diff.tmp";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 6d2fef5..dd653b2 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -238,7 +238,10 @@ public class CopyCommitter extends FileOutputCommitter {
 List targets = new ArrayList(1);
 Path targetFinalPath = new 
Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
 targets.add(targetFinalPath);
-DistCpOptions options = new DistCpOptions(targets, new Path("/NONE"));
+Path resultNonePath = 
Path.getPathWithoutSchemeAndAuthority(targetFinalPath)
+
.toString().startsWith(DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME)
+? DistCpConstants.RAW_NONE_PATH : DistCpConstants.NONE_PATH;
+DistCpOptions options = new DistCpOptions(targets, resultNonePath);
 //
 // Set up options to be the same from the CopyListing.buildListing's 
perspective,
 // so to collect similar listings as when doing the copy

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
index 5aef51a..8adc2cf 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
@@ -82,14 +82,7 @@ public class TestDistCpWithRawXAttrs {
 final String relDst = "/./.reserved/../.reserved/raw/../raw/dest/../dest";
 doTestPreserveRawXAttrs(relSrc, relDst, "-px", true, true,
 DistCpConstants.SUCCESS);
-

[10/50] [abbrv] hadoop git commit: Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin."

2016-10-18 Thread sjlee
Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. 
Contributed by Yiqun Lin."

This reverts commit fdce515091f0a61ffd6c9ae464a68447dedf1124.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c721aa0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c721aa0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c721aa0

Branch: refs/heads/HADOOP-13070
Commit: 8c721aa00a47a976959e3861ddd742f09db432fc
Parents: 332a61f
Author: Andrew Wang 
Authored: Thu Oct 13 13:23:12 2016 -0700
Committer: Andrew Wang 
Committed: Thu Oct 13 13:23:28 2016 -0700

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 -
 1 file changed, 4 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c721aa0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 19f3178..696b2aa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,8 +86,6 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
-DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
-
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -100,7 +98,6 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
-waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -108,7 +105,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForNumPendingDeletionBlocks(0);
+waitForBlocksToDelete();
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -185,7 +182,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForNumPendingDeletionBlocks(0);
+waitForBlocksToDelete();
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -202,8 +199,7 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForNumPendingDeletionBlocks(int numBlocks)
-  throws Exception {
+  private void waitForBlocksToDelete() throws Exception {
 GenericTestUtils.waitFor(new Supplier<Boolean>() {
 
   @Override
@@ -211,8 +207,7 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks()
-  == numBlocks) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
 return true;
   }
 } catch (Exception e) {





[12/50] [abbrv] hadoop git commit: YARN-5717. Add tests for container-executor is_feature_enabled. Contributed by Sidharta Seethana

2016-10-18 Thread sjlee
YARN-5717. Add tests for container-executor is_feature_enabled. Contributed by 
Sidharta Seethana


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf3f43e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf3f43e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf3f43e9

Branch: refs/heads/HADOOP-13070
Commit: cf3f43e95bf46030875137fc36da5c1fbe14250d
Parents: 0a85d07
Author: Chris Douglas 
Authored: Thu Oct 13 20:47:49 2016 -0700
Committer: Chris Douglas 
Committed: Thu Oct 13 20:49:07 2016 -0700

--
 .../impl/container-executor.c   | 11 ++---
 .../impl/container-executor.h   |  4 ++
 .../main/native/container-executor/impl/main.c  | 42 
 .../test/test-container-executor.c  | 51 
 4 files changed, 79 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf3f43e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index a9a7e96..8a995b1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -422,9 +422,9 @@ int change_user(uid_t user, gid_t group) {
   return 0;
 }
 
-
-static int is_feature_enabled(const char* feature_key, int default_value) {
-char *enabled_str = get_value(feature_key, &executor_cfg);
+int is_feature_enabled(const char* feature_key, int default_value,
+  struct configuration *cfg) {
+char *enabled_str = get_value(feature_key, cfg);
 int enabled = default_value;
 
 if (enabled_str != NULL) {
@@ -448,15 +448,14 @@ static int is_feature_enabled(const char* feature_key, 
int default_value) {
 }
 }
 
-
 int is_docker_support_enabled() {
 return is_feature_enabled(DOCKER_SUPPORT_ENABLED_KEY,
-DEFAULT_DOCKER_SUPPORT_ENABLED);
+DEFAULT_DOCKER_SUPPORT_ENABLED, &executor_cfg);
 }
 
 int is_tc_support_enabled() {
 return is_feature_enabled(TC_SUPPORT_ENABLED_KEY,
-DEFAULT_TC_SUPPORT_ENABLED);
+DEFAULT_TC_SUPPORT_ENABLED, &executor_cfg);
 }
 
 char* check_docker_binary(char *docker_binary) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf3f43e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
index 5c17b29..8ad5d47 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
@@ -264,6 +264,10 @@ int check_dir(const char* npath, mode_t st_mode, mode_t 
desired,
 int create_validate_dir(const char* npath, mode_t perm, const char* path,
int finalComponent);
 
+/** Check if a feature is enabled in the specified configuration. */
+int is_feature_enabled(const char* feature_key, int default_value,
+  struct configuration *cfg);
+
 /** Check if tc (traffic control) support is enabled in configuration. */
 int is_tc_support_enabled();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf3f43e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index 27a269e..47bb3b9 100644
--- 

[13/50] [abbrv] hadoop git commit: HADOOP-13669. Addendum patch for KMS Server should log exceptions before throwing.

2016-10-18 Thread sjlee
HADOOP-13669. Addendum patch for KMS Server should log exceptions before 
throwing.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae51b11f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae51b11f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae51b11f

Branch: refs/heads/HADOOP-13070
Commit: ae51b11f7872eaac558acf00fd23f6d7b1841cfe
Parents: cf3f43e
Author: Xiao Chen 
Authored: Thu Oct 13 22:32:08 2016 -0700
Committer: Xiao Chen 
Committed: Thu Oct 13 22:32:08 2016 -0700

--
 .../hadoop-kms/dev-support/findbugsExcludeFile.xml| 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae51b11f/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml 
b/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
index bc92ed7..78c4ca6 100644
--- a/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
@@ -38,4 +38,11 @@
 
 
   
+  
+  
+
+
+  
 





[03/50] [abbrv] hadoop git commit: HDFS-11000. webhdfs PUT does not work if requests are routed to call queue. Contributed by Kihwal Lee.

2016-10-18 Thread sjlee
HDFS-11000. webhdfs PUT does not work if requests are routed to call queue. 
Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9454dc5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9454dc5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9454dc5e

Branch: refs/heads/HADOOP-13070
Commit: 9454dc5e8091354cd0a4b8c8aa5f4004529db5d5
Parents: 901eca0
Author: Kihwal Lee 
Authored: Thu Oct 13 08:47:15 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 08:47:15 2016 -0500

--
 .../hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9454dc5e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 4887e35..4247a67 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -332,7 +332,7 @@ public class NamenodeWebHdfsMethods {
 } else {
   //generate a token
   final Token<? extends TokenIdentifier> t = generateDelegationToken(
-  namenode, ugi, userPrincipal.getName());
+  namenode, ugi, null);
   delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
 }
 final String query = op.toQueryString() + delegationQuery





[22/50] [abbrv] hadoop git commit: HADOOP-13721. Remove stale method ViewFileSystem#getTrashCanLocation. Contributed by Manoj Govindassamy.

2016-10-18 Thread sjlee
HADOOP-13721. Remove stale method ViewFileSystem#getTrashCanLocation. 
Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aee538be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aee538be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aee538be

Branch: refs/heads/HADOOP-13070
Commit: aee538be6c2ab324de4d7834cd3347959272de01
Parents: 8c520a2
Author: Andrew Wang 
Authored: Fri Oct 14 14:08:31 2016 -0700
Committer: Andrew Wang 
Committed: Fri Oct 14 14:08:31 2016 -0700

--
 .../main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java  | 6 --
 1 file changed, 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aee538be/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index edc59ab..f6947ff 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -220,12 +220,6 @@ public class ViewFileSystem extends FileSystem {
 this(FsConstants.VIEWFS_URI, conf);
   }
   
-  public Path getTrashCanLocation(final Path f) throws FileNotFoundException {
-final InodeTree.ResolveResult<FileSystem> res = 
-  fsState.resolve(getUriPath(f), true);
-return res.isInternalDir() ? null : 
res.targetFileSystem.getHomeDirectory();
-  }
-  
   @Override
   public URI getUri() {
 return myUri;





[44/50] [abbrv] hadoop git commit: YARN-5718. TimelineClient (and other places in YARN) shouldn't over-write HDFS client retry settings which could cause unexpected behavior. Contributed by Junping Du

2016-10-18 Thread sjlee
YARN-5718. TimelineClient (and other places in YARN) shouldn't over-write HDFS 
client retry settings which could cause unexpected behavior. Contributed by 
Junping Du.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b733a6f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b733a6f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b733a6f8

Branch: refs/heads/HADOOP-13070
Commit: b733a6f86262522e535cebc972baecbe6a6eab50
Parents: b154d3e
Author: Xuan 
Authored: Tue Oct 18 11:04:49 2016 -0700
Committer: Xuan 
Committed: Tue Oct 18 11:06:47 2016 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 17 +
 .../api/impl/FileSystemTimelineWriter.java  |  7 ---
 .../nodelabels/FileSystemNodeLabelsStore.java   |  7 +--
 .../src/main/resources/yarn-default.xml | 20 
 .../recovery/FileSystemRMStateStore.java|  5 -
 .../recovery/TestFSRMStateStore.java|  4 
 6 files changed, 2 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b733a6f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 3bd0dcc..1a30c32 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -695,10 +695,6 @@ public class YarnConfiguration extends Configuration {
   /** URI for FileSystemRMStateStore */
   public static final String FS_RM_STATE_STORE_URI = RM_PREFIX
   + "fs.state-store.uri";
-  public static final String FS_RM_STATE_STORE_RETRY_POLICY_SPEC = RM_PREFIX
-  + "fs.state-store.retry-policy-spec";
-  public static final String DEFAULT_FS_RM_STATE_STORE_RETRY_POLICY_SPEC =
-  "2000, 500";
 
   public static final String FS_RM_STATE_STORE_NUM_RETRIES =
   RM_PREFIX + "fs.state-store.num-retries";
@@ -1974,13 +1970,6 @@ public class YarnConfiguration extends Configuration {
   TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_UNKNOWN_ACTIVE_SECONDS_DEFAULT
   = 24 * 60 * 60;
 
-  public static final String
-  TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC =
-  TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX + "retry-policy-spec";
-  public static final String
-  DEFAULT_TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC =
-  "2000, 500";
-
   public static final String TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE =
   TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX
   + "leveldb-cache-read-cache-size";
@@ -2600,11 +2589,7 @@ public class YarnConfiguration extends Configuration {
   /** URI for NodeLabelManager */
   public static final String FS_NODE_LABELS_STORE_ROOT_DIR = NODE_LABELS_PREFIX
   + "fs-store.root-dir";
-  public static final String FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC =
-  NODE_LABELS_PREFIX + "fs-store.retry-policy-spec";
-  public static final String DEFAULT_FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC =
-  "2000, 500";
-  
+
   /**
* Flag to indicate if the node labels feature enabled, by default it's
* disabled

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b733a6f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
index 55d6bd2..b1284e1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
@@ -106,13 +106,6 @@ public class FileSystemTimelineWriter extends 
TimelineWriter{
 super(authUgi, client, resURI);
 
 Configuration fsConf = new Configuration(conf);
-fsConf.setBoolean("dfs.client.retry.policy.enabled", true);
-String retryPolicy =
-fsConf.get(YarnConfiguration.
-

[34/50] [abbrv] hadoop git commit: HDFS-10922. Adding additional unit tests for Trash (II). Contributed by Weiwei Yang.

2016-10-18 Thread sjlee
HDFS-10922. Adding additional unit tests for Trash (II). Contributed by Weiwei 
Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fd4c37c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fd4c37c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fd4c37c

Branch: refs/heads/HADOOP-13070
Commit: 8fd4c37c45585d761d279f2f6032ff9c6c049895
Parents: b671ee6
Author: Xiaoyu Yao 
Authored: Mon Oct 17 08:22:31 2016 -0700
Committer: Xiaoyu Yao 
Committed: Mon Oct 17 14:21:36 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  40 +
 .../apache/hadoop/hdfs/TestDFSPermission.java   |  30 ++--
 .../org/apache/hadoop/hdfs/TestHDFSTrash.java   | 145 ++-
 3 files changed, 189 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd4c37c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index f80cd78..963aaa6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -70,6 +70,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -2014,4 +2015,43 @@ public class DFSTestUtil {
   }
 }, 1000, 6);
   }
+
+  /**
+   * Close current file system and create a new instance as given
+   * {@link UserGroupInformation}.
+   */
+  public static FileSystem login(final FileSystem fs,
+  final Configuration conf, final UserGroupInformation ugi)
+  throws IOException, InterruptedException {
+if (fs != null) {
+  fs.close();
+}
+return DFSTestUtil.getFileSystemAs(ugi, conf);
+  }
+
+  /**
+   * Test if the given {@link FileStatus} user, group owner and its permission
+   * are expected, throw {@link AssertionError} if any value is not expected.
+   */
+  public static void verifyFilePermission(FileStatus stat, String owner,
+  String group, FsAction u, FsAction g, FsAction o) {
+if(stat != null) {
+  if(!Strings.isNullOrEmpty(owner)) {
+assertEquals(owner, stat.getOwner());
+  }
+  if(!Strings.isNullOrEmpty(group)) {
+assertEquals(group, stat.getGroup());
+  }
+  FsPermission permission = stat.getPermission();
+  if(u != null) {
+assertEquals(u, permission.getUserAction());
+  }
+  if (g != null) {
+assertEquals(g, permission.getGroupAction());
+  }
+  if (o != null) {
+assertEquals(o, permission.getOtherAction());
+  }
+}
+  }
 }
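
A hedged usage sketch of the two helpers added above (the path, user and group names are
placeholders; "user1" is assumed to be a UserGroupInformation built elsewhere in the test):

    fs = DFSTestUtil.login(fs, conf, user1);            // close old FS, reopen as user1
    FileStatus stat = fs.getFileStatus(new Path("/tmp/trash-test"));
    DFSTestUtil.verifyFilePermission(stat, "user1", "group2",
        FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);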

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd4c37c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index d0d00e5..2705e67 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -305,7 +305,7 @@ public class TestDFSPermission {
   fs.mkdirs(rootDir);
   fs.setPermission(rootDir, new FsPermission((short) 0777));
 
-  login(USER1);
+  fs = DFSTestUtil.login(fs, conf, USER1);
   fs.mkdirs(user1Dir);
   fs.setPermission(user1Dir, new FsPermission((short) 0755));
   fs.setOwner(user1Dir, USER1.getShortUserName(), GROUP2_NAME);
@@ -318,7 +318,7 @@ public class TestDFSPermission {
 // login as user2, attempt to delete /BSS/user1
 // this should fail because user2 has no permission to
 // its sub directory.
-login(USER2);
+fs = DFSTestUtil.login(fs, conf, USER2);
 fs.delete(user1Dir, true);
 fail("User2 should not be allowed to delete user1's dir.");
   } catch (AccessControlException e) {
@@ -331,7 +331,7 @@ public class TestDFSPermission {
   assertTrue(fs.exists(user1Dir));
 
   try {
-login(SUPERUSER);
+fs = DFSTestUtil.login(fs, conf, SUPERUSER);
 Trash 

[32/50] [abbrv] hadoop git commit: HADOOP-13724. Fix a few typos in site markdown documents. Contributed by Ding Fei.

2016-10-18 Thread sjlee
HADOOP-13724. Fix a few typos in site markdown documents. Contributed by Ding 
Fei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/987ee511
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/987ee511
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/987ee511

Branch: refs/heads/HADOOP-13070
Commit: 987ee51141a15d3f4d1df4dc792a192b92b87b5f
Parents: 0f4afc8
Author: Andrew Wang 
Authored: Mon Oct 17 13:25:58 2016 -0700
Committer: Andrew Wang 
Committed: Mon Oct 17 13:25:58 2016 -0700

--
 .../src/site/markdown/ClusterSetup.md   |  2 +-
 .../src/site/markdown/Compatibility.md  | 16 +--
 .../site/markdown/InterfaceClassification.md| 28 ++--
 .../src/site/markdown/filesystem/filesystem.md  | 17 ++--
 .../markdown/filesystem/fsdatainputstream.md| 16 +--
 .../site/markdown/filesystem/introduction.md| 12 -
 .../src/site/markdown/filesystem/model.md   |  7 ++---
 .../src/site/markdown/filesystem/notation.md|  2 +-
 .../src/site/markdown/filesystem/testing.md |  4 +--
 .../src/site/markdown/HadoopArchives.md.vm  |  2 +-
 10 files changed, 53 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/987ee511/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
index f222769..56b43e6 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
@@ -35,7 +35,7 @@ Installation
 
 Installing a Hadoop cluster typically involves unpacking the software on all 
the machines in the cluster or installing it via a packaging system as 
appropriate for your operating system. It is important to divide up the 
hardware into functions.
 
-Typically one machine in the cluster is designated as the NameNode and another 
machine the as ResourceManager, exclusively. These are the masters. Other 
services (such as Web App Proxy Server and MapReduce Job History server) are 
usually run either on dedicated hardware or on shared infrastrucutre, depending 
upon the load.
+Typically one machine in the cluster is designated as the NameNode and another 
machine as the ResourceManager, exclusively. These are the masters. Other 
services (such as Web App Proxy Server and MapReduce Job History server) are 
usually run either on dedicated hardware or on shared infrastructure, depending 
upon the load.
 
 The rest of the machines in the cluster act as both DataNode and NodeManager. 
These are the workers.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/987ee511/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index d7827b5..05b18b5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -68,7 +68,7 @@ Wire compatibility concerns data being transmitted over the 
wire between Hadoop
  Use Cases
 
 * Client-Server compatibility is required to allow users to continue using the 
old clients even after upgrading the server (cluster) to a later version (or 
vice versa). For example, a Hadoop 2.1.0 client talking to a Hadoop 2.3.0 
cluster.
-* Client-Server compatibility is also required to allow users to upgrade the 
client before upgrading the server (cluster). For example, a Hadoop 2.4.0 
client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side 
bug fixes ahead of full cluster upgrades. Note that new cluster features 
invoked by new client APIs or shell commands will not be usable. YARN 
applications that attempt to use new APIs (including new fields in data 
structures) that have not yet deployed to the cluster can expect link 
exceptions.
+* Client-Server compatibility is also required to allow users to upgrade the 
client before upgrading the server (cluster). For example, a Hadoop 2.4.0 
client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side 
bug fixes ahead of full cluster upgrades. Note that new cluster features 
invoked by new client APIs or shell commands will not be usable. YARN 
applications that attempt to use new APIs (including new fields in data 
structures) that have not yet been deployed to the cluster can expect link 

[28/50] [abbrv] hadoop git commit: YARN-5699. Retrospect yarn entity fields which are publishing in events info fields. Contributed by Rohith Sharma K S.

2016-10-18 Thread sjlee
YARN-5699. Retrospect yarn entity fields which are publishing in events info 
fields. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f304b0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f304b0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f304b0c

Branch: refs/heads/HADOOP-13070
Commit: 1f304b0c7f261369dd68839507bb609a949965ad
Parents: 5f4ae85
Author: Sangjin Lee 
Authored: Sat Oct 15 13:54:40 2016 -0700
Committer: Sangjin Lee 
Committed: Sat Oct 15 13:54:40 2016 -0700

--
 ...pplicationHistoryManagerOnTimelineStore.java |  69 ++---
 ...pplicationHistoryManagerOnTimelineStore.java |  38 +++
 .../metrics/AppAttemptMetricsConstants.java |  16 +--
 .../metrics/ContainerMetricsConstants.java  |  21 ++--
 .../timelineservice/NMTimelinePublisher.java|  34 ---
 .../metrics/TimelineServiceV1Publisher.java |  44 
 .../metrics/TimelineServiceV2Publisher.java | 101 +--
 .../metrics/TestSystemMetricsPublisher.java |  40 
 8 files changed, 186 insertions(+), 177 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f304b0c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index feeafdd..6e6576a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -463,21 +463,21 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
   if (eventInfo == null) {
 continue;
   }
-  if 
(eventInfo.containsKey(AppAttemptMetricsConstants.HOST_EVENT_INFO)) {
+  if (eventInfo.containsKey(AppAttemptMetricsConstants.HOST_INFO)) {
 host =
-eventInfo.get(AppAttemptMetricsConstants.HOST_EVENT_INFO)
+eventInfo.get(AppAttemptMetricsConstants.HOST_INFO)
 .toString();
   }
   if (eventInfo
-  .containsKey(AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.RPC_PORT_INFO)) {
 rpcPort = (Integer) eventInfo.get(
-AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO);
+AppAttemptMetricsConstants.RPC_PORT_INFO);
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.MASTER_CONTAINER_INFO)) {
 amContainerId =
 ContainerId.fromString(eventInfo.get(
-AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)
+AppAttemptMetricsConstants.MASTER_CONTAINER_INFO)
 .toString());
   }
 } else if (event.getEventType().equals(
@@ -487,39 +487,40 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 continue;
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.TRACKING_URL_INFO)) {
 trackingUrl =
 eventInfo.get(
-AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)
+AppAttemptMetricsConstants.TRACKING_URL_INFO)
 .toString();
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO)) {
+  .containsKey(
+  AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_INFO)) {
 originalTrackingUrl =
 eventInfo
 .get(
-

[43/50] [abbrv] hadoop git commit: YARN-5743. [Atsv2] Publish queue name and RMAppMetrics to ATS (Rohith Sharma K S via Varun Saxena)

2016-10-18 Thread sjlee
YARN-5743. [Atsv2] Publish queue name and RMAppMetrics to ATS (Rohith Sharma K 
S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b154d3ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b154d3ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b154d3ed

Branch: refs/heads/HADOOP-13070
Commit: b154d3edcee95254d41c237142870f39e826a519
Parents: d26a1bb
Author: Varun Saxena 
Authored: Tue Oct 18 23:32:52 2016 +0530
Committer: Varun Saxena 
Committed: Tue Oct 18 23:32:52 2016 +0530

--
 .../metrics/ApplicationMetricsConstants.java| 16 ++-
 .../metrics/TimelineServiceV2Publisher.java | 49 ++--
 .../TestSystemMetricsPublisherForV2.java| 18 ---
 3 files changed, 70 insertions(+), 13 deletions(-)
--
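
For orientation, a rough sketch of attaching one of the new metrics to a timeline entity
(the timestamp, value and entity are placeholders; whether a given field is published as an
info entry or a metric is decided by the publisher code shown in the diff below):

    Set<TimelineMetric> metrics = new HashSet<>();
    TimelineMetric preemptedMem = new TimelineMetric();   // SINGLE_VALUE by default
    preemptedMem.setId(ApplicationMetricsConstants.APP_RESOURCE_PREEMPTED_MEM);
    preemptedMem.addValue(timestamp, preemptedMemorySeconds);
    metrics.add(preemptedMem);
    entity.setMetrics(metrics);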


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b154d3ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
index 1774208..521e0af 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
@@ -71,10 +71,22 @@ public class ApplicationMetricsConstants {
   "YARN_APPLICATION_STATE";
   
   public static final String APP_CPU_METRICS =
-  "YARN_APPLICATION_CPU_METRIC";
+  "YARN_APPLICATION_CPU";
   
   public static final String APP_MEM_METRICS =
-  "YARN_APPLICATION_MEM_METRIC";
+  "YARN_APPLICATION_MEMORY";
+
+  public static final String APP_RESOURCE_PREEMPTED_CPU =
+  "YARN_APPLICATION_RESOURCE_PREEMPTED_CPU";
+
+  public static final String APP_RESOURCE_PREEMPTED_MEM =
+  "YARN_APPLICATION_RESOURCE_PREEMPTED_MEMORY";
+
+  public static final String APP_NON_AM_CONTAINER_PREEMPTED =
+  "YARN_APPLICATION_NON_AM_CONTAINER_PREEMPTED";
+
+  public static final String APP_AM_CONTAINER_PREEMPTED =
+  "YARN_APPLICATION_AM_CONTAINER_PREEMPTED";
 
   public static final String LATEST_APP_ATTEMPT_EVENT_INFO =
   "YARN_APPLICATION_LATEST_APP_ATTEMPT";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b154d3ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index dbdc1a8..f039ebe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.yarn.server.resourcemanager.metrics;
 
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -40,6 +42,7 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants;
@@ -104,6 +107,8 @@ public class TimelineServiceV2Publisher extends 

[15/50] [abbrv] hadoop git commit: HADOOP-13417. Fix javac and checkstyle warnings in hadoop-auth package.

2016-10-18 Thread sjlee
HADOOP-13417. Fix javac and checkstyle warnings in hadoop-auth package.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a5a7247
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a5a7247
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a5a7247

Branch: refs/heads/HADOOP-13070
Commit: 5a5a724731b74df9eed2de5f3370bcb8023fa2eb
Parents: d9f73f1
Author: Akira Ajisaka 
Authored: Fri Oct 14 14:45:55 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Oct 14 14:45:55 2016 +0900

--
 .../client/AuthenticatorTestCase.java   | 49 
 1 file changed, 29 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a5a7247/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
index 8f35e13..35e40d8 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
@@ -20,14 +20,15 @@ import 
org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.http.HttpResponse;
 import org.apache.http.auth.AuthScope;
 import org.apache.http.auth.Credentials;
+import org.apache.http.client.CredentialsProvider;
 import org.apache.http.client.HttpClient;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.client.methods.HttpPost;
 import org.apache.http.client.methods.HttpUriRequest;
-import org.apache.http.client.params.AuthPolicy;
 import org.apache.http.entity.InputStreamEntity;
-import org.apache.http.impl.auth.SPNegoSchemeFactory;
-import org.apache.http.impl.client.SystemDefaultHttpClient;
+import org.apache.http.impl.auth.SPNegoScheme;
+import org.apache.http.impl.client.BasicCredentialsProvider;
+import org.apache.http.impl.client.HttpClientBuilder;
 import org.apache.http.util.EntityUtils;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.servlet.Context;
@@ -53,6 +54,7 @@ import java.net.ServerSocket;
 import java.net.URL;
 import java.security.Principal;
 import java.util.Properties;
+
 import org.junit.Assert;
 
 public class AuthenticatorTestCase {
@@ -241,22 +243,29 @@ public class AuthenticatorTestCase {
 }
   }
 
-  private SystemDefaultHttpClient getHttpClient() {
-final SystemDefaultHttpClient httpClient = new SystemDefaultHttpClient();
-httpClient.getAuthSchemes().register(AuthPolicy.SPNEGO, new 
SPNegoSchemeFactory(true));
- Credentials use_jaas_creds = new Credentials() {
-   public String getPassword() {
- return null;
-   }
-
-   public Principal getUserPrincipal() {
- return null;
-   }
- };
-
- httpClient.getCredentialsProvider().setCredentials(
-   AuthScope.ANY, use_jaas_creds);
- return httpClient;
+  private HttpClient getHttpClient() {
+HttpClientBuilder builder = HttpClientBuilder.create();
+// Register auth schema
+builder.setDefaultAuthSchemeRegistry(
+s-> httpContext -> new SPNegoScheme(true, true)
+);
+
+Credentials useJaasCreds = new Credentials() {
+  public String getPassword() {
+return null;
+  }
+  public Principal getUserPrincipal() {
+return null;
+  }
+};
+
+CredentialsProvider jaasCredentialProvider
+= new BasicCredentialsProvider();
+jaasCredentialProvider.setCredentials(AuthScope.ANY, useJaasCreds);
+// Set credential provider
+builder.setDefaultCredentialsProvider(jaasCredentialProvider);
+
+return builder.build();
   }
 
   private void doHttpClientRequest(HttpClient httpClient, HttpUriRequest 
request) throws Exception {
@@ -273,7 +282,7 @@ public class AuthenticatorTestCase {
   protected void _testAuthenticationHttpClient(Authenticator authenticator, 
boolean doPost) throws Exception {
 start();
 try {
-  SystemDefaultHttpClient httpClient = getHttpClient();
+  HttpClient httpClient = getHttpClient();
   doHttpClientRequest(httpClient, new HttpGet(getBaseURL()));
 
   // Always do a GET before POST to trigger the SPNego negotiation



[01/50] [abbrv] hadoop git commit: HADOOP-13700. Remove unthrown IOException from TrashPolicy#initialize and #getInstance signatures.

2016-10-18 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13070 d84ab8a57 -> f0e56e364


HADOOP-13700. Remove unthrown IOException from TrashPolicy#initialize and 
#getInstance signatures.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12d739a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12d739a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12d739a3

Branch: refs/heads/HADOOP-13070
Commit: 12d739a34ba868b3f7f5adf7f37a60d4aca9061b
Parents: 85cd06f
Author: Andrew Wang 
Authored: Wed Oct 12 15:19:52 2016 -0700
Committer: Andrew Wang 
Committed: Wed Oct 12 15:19:52 2016 -0700

--
 .../src/main/java/org/apache/hadoop/fs/TrashPolicy.java| 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12d739a3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
index 157b9ab..2fe3fd1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
@@ -53,9 +53,8 @@ public abstract class TrashPolicy extends Configured {
* not assume trash always under /user/$USER due to HDFS encryption zone.
* @param conf the configuration to be used
* @param fs the filesystem to be used
-   * @throws IOException
*/
-  public void initialize(Configuration conf, FileSystem fs) throws IOException{
+  public void initialize(Configuration conf, FileSystem fs) {
 throw new UnsupportedOperationException();
   }
 
@@ -137,8 +136,7 @@ public abstract class TrashPolicy extends Configured {
* @param fs the file system to be used
* @return an instance of TrashPolicy
*/
-  public static TrashPolicy getInstance(Configuration conf, FileSystem fs)
-  throws IOException {
+  public static TrashPolicy getInstance(Configuration conf, FileSystem fs) {
 Class trashClass = conf.getClass(
 "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
 TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
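
With the unthrown IOException gone, obtaining a policy no longer needs a try/catch; a
minimal sketch ("conf" and "fs" assumed to be an existing Configuration and FileSystem):

    TrashPolicy trash = TrashPolicy.getInstance(conf, fs);   // no checked IOException now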





[24/50] [abbrv] hadoop git commit: HDFS-10558. DiskBalancer: Print the full path to plan file. Contributed by Xiaobing Zhou.

2016-10-18 Thread sjlee
HDFS-10558. DiskBalancer: Print the full path to plan file. Contributed by 
Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30bb1970
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30bb1970
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30bb1970

Branch: refs/heads/HADOOP-13070
Commit: 30bb1970cc27c1345871a35bcf1220e520c1804b
Parents: 76cc84e
Author: Anu Engineer 
Authored: Fri Oct 14 17:07:59 2016 -0700
Committer: Anu Engineer 
Committed: Fri Oct 14 17:07:59 2016 -0700

--
 .../server/diskbalancer/command/Command.java| 21 +-
 .../diskbalancer/command/HelpCommand.java   |  2 +-
 .../diskbalancer/command/PlanCommand.java   | 55 +++
 .../diskbalancer/command/ReportCommand.java | 11 +--
 .../hadoop/hdfs/tools/DiskBalancerCLI.java  | 27 +---
 .../diskbalancer/DiskBalancerTestUtil.java  | 72 
 .../server/diskbalancer/TestDiskBalancer.java   |  4 +-
 .../command/TestDiskBalancerCommand.java| 61 +++--
 8 files changed, 218 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30bb1970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index 2497669..11c8e7f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -53,6 +53,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URL;
@@ -82,6 +83,7 @@ public abstract class Command extends Configured {
   private FileSystem fs = null;
   private DiskBalancerCluster cluster = null;
   private int topNodes;
+  private PrintStream ps;
 
   private static final Path DEFAULT_LOG_DIR = new Path("/system/diskbalancer");
 
@@ -91,9 +93,25 @@ public abstract class Command extends Configured {
* Constructs a command.
*/
   public Command(Configuration conf) {
+this(conf, System.out);
+  }
+
+  /**
+   * Constructs a command.
+   */
+  public Command(Configuration conf, final PrintStream ps) {
 super(conf);
 // These arguments are valid for all commands.
 topNodes = 0;
+this.ps = ps;
+  }
+
+  /**
+   * Gets printing stream.
+   * @return print stream
+   */
+  PrintStream getPrintStream() {
+return ps;
   }
 
   /**
@@ -423,7 +441,8 @@ public abstract class Command extends Configured {
*
* @return Cluster.
*/
-  protected DiskBalancerCluster getCluster() {
+  @VisibleForTesting
+  DiskBalancerCluster getCluster() {
 return cluster;
   }
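
The new PrintStream hook makes command output capturable in tests; a hedged sketch,
assuming a Command subclass that forwards to the two-argument
Command(Configuration, PrintStream) constructor above:

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    PrintStream ps = new PrintStream(bytes);
    Command cmd = new ReportCommand(getConf(), ps);   // assumed two-argument subclass ctor
    // ... run the command, then inspect what it printed ...
    String captured = bytes.toString();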
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30bb1970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
index c735299..f7c84e1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
@@ -78,7 +78,7 @@ public class HelpCommand extends Command {
   command = new CancelCommand(getConf());
   break;
 case DiskBalancerCLI.REPORT:
-  command = new ReportCommand(getConf(), null);
+  command = new ReportCommand(getConf());
   break;
 default:
   command = this;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30bb1970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index 9749409..1d07a63 100644
--- 

[35/50] [abbrv] hadoop git commit: YARN-5466. DefaultContainerExecutor needs JavaDocs (templedf via rkanter)

2016-10-18 Thread sjlee
YARN-5466. DefaultContainerExecutor needs JavaDocs (templedf via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5d92359
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5d92359
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5d92359

Branch: refs/heads/HADOOP-13070
Commit: f5d92359145dfb820a9521e00e2d44c4ee96e67e
Parents: 8fd4c37
Author: Robert Kanter 
Authored: Mon Oct 17 14:29:09 2016 -0700
Committer: Robert Kanter 
Committed: Mon Oct 17 14:29:09 2016 -0700

--
 .../nodemanager/DefaultContainerExecutor.java   | 272 ---
 .../WindowsSecureContainerExecutor.java |   2 +-
 2 files changed, 231 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5d92359/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index 59b69ac..568c80b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -65,6 +65,11 @@ import 
org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 
+/**
+ * The {@code DefaultContainerExecuter} class offers generic container
+ * execution services. Process execution is handled in a platform-independent
+ * way via {@link ProcessBuilder}.
+ */
 public class DefaultContainerExecutor extends ContainerExecutor {
 
   private static final Log LOG = LogFactory
@@ -72,10 +77,17 @@ public class DefaultContainerExecutor extends 
ContainerExecutor {
 
   private static final int WIN_MAX_PATH = 260;
 
+  /**
+   * A {@link FileContext} for the local file system.
+   */
   protected final FileContext lfs;
 
   private String logDirPermissions = null;
 
+  /**
+   * Default constructor for use in testing.
+   */
+  @VisibleForTesting
   public DefaultContainerExecutor() {
 try {
   this.lfs = FileContext.getLocalFSFileContext();
@@ -84,15 +96,40 @@ public class DefaultContainerExecutor extends 
ContainerExecutor {
 }
   }
 
+  /**
+   * Create an instance with a given {@link FileContext}.
+   *
+   * @param lfs the given {@link FileContext}
+   */
   DefaultContainerExecutor(FileContext lfs) {
 this.lfs = lfs;
   }
 
+  /**
+   * Copy a file using the {@link #lfs} {@link FileContext}.
+   *
+   * @param src the file to copy
+   * @param dst where to copy the file
+   * @param owner the owner of the new copy. Used only in secure Windows
+   * clusters
+   * @throws IOException when the copy fails
+   * @see WindowsSecureContainerExecutor
+   */
   protected void copyFile(Path src, Path dst, String owner) throws IOException 
{
 lfs.util().copy(src, dst, false, true);
   }
   
-  protected void setScriptExecutable(Path script, String owner) throws 
IOException {
+  /**
+   * Make a file executable using the {@link #lfs} {@link FileContext}.
+   *
+   * @param script the path to make executable
+   * @param owner the new owner for the file. Used only in secure Windows
+   * clusters
+   * @throws IOException when the change mode operation fails
+   * @see WindowsSecureContainerExecutor
+   */
+  protected void setScriptExecutable(Path script, String owner)
+  throws IOException {
 lfs.setPermission(script, ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
   }
 
@@ -122,14 +159,16 @@ public class DefaultContainerExecutor extends 
ContainerExecutor {
 // randomly choose the local directory
 Path appStorageDir = getWorkingDir(localDirs, user, appId);
 
-String tokenFn = String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, 
locId);
+String tokenFn =
+String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId);
 Path tokenDst = new Path(appStorageDir, tokenFn);
 copyFile(nmPrivateContainerTokensPath, tokenDst, user);
-LOG.info("Copying from " + nmPrivateContainerTokensPath + " to " + 
tokenDst);
+LOG.info("Copying from " + nmPrivateContainerTokensPath
++ " to " + 

[36/50] [abbrv] hadoop git commit: HDFS-9390. Block management for maintenance states.

2016-10-18 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b61fb267/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
index 63617ad..c125f45 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -18,13 +18,19 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -32,6 +38,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
@@ -40,13 +48,23 @@ import org.junit.Test;
  * This class tests node maintenance.
  */
 public class TestMaintenanceState extends AdminStatesBaseTest {
-  public static final Log LOG = LogFactory.getLog(TestMaintenanceState.class);
-  static private final long EXPIRATION_IN_MS = 500;
+  public static final Logger LOG =
+  LoggerFactory.getLogger(TestMaintenanceState.class);
+  static private final long EXPIRATION_IN_MS = 50;
+  private int minMaintenanceR =
+  DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_DEFAULT;
 
   public TestMaintenanceState() {
 setUseCombinedHostFileManager();
   }
 
+  void setMinMaintenanceR(int minMaintenanceR) {
+this.minMaintenanceR = minMaintenanceR;
+getConf().setInt(
+DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY,
+minMaintenanceR);
+  }
+
   /**
* Verify a node can transition from AdminStates.ENTERING_MAINTENANCE to
* AdminStates.NORMAL.
@@ -55,21 +73,25 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
   public void testTakeNodeOutOfEnteringMaintenance() throws Exception {
 LOG.info("Starting testTakeNodeOutOfEnteringMaintenance");
 final int replicas = 1;
-final int numNamenodes = 1;
-final int numDatanodes = 1;
-final Path file1 = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
+final Path file = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
 
-startCluster(numNamenodes, numDatanodes);
+startCluster(1, 1);
 
-FileSystem fileSys = getCluster().getFileSystem(0);
-writeFile(fileSys, file1, replicas, 1);
+final FileSystem fileSys = getCluster().getFileSystem(0);
+final FSNamesystem ns = getCluster().getNamesystem(0);
+writeFile(fileSys, file, replicas, 1);
 
-DatanodeInfo nodeOutofService = takeNodeOutofService(0,
+final DatanodeInfo nodeOutofService = takeNodeOutofService(0,
 null, Long.MAX_VALUE, null, AdminStates.ENTERING_MAINTENANCE);
 
+// When node is in ENTERING_MAINTENANCE state, it can still serve read
+// requests
+assertNull(checkWithRetry(ns, fileSys, file, replicas, null,
+nodeOutofService));
+
 putNodeInService(0, nodeOutofService.getDatanodeUuid());
 
-cleanupFile(fileSys, file1);
+cleanupFile(fileSys, file);
   }
 
   /**
@@ -80,23 +102,21 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
   public void testEnteringMaintenanceExpiration() throws Exception {
 LOG.info("Starting testEnteringMaintenanceExpiration");
 final int replicas = 1;
-final int numNamenodes = 1;
-final int numDatanodes = 1;
-final Path file1 = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
+final Path file = new Path("/testEnteringMaintenanceExpiration.dat");
 
-startCluster(numNamenodes, numDatanodes);
+startCluster(1, 1);
 
-FileSystem fileSys = getCluster().getFileSystem(0);
-writeFile(fileSys, file1, replicas, 1);
+final FileSystem fileSys = getCluster().getFileSystem(0);
+writeFile(fileSys, file, replicas, 1);
 
-// expires in 500 milliseconds
-DatanodeInfo nodeOutofService = 

[37/50] [abbrv] hadoop git commit: HDFS-9390. Block management for maintenance states.

2016-10-18 Thread sjlee
HDFS-9390. Block management for maintenance states.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b61fb267
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b61fb267
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b61fb267

Branch: refs/heads/HADOOP-13070
Commit: b61fb267b92b2736920b4bd0c673d31e7632ebb9
Parents: f5d9235
Author: Ming Ma 
Authored: Mon Oct 17 17:45:41 2016 -0700
Committer: Ming Ma 
Committed: Mon Oct 17 17:45:41 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   5 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  53 +-
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  11 +-
 .../server/blockmanagement/BlockManager.java| 249 --
 .../BlockPlacementPolicyDefault.java|   4 +-
 .../CacheReplicationMonitor.java|   2 +-
 .../blockmanagement/DatanodeDescriptor.java |  35 +-
 .../server/blockmanagement/DatanodeManager.java |  47 +-
 .../blockmanagement/DecommissionManager.java| 142 +++-
 .../blockmanagement/ErasureCodingWork.java  |  16 +-
 .../blockmanagement/HeartbeatManager.java   |  23 +-
 .../blockmanagement/LowRedundancyBlocks.java|  47 +-
 .../server/blockmanagement/NumberReplicas.java  |  30 +-
 .../blockmanagement/StorageTypeStats.java   |   8 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   9 +-
 .../src/main/resources/hdfs-default.xml |   7 +
 .../apache/hadoop/hdfs/AdminStatesBaseTest.java |  20 +-
 .../apache/hadoop/hdfs/TestDecommission.java|   2 +-
 .../hadoop/hdfs/TestMaintenanceState.java   | 775 +--
 .../blockmanagement/TestBlockManager.java   |   8 +-
 .../namenode/TestDecommissioningStatus.java |  57 +-
 .../namenode/TestNamenodeCapacityReport.java|  78 +-
 .../hadoop/hdfs/util/HostsFileWriter.java   |   1 +
 23 files changed, 1240 insertions(+), 389 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b61fb267/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 10c0ad6..d54c109 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -220,6 +220,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   "dfs.namenode.reconstruction.pending.timeout-sec";
   public static final int 
DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
 
+  public static final String  DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY =
+  "dfs.namenode.maintenance.replication.min";
+  public static final int DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_DEFAULT
+  = 1;
+
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY;
   public static final int DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2;
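
As a configuration sketch, the new key can be raised above its default of 1 when blocks on
ENTERING_MAINTENANCE nodes should keep extra live replicas (the value 2 is only an example):

    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY, 2);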

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b61fb267/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 83870cf..23166e2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -124,48 +124,57 @@ public class DFSUtil {
   }
 
   /**
-   * Compartor for sorting DataNodeInfo[] based on decommissioned states.
-   * Decommissioned nodes are moved to the end of the array on sorting with
-   * this compartor.
+   * Comparator for sorting DataNodeInfo[] based on
+   * decommissioned and entering_maintenance states.
*/
-  public static final Comparator DECOM_COMPARATOR = 
-new Comparator() {
-  @Override
-  public int compare(DatanodeInfo a, DatanodeInfo b) {
-return a.isDecommissioned() == b.isDecommissioned() ? 0 : 
-  a.isDecommissioned() ? 1 : -1;
+  public static class ServiceComparator implements Comparator {
+@Override
+public int compare(DatanodeInfo a, DatanodeInfo b) {
+  // Decommissioned nodes will still be moved to the end of the list
+  if (a.isDecommissioned()) {
+return b.isDecommissioned() ? 0 : 1;
+  } else if 

[17/50] [abbrv] hadoop git commit: HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin.

2016-10-18 Thread sjlee
HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a9f6635
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a9f6635
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a9f6635

Branch: refs/heads/HADOOP-13070
Commit: 8a9f6635a33e9648e9396e9ec5571fa34aa0c773
Parents: dbe663d
Author: Kihwal Lee 
Authored: Fri Oct 14 11:38:48 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 14 11:38:48 2016 -0500

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 +
 1 file changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a9f6635/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 696b2aa..d856065 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,6 +86,8 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
+DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
+
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -98,6 +100,7 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
+waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -105,7 +108,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -182,7 +185,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -199,7 +202,8 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForBlocksToDelete() throws Exception {
+  private void waitForNumPendingDeletionBlocks(final int numBlocks)
+  throws Exception {
 GenericTestUtils.waitFor(new Supplier() {
 
   @Override
@@ -207,7 +211,8 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks()
+  == numBlocks) {
 return true;
   }
 } catch (Exception e) {





[08/50] [abbrv] hadoop git commit: HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin.

2016-10-18 Thread sjlee
HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdce5150
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdce5150
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdce5150

Branch: refs/heads/HADOOP-13070
Commit: fdce515091f0a61ffd6c9ae464a68447dedf1124
Parents: 008122b
Author: Andrew Wang 
Authored: Thu Oct 13 11:41:37 2016 -0700
Committer: Andrew Wang 
Committed: Thu Oct 13 11:41:37 2016 -0700

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 +
 1 file changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdce5150/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 696b2aa..19f3178 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,6 +86,8 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
+DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
+
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -98,6 +100,7 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
+waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -105,7 +108,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -182,7 +185,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -199,7 +202,8 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForBlocksToDelete() throws Exception {
+  private void waitForNumPendingDeletionBlocks(int numBlocks)
+  throws Exception {
 GenericTestUtils.waitFor(new Supplier() {
 
   @Override
@@ -207,7 +211,8 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks()
+  == numBlocks) {
 return true;
   }
 } catch (Exception e) {





[05/50] [abbrv] hadoop git commit: HDFS-10949. DiskBalancer: deprecate TestDiskBalancer#setVolumeCapacity. Contributed by Xiaobing Zhou.

2016-10-18 Thread sjlee
HDFS-10949. DiskBalancer: deprecate TestDiskBalancer#setVolumeCapacity. 
Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b371c563
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b371c563
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b371c563

Branch: refs/heads/HADOOP-13070
Commit: b371c56365c14bbab0f5cdfffc0becaabfde8145
Parents: 1291254
Author: Anu Engineer 
Authored: Thu Oct 13 10:26:07 2016 -0700
Committer: Anu Engineer 
Committed: Thu Oct 13 10:26:07 2016 -0700

--
 .../server/diskbalancer/TestDiskBalancer.java   | 44 +---
 1 file changed, 11 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b371c563/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index d911e74..9985210 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -44,7 +44,6 @@ import 
org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
-import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -137,6 +136,7 @@ public class TestDiskBalancer {
 final int dataNodeCount = 1;
 final int dataNodeIndex = 0;
 final int sourceDiskIndex = 0;
+final long cap = blockSize * 2L * blockCount;
 
 MiniDFSCluster cluster = new ClusterBuilder()
 .setBlockCount(blockCount)
@@ -144,6 +144,7 @@ public class TestDiskBalancer {
 .setDiskCount(diskCount)
 .setNumDatanodes(dataNodeCount)
 .setConf(conf)
+.setCapacities(new long[] {cap, cap})
 .build();
 try {
   DataMover dataMover = new DataMover(cluster, dataNodeIndex,
@@ -174,7 +175,7 @@ public class TestDiskBalancer {
 final int dataNodeCount = 1;
 final int dataNodeIndex = 0;
 final int sourceDiskIndex = 0;
-
+final long cap = blockSize * 2L * blockCount;
 
 MiniDFSCluster cluster = new ClusterBuilder()
 .setBlockCount(blockCount)
@@ -182,9 +183,9 @@ public class TestDiskBalancer {
 .setDiskCount(diskCount)
 .setNumDatanodes(dataNodeCount)
 .setConf(conf)
+.setCapacities(new long[] {cap, cap, cap})
 .build();
 
-
 try {
   DataMover dataMover = new DataMover(cluster, dataNodeIndex,
   sourceDiskIndex, conf, blockSize, blockCount);
@@ -221,6 +222,7 @@ public class TestDiskBalancer {
 final int dataNodeCount = 1;
 final int dataNodeIndex = 0;
 final int sourceDiskIndex = 0;
+final long cap = blockSize * 2L * blockCount;
 
 MiniDFSCluster cluster = new ClusterBuilder()
 .setBlockCount(blockCount)
@@ -228,6 +230,7 @@ public class TestDiskBalancer {
 .setDiskCount(diskCount)
 .setNumDatanodes(dataNodeCount)
 .setConf(conf)
+.setCapacities(new long[] {cap, cap})
 .build();
 
 try {
@@ -246,24 +249,6 @@ public class TestDiskBalancer {
   }
 
   /**
-   * Sets alll Disks capacity to size specified.
-   *
-   * @param cluster - DiskBalancerCluster
-   * @param size- new size of the disk
-   */
-  private void setVolumeCapacity(DiskBalancerCluster cluster, long size,
- String diskType) {
-Preconditions.checkNotNull(cluster);
-for (DiskBalancerDataNode node : cluster.getNodes()) {
-  for (DiskBalancerVolume vol :
-  node.getVolumeSets().get(diskType).getVolumes()) {
-vol.setCapacity(size);
-  }
-  node.getVolumeSets().get(diskType).computeVolumeDataDensity();
-}
-  }
-
-  /**
* Helper class that allows us to create different kinds of MiniDFSClusters
* and populate data.
*/
@@ -274,6 +259,7 @@ public class TestDiskBalancer {
 private int fileLen;
 private int blockCount;
 private int diskCount;
+private long[] capacities;
 
 public 

[18/50] [abbrv] hadoop git commit: HDFS-10735 Distcp using webhdfs on secure HA clusters fails with StandbyException

2016-10-18 Thread sjlee
HDFS-10735 Distcp using webhdfs on secure HA clusters fails with 
StandbyException


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/701c27a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/701c27a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/701c27a7

Branch: refs/heads/HADOOP-13070
Commit: 701c27a7762294e1a5fb2b3ac81f5534aa37f667
Parents: 8a9f663
Author: Benoy Antony 
Authored: Fri Oct 14 10:26:39 2016 -0700
Committer: Benoy Antony 
Committed: Fri Oct 14 10:26:39 2016 -0700

--
 .../java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java   | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/701c27a7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 19de5b5..af43d56 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
@@ -471,6 +472,13 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   IOException re = JsonUtilClient.toRemoteException(m);
+
+  //check if exception is due to communication with a Standby name node
+  if (re.getMessage() != null && re.getMessage().endsWith(
+  StandbyException.class.getSimpleName())) {
+LOG.trace("Detected StandbyException", re);
+throw new IOException(re);
+  }
   // extract UGI-related exceptions and unwrap InvalidToken
   // the NN mangles these exceptions but the DN does not and may need
   // to re-fetch a token if either report the token is expired


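The added block treats a RemoteException whose message ends with the StandbyException class name as a failover signal and rethrows it as a plain IOException, so the client's retry policy can switch NameNodes instead of going down the token/UGI unwrapping path. A minimal sketch of that check pulled into a standalone helper (the helper itself is illustrative, not part of the patch):

// Sketch only: classify a remote failure as "talking to a standby NameNode"
// by the message suffix, and rethrow it wrapped so retry/failover handling
// sees an ordinary IOException.
import java.io.IOException;
import org.apache.hadoop.ipc.StandbyException;

public final class StandbyCheck {
  private StandbyCheck() {}

  public static void rethrowIfStandby(IOException re) throws IOException {
    if (re.getMessage() != null
        && re.getMessage().endsWith(StandbyException.class.getSimpleName())) {
      throw new IOException(re);
    }
  }
}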



[39/50] [abbrv] hadoop git commit: HADOOP-13522. Add %A and %a formats for fs -stat command to print permissions. Contributed by Alex Garbarini.

2016-10-18 Thread sjlee
HADOOP-13522. Add %A and %a formats for fs -stat command to print permissions. 
Contributed by Alex Garbarini.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bedfec0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bedfec0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bedfec0c

Branch: refs/heads/HADOOP-13070
Commit: bedfec0c10144087168bc79501ffd5ab4fa52606
Parents: 0bc6d37
Author: Akira Ajisaka 
Authored: Tue Oct 18 14:37:32 2016 +0900
Committer: Akira Ajisaka 
Committed: Tue Oct 18 15:00:44 2016 +0900

--
 .../hadoop/fs/permission/FsPermission.java  | 12 
 .../java/org/apache/hadoop/fs/shell/Stat.java   | 11 ++-
 .../src/site/markdown/FileSystemShell.md|  4 ++--
 .../src/test/resources/testConf.xml |  6 +-
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 20 +---
 5 files changed, 46 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bedfec0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index 48a5b1c..fabfc12 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -183,6 +183,18 @@ public class FsPermission implements Writable {
 return toShort();
   }
 
+  /**
+   * Returns the FsPermission in an octal format.
+   *
+   * @return short Unlike {@link #toShort()} which provides a binary
+   * representation, this method returns the standard octal style permission.
+   */
+  public short toOctal() {
+int n = this.toShort();
+int octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
+return (short)octal;
+  }
+
   @Override
   public boolean equals(Object obj) {
 if (obj instanceof FsPermission) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bedfec0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
index 458d3ee..42f7843 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.fs.FileStatus;
 /**
  * Print statistics about path in specified format.
  * Format sequences:
+ *   %a: Permissions in octal
+ *   %A: Permissions in symbolic style
  *   %b: Size of file in blocks
  *   %F: Type
  *   %g: Group name of owner
@@ -56,7 +58,8 @@ class Stat extends FsCommand {
   public static final String USAGE = "[format]  ...";
   public static final String DESCRIPTION =
 "Print statistics about the file/directory at " + NEWLINE +
-"in the specified format. Format accepts filesize in" + NEWLINE +
+"in the specified format. Format accepts permissions in" + NEWLINE +
+"octal (%a) and symbolic (%A), filesize in" + NEWLINE +
 "blocks (%b), type (%F), group name of owner (%g)," + NEWLINE +
 "name (%n), block size (%o), replication (%r), user name" + NEWLINE +
 "of owner (%u), modification date (%y, %Y)." + NEWLINE +
@@ -95,6 +98,12 @@ class Stat extends FsCommand {
 // this silently drops a trailing %?
 if (i + 1 == fmt.length) break;
 switch (fmt[++i]) {
+  case 'a':
+buf.append(stat.getPermission().toOctal());
+break;
+  case 'A':
+buf.append(stat.getPermission());
+break;
   case 'b':
 buf.append(stat.getLen());
 break;

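Taken together, %a prints stat.getPermission().toOctal() and %A prints the permission's symbolic form. A short worked sketch of both outputs, including the bit arithmetic behind toOctal(); the 0644 mode is just an illustrative value:

// Sketch only: what the new %a and %A format sequences emit for rw-r--r--.
import org.apache.hadoop.fs.permission.FsPermission;

public class StatPermissionFormats {
  public static void main(String[] args) {
    FsPermission p = new FsPermission((short) 0644);
    System.out.println(p.toOctal()); // 644        -> what %a emits
    System.out.println(p);           // rw-r--r--  -> what %A emits

    // The octal digits come from toShort()'s bit layout:
    // sticky bit, then 3 bits each for owner, group and other.
    int n = p.toShort();
    int octal = (n >>> 9 & 1) * 1000 + (n >>> 6 & 7) * 100
        + (n >>> 3 & 7) * 10 + (n & 7);
    System.out.println(octal);       // 644, same as toOctal()
  }
}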
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bedfec0c/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index ee7bc28..060c775 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -639,11 +639,11 @@ stat
 
 

[20/50] [abbrv] hadoop git commit: HDFS-10827. When there are unrecoverable ec block groups, Namenode Web UI doesn't show the block names. Contributed by Takanobu Asanuma.

2016-10-18 Thread sjlee
HDFS-10827. When there are unrecoverable ec block groups, Namenode Web UI 
doesn't show the block names. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/adb96e10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/adb96e10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/adb96e10

Branch: refs/heads/HADOOP-13070
Commit: adb96e109f1ab4a2c3d469e716c084d0a891b951
Parents: 0007360
Author: Jing Zhao 
Authored: Fri Oct 14 13:21:53 2016 -0700
Committer: Jing Zhao 
Committed: Fri Oct 14 13:21:53 2016 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../server/namenode/TestNameNodeMXBean.java | 105 +++
 2 files changed, 106 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/adb96e10/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 8c59186..563682f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4999,7 +4999,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 BlockInfo blk = blkIterator.next();
 final INodeFile inode = getBlockCollection(blk);
 skip++;
-if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) 
{
+if (inode != null) {
   String src = inode.getFullPathName();
   if (src.startsWith(path)){
 corruptFiles.add(new CorruptFileBlockInfo(src, blk));

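Removing the liveReplicas() == 0 guard stops the listing from filtering out blocks that still have some live replicas, which, per the commit title, is what kept unrecoverable EC block groups out of the Web UI's corrupt-files view. A hedged sketch of reading that corrupt-file listing from a client; the path and configuration are placeholders, not taken from the patch:

// Hedged illustration only: iterate the corrupt-file listing served by the
// NameNode. Assumes fs.defaultFS in the Configuration points at an HDFS
// cluster, since only DistributedFileSystem implements this call.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListCorruptFiles {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      RemoteIterator<Path> it = fs.listCorruptFileBlocks(new Path("/"));
      while (it.hasNext()) {
        System.out.println(it.next()); // path of a file with a corrupt block
      }
    }
  }
}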
http://git-wip-us.apache.org/repos/asf/hadoop/blob/adb96e10/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index ac97a36..47f1c85 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -17,35 +17,48 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.common.base.Supplier;
 import com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
 import org.apache.hadoop.net.ServerSocketUtil;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.VersionInfo;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mortbay.util.ajax.JSON;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 import java.io.File;
+import java.io.IOException;
 

[02/50] [abbrv] hadoop git commit: HDFS-11002. Fix broken attr/getfattr/setfattr links in ExtendedAttributes.md. Contributed by Mingliang Liu.

2016-10-18 Thread sjlee
HDFS-11002. Fix broken attr/getfattr/setfattr links in ExtendedAttributes.md. 
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/901eca00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/901eca00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/901eca00

Branch: refs/heads/HADOOP-13070
Commit: 901eca004d0e7e413b109a93128892176c808d61
Parents: 12d739a
Author: Akira Ajisaka 
Authored: Thu Oct 13 14:29:30 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Oct 13 14:29:30 2016 +0900

--
 .../hadoop-hdfs/src/site/markdown/ExtendedAttributes.md  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/901eca00/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
index 5a20986..eb527ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
@@ -30,7 +30,7 @@ Overview
 
 ### HDFS extended attributes
 
-Extended attributes in HDFS are modeled after extended attributes in Linux 
(see the Linux manpage for 
[attr(5)](http://www.bestbits.at/acl/man/man5/attr.txt) and [related 
documentation](http://www.bestbits.at/acl/)). An extended attribute is a 
*name-value pair*, with a string name and binary value. Xattrs names must also 
be prefixed with a *namespace*. For example, an xattr named *myXattr* in the 
*user* namespace would be specified as **user.myXattr**. Multiple xattrs can be 
associated with a single inode.
+Extended attributes in HDFS are modeled after extended attributes in Linux 
(see the Linux manpage for 
[attr(5)](http://man7.org/linux/man-pages/man5/attr.5.html)). An extended 
attribute is a *name-value pair*, with a string name and binary value. Xattrs 
names must also be prefixed with a *namespace*. For example, an xattr named 
*myXattr* in the *user* namespace would be specified as **user.myXattr**. 
Multiple xattrs can be associated with a single inode.
 
 ### Namespaces and Permissions
 
@@ -49,7 +49,7 @@ The `raw` namespace is reserved for internal system 
attributes that sometimes ne
 Interacting with extended attributes
 
 
-The Hadoop shell has support for interacting with extended attributes via 
`hadoop fs -getfattr` and `hadoop fs -setfattr`. These commands are styled 
after the Linux [getfattr(1)](http://www.bestbits.at/acl/man/man1/getfattr.txt) 
and [setfattr(1)](http://www.bestbits.at/acl/man/man1/setfattr.txt) commands.
+The Hadoop shell has support for interacting with extended attributes via 
`hadoop fs -getfattr` and `hadoop fs -setfattr`. These commands are styled 
after the Linux 
[getfattr(1)](http://man7.org/linux/man-pages/man1/getfattr.1.html) and 
[setfattr(1)](http://man7.org/linux/man-pages/man1/setfattr.1.html) commands.
 
 ### getfattr
 

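Alongside the shell commands, the same user.myXattr example can be driven through the Java FileSystem API. A hedged illustration, not part of this documentation patch; the path and value are placeholders:

// Hedged illustration only: set and read back an extended attribute.
// The name carries its namespace prefix; the value is opaque bytes.
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class XAttrRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      Path file = new Path("/tmp/xattr-demo.txt"); // assumed to exist
      fs.setXAttr(file, "user.myXattr",
          "some value".getBytes(StandardCharsets.UTF_8));
      byte[] value = fs.getXAttr(file, "user.myXattr");
      System.out.println(new String(value, StandardCharsets.UTF_8));
    }
  }
}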




[07/50] [abbrv] hadoop git commit: HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception stacktrace. Contributed by Hanisha Koneru.

2016-10-18 Thread sjlee
HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception 
stacktrace. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/008122b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/008122b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/008122b3

Branch: refs/heads/HADOOP-13070
Commit: 008122b3c927767ac96dc876124bc591e10c9df4
Parents: 9097e2e
Author: Arpit Agarwal 
Authored: Thu Oct 13 11:37:03 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Oct 13 11:37:03 2016 -0700

--
 .../src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/008122b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
index 505f76d..a2b6980 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
@@ -177,7 +177,8 @@ public abstract class CachingGetSpaceUsed implements 
Closeable, GetSpaceUsed {
   // update the used variable
   spaceUsed.refresh();
 } catch (InterruptedException e) {
-  LOG.warn("Thread Interrupted waiting to refresh disk information", 
e);
+  LOG.warn("Thread Interrupted waiting to refresh disk information: "
+  + e.getMessage());
   Thread.currentThread().interrupt();
 }
   }

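The fix keeps the warning but logs only the exception message, since an interrupt here is a routine shutdown signal rather than an error, and the thread's interrupt status is restored as before. A minimal sketch of that pattern in a standalone refresh loop; the logger, interval and class are illustrative, not the patched CachingGetSpaceUsed:

// Sketch only: on interrupt, log just the message (no stack trace) and
// re-set the interrupt flag so the loop and its owner can shut down cleanly.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class RefreshLoop implements Runnable {
  private static final Logger LOG = LoggerFactory.getLogger(RefreshLoop.class);

  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        Thread.sleep(60000L); // placeholder for the refresh interval
        // refresh the cached disk-usage figure here
      } catch (InterruptedException e) {
        LOG.warn("Thread Interrupted waiting to refresh disk information: "
            + e.getMessage());
        Thread.currentThread().interrupt(); // preserve the interrupt status
      }
    }
  }
}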
