hadoop git commit: HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. Contributed by Jinhu Wu.

2018-11-13 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 5884b1c28 -> ef085e088


HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. 
Contributed by Jinhu Wu.

(cherry picked from commit 3fade865ce84dcf68bcd7de5a5ed1c7d904796e9)
(cherry picked from commit 64cb97fb4467513f73fde18f96f391ad34e3bb0a)
(cherry picked from commit 5d532cfc6f23f942ed10edab55ed251eb99a0664)
(cherry picked from commit 37082a664aaf99bc40522a8dfa231d71792dd976)
(cherry picked from commit 3aac324a0760b097f7d91139a2352b13236461f7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef085e08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef085e08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef085e08

Branch: refs/heads/branch-2.9
Commit: ef085e0880a3309668603e4acb48b7f9dbe9e6ce
Parents: 5884b1c
Author: Sammi Chen 
Authored: Wed Nov 14 12:58:57 2018 +0800
Committer: Sammi Chen 
Committed: Wed Nov 14 13:57:58 2018 +0800

--
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  4 --
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 22 --
 .../site/markdown/tools/hadoop-aliyun/index.md  |  5 ++
 .../oss/TestAliyunOSSBlockOutputStream.java | 70 +---
 4 files changed, 83 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef085e08/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 7356818..809c8c8 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -457,7 +457,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 
   ObjectListing objects = store.listObjects(key, maxKeys, null, false);
   while (true) {
-statistics.incrementReadOps(1);
 for (OSSObjectSummary objectSummary : objects.getObjectSummaries()) {
   String objKey = objectSummary.getKey();
   if (objKey.equals(key + "/")) {
@@ -498,7 +497,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   }
   String nextMarker = objects.getNextMarker();
   objects = store.listObjects(key, maxKeys, nextMarker, false);
-  statistics.incrementReadOps(1);
 } else {
   break;
 }
@@ -694,7 +692,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 new SemaphoredDelegatingExecutor(boundedCopyThreadPool,
 maxConcurrentCopyTasksPerDir, true));
 ObjectListing objects = store.listObjects(srcKey, maxKeys, null, true);
-statistics.incrementReadOps(1);
 // Copy files from src folder to dst
 int copiesToFinish = 0;
 while (true) {
@@ -717,7 +714,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   if (objects.isTruncated()) {
 String nextMarker = objects.getNextMarker();
 objects = store.listObjects(srcKey, maxKeys, nextMarker, true);
-statistics.incrementReadOps(1);
   } else {
 break;
   }
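
[Editor's note] The removals above are the heart of HADOOP-15917: the
FileSystem-level paging loops stop counting read ops, and the accounting
moves into AliyunOSSFileSystemStore next to the actual OSS calls, so every
request is counted once regardless of which code path issued it. A minimal
sketch of that store-side pattern; only the idea of incrementing statistics
beside the OSS call comes from the diff, while the class wiring, request
construction, and delimiter handling here are illustrative assumptions:

    import com.aliyun.oss.OSS;
    import com.aliyun.oss.model.ListObjectsRequest;
    import com.aliyun.oss.model.ObjectListing;
    import org.apache.hadoop.fs.FileSystem;

    class StoreSketch {
      private final OSS ossClient;  // Aliyun OSS SDK client
      private final String bucketName;
      private final FileSystem.Statistics statistics;

      StoreSketch(OSS ossClient, String bucketName,
          FileSystem.Statistics statistics) {
        this.ossClient = ossClient;
        this.bucketName = bucketName;
        this.statistics = statistics;
      }

      ObjectListing listObjects(String prefix, int maxKeys, String marker,
          boolean recursive) {
        ListObjectsRequest request = new ListObjectsRequest(bucketName);
        request.setPrefix(prefix);
        request.setMaxKeys(maxKeys);
        request.setMarker(marker);
        request.setDelimiter(recursive ? null : "/");
        ObjectListing listing = ossClient.listObjects(request);
        // Exactly one OSS request was issued, so count one read op here,
        // beside the call, instead of in each caller's paging loop.
        statistics.incrementReadOps(1);
        return listing;
      }
    }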

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef085e08/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
index f13ac32..f0413e3 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
@@ -175,6 +175,7 @@ public class AliyunOSSFileSystemStore {
   CannedAccessControlList cannedACL =
   CannedAccessControlList.valueOf(cannedACLName);
   ossClient.setBucketAcl(bucketName, cannedACL);
+  statistics.incrementWriteOps(1);
 }
 
 maxKeys = conf.getInt(MAX_PAGING_KEYS_KEY, MAX_PAGING_KEYS_DEFAULT);
@@ -216,6 +217,7 @@ public class AliyunOSSFileSystemStore {
   // Here, we choose the simple mode to do batch delete.
   deleteRequest.setQuiet(true);
   DeleteObjectsResult result = ossClient.deleteObjects(deleteRequest);
+  statistics.incrementWriteOps(1);
   deleteFailed = result.getDeletedObjects();
   tries++;
   if (tries == retry) {
@@ -268,11 +270,13 @@ public class AliyunOSSFileSystemStore {

hadoop git commit: HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. Contributed by Jinhu Wu.

2018-11-13 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a86b66534 -> 3aac324a0


HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. 
Contributed by Jinhu Wu.

(cherry picked from commit 3fade865ce84dcf68bcd7de5a5ed1c7d904796e9)
(cherry picked from commit 64cb97fb4467513f73fde18f96f391ad34e3bb0a)
(cherry picked from commit 5d532cfc6f23f942ed10edab55ed251eb99a0664)
(cherry picked from commit 37082a664aaf99bc40522a8dfa231d71792dd976)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3aac324a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3aac324a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3aac324a

Branch: refs/heads/branch-2
Commit: 3aac324a0760b097f7d91139a2352b13236461f7
Parents: a86b665
Author: Sammi Chen 
Authored: Wed Nov 14 12:58:57 2018 +0800
Committer: Sammi Chen 
Committed: Wed Nov 14 13:53:53 2018 +0800

--
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  4 --
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 22 --
 .../site/markdown/tools/hadoop-aliyun/index.md  |  5 ++
 .../oss/TestAliyunOSSBlockOutputStream.java | 70 +---
 4 files changed, 83 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac324a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 7356818..809c8c8 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -457,7 +457,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 
   ObjectListing objects = store.listObjects(key, maxKeys, null, false);
   while (true) {
-statistics.incrementReadOps(1);
 for (OSSObjectSummary objectSummary : objects.getObjectSummaries()) {
   String objKey = objectSummary.getKey();
   if (objKey.equals(key + "/")) {
@@ -498,7 +497,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   }
   String nextMarker = objects.getNextMarker();
   objects = store.listObjects(key, maxKeys, nextMarker, false);
-  statistics.incrementReadOps(1);
 } else {
   break;
 }
@@ -694,7 +692,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 new SemaphoredDelegatingExecutor(boundedCopyThreadPool,
 maxConcurrentCopyTasksPerDir, true));
 ObjectListing objects = store.listObjects(srcKey, maxKeys, null, true);
-statistics.incrementReadOps(1);
 // Copy files from src folder to dst
 int copiesToFinish = 0;
 while (true) {
@@ -717,7 +714,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   if (objects.isTruncated()) {
 String nextMarker = objects.getNextMarker();
 objects = store.listObjects(srcKey, maxKeys, nextMarker, true);
-statistics.incrementReadOps(1);
   } else {
 break;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac324a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
index f13ac32..f0413e3 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
@@ -175,6 +175,7 @@ public class AliyunOSSFileSystemStore {
   CannedAccessControlList cannedACL =
   CannedAccessControlList.valueOf(cannedACLName);
   ossClient.setBucketAcl(bucketName, cannedACL);
+  statistics.incrementWriteOps(1);
 }
 
 maxKeys = conf.getInt(MAX_PAGING_KEYS_KEY, MAX_PAGING_KEYS_DEFAULT);
@@ -216,6 +217,7 @@ public class AliyunOSSFileSystemStore {
   // Here, we choose the simple mode to do batch delete.
   deleteRequest.setQuiet(true);
   DeleteObjectsResult result = ossClient.deleteObjects(deleteRequest);
+  statistics.incrementWriteOps(1);
   deleteFailed = result.getDeletedObjects();
   tries++;
   if (tries == retry) {
@@ -268,11 +270,13 @@ public class AliyunOSSFileSystemStore {
*/
  public ObjectMetadata getObjectMetadata(String key) {

hadoop git commit: HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. Contributed by Jinhu Wu.

2018-11-13 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 403984051 -> 37082a664


HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. 
Contributed by Jinhu Wu.

(cherry picked from commit 3fade865ce84dcf68bcd7de5a5ed1c7d904796e9)
(cherry picked from commit 64cb97fb4467513f73fde18f96f391ad34e3bb0a)
(cherry picked from commit 5d532cfc6f23f942ed10edab55ed251eb99a0664)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37082a66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37082a66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37082a66

Branch: refs/heads/branch-3.2
Commit: 37082a664aaf99bc40522a8dfa231d71792dd976
Parents: 4039840
Author: Sammi Chen 
Authored: Wed Nov 14 12:58:57 2018 +0800
Committer: Sammi Chen 
Committed: Wed Nov 14 13:48:51 2018 +0800

--
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  4 --
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 22 --
 .../site/markdown/tools/hadoop-aliyun/index.md  |  5 ++
 .../oss/TestAliyunOSSBlockOutputStream.java | 70 +---
 4 files changed, 83 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37082a66/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 4fbb6fb..9c4435c 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -405,7 +405,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 
   ObjectListing objects = store.listObjects(key, maxKeys, null, false);
   while (true) {
-statistics.incrementReadOps(1);
 for (OSSObjectSummary objectSummary : objects.getObjectSummaries()) {
   String objKey = objectSummary.getKey();
   if (objKey.equals(key + "/")) {
@@ -446,7 +445,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   }
   String nextMarker = objects.getNextMarker();
   objects = store.listObjects(key, maxKeys, nextMarker, false);
-  statistics.incrementReadOps(1);
 } else {
   break;
 }
@@ -694,7 +692,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 new SemaphoredDelegatingExecutor(boundedCopyThreadPool,
 maxConcurrentCopyTasksPerDir, true));
 ObjectListing objects = store.listObjects(srcKey, maxKeys, null, true);
-statistics.incrementReadOps(1);
 // Copy files from src folder to dst
 int copiesToFinish = 0;
 while (true) {
@@ -717,7 +714,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   if (objects.isTruncated()) {
 String nextMarker = objects.getNextMarker();
 objects = store.listObjects(srcKey, maxKeys, nextMarker, true);
-statistics.incrementReadOps(1);
   } else {
 break;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37082a66/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
index 7639eb3..4fc1325 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
@@ -175,6 +175,7 @@ public class AliyunOSSFileSystemStore {
   CannedAccessControlList cannedACL =
   CannedAccessControlList.valueOf(cannedACLName);
   ossClient.setBucketAcl(bucketName, cannedACL);
+  statistics.incrementWriteOps(1);
 }
 
 maxKeys = conf.getInt(MAX_PAGING_KEYS_KEY, MAX_PAGING_KEYS_DEFAULT);
@@ -216,6 +217,7 @@ public class AliyunOSSFileSystemStore {
   // Here, we choose the simple mode to do batch delete.
   deleteRequest.setQuiet(true);
   DeleteObjectsResult result = ossClient.deleteObjects(deleteRequest);
+  statistics.incrementWriteOps(1);
   deleteFailed = result.getDeletedObjects();
   tries++;
   if (tries == retry) {
@@ -268,11 +270,13 @@ public class AliyunOSSFileSystemStore {
*/
   public ObjectMetadata getObjectMetadata(String key) {
 try {
-  return ossClient.getObjectMetadata(bucketName, key);

hadoop git commit: HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. Contributed by Jinhu Wu.

2018-11-13 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 8ab6aa1b4 -> 5d532cfc6


HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. 
Contributed by Jinhu Wu.

(cherry picked from commit 3fade865ce84dcf68bcd7de5a5ed1c7d904796e9)
(cherry picked from commit 64cb97fb4467513f73fde18f96f391ad34e3bb0a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d532cfc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d532cfc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d532cfc

Branch: refs/heads/branch-3.1
Commit: 5d532cfc6f23f942ed10edab55ed251eb99a0664
Parents: 8ab6aa1
Author: Sammi Chen 
Authored: Wed Nov 14 12:58:57 2018 +0800
Committer: Sammi Chen 
Committed: Wed Nov 14 13:12:22 2018 +0800

--
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  4 --
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 22 --
 .../site/markdown/tools/hadoop-aliyun/index.md  |  5 ++
 .../oss/TestAliyunOSSBlockOutputStream.java | 70 +---
 4 files changed, 83 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d532cfc/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 93e31d5..d7061e5 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -405,7 +405,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 
   ObjectListing objects = store.listObjects(key, maxKeys, null, false);
   while (true) {
-statistics.incrementReadOps(1);
 for (OSSObjectSummary objectSummary : objects.getObjectSummaries()) {
   String objKey = objectSummary.getKey();
   if (objKey.equals(key + "/")) {
@@ -446,7 +445,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   }
   String nextMarker = objects.getNextMarker();
   objects = store.listObjects(key, maxKeys, nextMarker, false);
-  statistics.incrementReadOps(1);
 } else {
   break;
 }
@@ -694,7 +692,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 new SemaphoredDelegatingExecutor(boundedCopyThreadPool,
 maxConcurrentCopyTasksPerDir, true));
 ObjectListing objects = store.listObjects(srcKey, maxKeys, null, true);
-statistics.incrementReadOps(1);
 // Copy files from src folder to dst
 int copiesToFinish = 0;
 while (true) {
@@ -717,7 +714,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   if (objects.isTruncated()) {
 String nextMarker = objects.getNextMarker();
 objects = store.listObjects(srcKey, maxKeys, nextMarker, true);
-statistics.incrementReadOps(1);
   } else {
 break;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d532cfc/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
index 0f418d7..646cd25 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
@@ -175,6 +175,7 @@ public class AliyunOSSFileSystemStore {
   CannedAccessControlList cannedACL =
   CannedAccessControlList.valueOf(cannedACLName);
   ossClient.setBucketAcl(bucketName, cannedACL);
+  statistics.incrementWriteOps(1);
 }
 
 maxKeys = conf.getInt(MAX_PAGING_KEYS_KEY, MAX_PAGING_KEYS_DEFAULT);
@@ -216,6 +217,7 @@ public class AliyunOSSFileSystemStore {
   // Here, we choose the simple mode to do batch delete.
   deleteRequest.setQuiet(true);
   DeleteObjectsResult result = ossClient.deleteObjects(deleteRequest);
+  statistics.incrementWriteOps(1);
   deleteFailed = result.getDeletedObjects();
   tries++;
   if (tries == retry) {
@@ -268,11 +270,13 @@ public class AliyunOSSFileSystemStore {
*/
   public ObjectMetadata getObjectMetadata(String key) {
 try {
-  return ossClient.getObjectMetadata(bucketName, key);
+  ObjectMetadata objectMeta = ossClient.getObjectMetadata(bucketName, key);

hadoop git commit: HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. Contributed by Jinhu Wu.

2018-11-13 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 04bba9158 -> 64cb97fb4


HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. 
Contributed by Jinhu Wu.

(cherry picked from commit 3fade865ce84dcf68bcd7de5a5ed1c7d904796e9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64cb97fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64cb97fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64cb97fb

Branch: refs/heads/branch-3.0
Commit: 64cb97fb4467513f73fde18f96f391ad34e3bb0a
Parents: 04bba91
Author: Sammi Chen 
Authored: Wed Nov 14 12:58:57 2018 +0800
Committer: Sammi Chen 
Committed: Wed Nov 14 13:09:11 2018 +0800

--
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  4 --
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 22 --
 .../site/markdown/tools/hadoop-aliyun/index.md  |  5 ++
 .../oss/TestAliyunOSSBlockOutputStream.java | 70 +---
 4 files changed, 83 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64cb97fb/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 93e31d5..d7061e5 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -405,7 +405,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 
   ObjectListing objects = store.listObjects(key, maxKeys, null, false);
   while (true) {
-statistics.incrementReadOps(1);
 for (OSSObjectSummary objectSummary : objects.getObjectSummaries()) {
   String objKey = objectSummary.getKey();
   if (objKey.equals(key + "/")) {
@@ -446,7 +445,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   }
   String nextMarker = objects.getNextMarker();
   objects = store.listObjects(key, maxKeys, nextMarker, false);
-  statistics.incrementReadOps(1);
 } else {
   break;
 }
@@ -694,7 +692,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 new SemaphoredDelegatingExecutor(boundedCopyThreadPool,
 maxConcurrentCopyTasksPerDir, true));
 ObjectListing objects = store.listObjects(srcKey, maxKeys, null, true);
-statistics.incrementReadOps(1);
 // Copy files from src folder to dst
 int copiesToFinish = 0;
 while (true) {
@@ -717,7 +714,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   if (objects.isTruncated()) {
 String nextMarker = objects.getNextMarker();
 objects = store.listObjects(srcKey, maxKeys, nextMarker, true);
-statistics.incrementReadOps(1);
   } else {
 break;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64cb97fb/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
index 0f418d7..646cd25 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
@@ -175,6 +175,7 @@ public class AliyunOSSFileSystemStore {
   CannedAccessControlList cannedACL =
   CannedAccessControlList.valueOf(cannedACLName);
   ossClient.setBucketAcl(bucketName, cannedACL);
+  statistics.incrementWriteOps(1);
 }
 
 maxKeys = conf.getInt(MAX_PAGING_KEYS_KEY, MAX_PAGING_KEYS_DEFAULT);
@@ -216,6 +217,7 @@ public class AliyunOSSFileSystemStore {
   // Here, we choose the simple mode to do batch delete.
   deleteRequest.setQuiet(true);
   DeleteObjectsResult result = ossClient.deleteObjects(deleteRequest);
+  statistics.incrementWriteOps(1);
   deleteFailed = result.getDeletedObjects();
   tries++;
   if (tries == retry) {
@@ -268,11 +270,13 @@ public class AliyunOSSFileSystemStore {
*/
   public ObjectMetadata getObjectMetadata(String key) {
 try {
-  return ossClient.getObjectMetadata(bucketName, key);
+  ObjectMetadata objectMeta = ossClient.getObjectMetadata(bucketName, key);
+  statistics.incrementReadOps(1);

hadoop git commit: HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. Contributed by Jinhu Wu.

2018-11-13 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/trunk a13be203b -> 3fade865c


HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. 
Contributed by Jinhu Wu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fade865
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fade865
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fade865

Branch: refs/heads/trunk
Commit: 3fade865ce84dcf68bcd7de5a5ed1c7d904796e9
Parents: a13be20
Author: Sammi Chen 
Authored: Wed Nov 14 12:58:57 2018 +0800
Committer: Sammi Chen 
Committed: Wed Nov 14 12:58:57 2018 +0800

--
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  4 --
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 22 --
 .../site/markdown/tools/hadoop-aliyun/index.md  |  5 ++
 .../oss/TestAliyunOSSBlockOutputStream.java | 70 +---
 4 files changed, 83 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fade865/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 4fbb6fb..9c4435c 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -405,7 +405,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 
   ObjectListing objects = store.listObjects(key, maxKeys, null, false);
   while (true) {
-statistics.incrementReadOps(1);
 for (OSSObjectSummary objectSummary : objects.getObjectSummaries()) {
   String objKey = objectSummary.getKey();
   if (objKey.equals(key + "/")) {
@@ -446,7 +445,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   }
   String nextMarker = objects.getNextMarker();
   objects = store.listObjects(key, maxKeys, nextMarker, false);
-  statistics.incrementReadOps(1);
 } else {
   break;
 }
@@ -694,7 +692,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 new SemaphoredDelegatingExecutor(boundedCopyThreadPool,
 maxConcurrentCopyTasksPerDir, true));
 ObjectListing objects = store.listObjects(srcKey, maxKeys, null, true);
-statistics.incrementReadOps(1);
 // Copy files from src folder to dst
 int copiesToFinish = 0;
 while (true) {
@@ -717,7 +714,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   if (objects.isTruncated()) {
 String nextMarker = objects.getNextMarker();
 objects = store.listObjects(srcKey, maxKeys, nextMarker, true);
-statistics.incrementReadOps(1);
   } else {
 break;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fade865/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
index 7639eb3..4fc1325 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
@@ -175,6 +175,7 @@ public class AliyunOSSFileSystemStore {
   CannedAccessControlList cannedACL =
   CannedAccessControlList.valueOf(cannedACLName);
   ossClient.setBucketAcl(bucketName, cannedACL);
+  statistics.incrementWriteOps(1);
 }
 
 maxKeys = conf.getInt(MAX_PAGING_KEYS_KEY, MAX_PAGING_KEYS_DEFAULT);
@@ -216,6 +217,7 @@ public class AliyunOSSFileSystemStore {
   // Here, we choose the simple mode to do batch delete.
   deleteRequest.setQuiet(true);
   DeleteObjectsResult result = ossClient.deleteObjects(deleteRequest);
+  statistics.incrementWriteOps(1);
   deleteFailed = result.getDeletedObjects();
   tries++;
   if (tries == retry) {
@@ -268,11 +270,13 @@ public class AliyunOSSFileSystemStore {
*/
   public ObjectMetadata getObjectMetadata(String key) {
 try {
-  return ossClient.getObjectMetadata(bucketName, key);
+  ObjectMetadata objectMeta = ossClient.getObjectMetadata(bucketName, key);
+  statistics.incrementReadOps(1);
+  return objectMeta;
 } catch (OSSException osse) {
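
[Editor's note] The trunk diff above shows the read-op accounting added to
getObjectMetadata. With the counters kept beside the OSS calls, callers can
observe them through the standard FileSystem.Statistics API. A hedged usage
sketch; the bucket URI and object path are placeholders, and OSS credentials
must already be configured:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class OssStatsProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("oss://example-bucket/"), conf);
        fs.getFileStatus(new Path("/some-object"));  // one metadata request
        // Per-scheme counters maintained by the patched store methods.
        FileSystem.Statistics stats =
            FileSystem.getStatistics("oss", fs.getClass());
        System.out.println("readOps=" + stats.getReadOps()
            + ", writeOps=" + stats.getWriteOps());
      }
    }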

[hadoop] Git Push Summary

2018-11-13 Thread aajisaka
Repository: hadoop
Updated Tags:  refs/tags/YARN-5355-2017-05-25 [deleted] 193746245




[hadoop] Git Push Summary

2018-11-13 Thread aajisaka
Repository: hadoop
Updated Tags:  refs/tags/release-3.2.0-RC0 [deleted] 534f03f20




[hadoop] Git Push Summary

2018-11-13 Thread aajisaka
Repository: hadoop
Updated Tags:  refs/tags/YARN-5355-2017-05-25 [created] 193746245
  refs/tags/rel/release- [created] f474b7d22
  refs/tags/release-2.9.2-RC0 [created] 562ac7c57
  refs/tags/release-3.2.0-RC0 [created] 534f03f20




[1/2] hadoop git commit: HADOOP-15876. Use keySet().removeAll() to remove multiple keys from Map in AzureBlobFileSystemStore

2018-11-13 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 5afd7efe2 -> 403984051
  refs/heads/trunk fcbd205cc -> a13be203b


HADOOP-15876. Use keySet().removeAll() to remove multiple keys from Map in 
AzureBlobFileSystemStore

Contributed by Da Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a13be203
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a13be203
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a13be203

Branch: refs/heads/trunk
Commit: a13be203b7877ba56ef63aac4a2e65d4e1a4adbc
Parents: fcbd205
Author: Da Zhou 
Authored: Tue Nov 13 21:46:18 2018 +
Committer: Steve Loughran 
Committed: Tue Nov 13 21:46:18 2018 +

--
 .../org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java  | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a13be203/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
--
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index bfdbba8..f300a9a 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -678,9 +678,7 @@ public class AzureBlobFileSystemStore {
   }
 }
 
-for (Map.Entry<String, String> defaultAclEntry : defaultAclEntries.entrySet()) {
-  aclEntries.remove(defaultAclEntry.getKey());
-}
+aclEntries.keySet().removeAll(defaultAclEntries.keySet());
 
 client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true),
 AbfsAclHelper.serializeAclSpec(aclEntries), eTag);
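
[Editor's note] The one-liner works because Map.keySet() returns a view backed
by the map: removing keys from the view removes the corresponding entries, and
removeAll() does it in bulk. A self-contained illustration; the ACL strings
are made up for the example:

    import java.util.HashMap;
    import java.util.Map;

    public class KeySetRemoveAllDemo {
      public static void main(String[] args) {
        Map<String, String> aclEntries = new HashMap<>();
        aclEntries.put("user:alice", "rwx");
        aclEntries.put("default:user:alice", "r-x");

        Map<String, String> defaultAclEntries = new HashMap<>();
        defaultAclEntries.put("default:user:alice", "r-x");

        // keySet() is a live view, so this mutates aclEntries directly.
        aclEntries.keySet().removeAll(defaultAclEntries.keySet());

        System.out.println(aclEntries);  // {user:alice=rwx}
      }
    }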





[2/2] hadoop git commit: HADOOP-15876. Use keySet().removeAll() to remove multiple keys from Map in AzureBlobFileSystemStore

2018-11-13 Thread stevel
HADOOP-15876. Use keySet().removeAll() to remove multiple keys from Map in 
AzureBlobFileSystemStore

Contributed by Da Zhou.

(cherry picked from commit a13be203b7877ba56ef63aac4a2e65d4e1a4adbc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40398405
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40398405
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40398405

Branch: refs/heads/branch-3.2
Commit: 4039840510d39684b39563967803ecf3ef2429fa
Parents: 5afd7ef
Author: Da Zhou 
Authored: Tue Nov 13 21:48:05 2018 +
Committer: Steve Loughran 
Committed: Tue Nov 13 21:48:05 2018 +

--
 .../org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java  | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40398405/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
--
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index bfdbba8..f300a9a 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -678,9 +678,7 @@ public class AzureBlobFileSystemStore {
   }
 }
 
-for (Map.Entry<String, String> defaultAclEntry : defaultAclEntries.entrySet()) {
-  aclEntries.remove(defaultAclEntry.getKey());
-}
+aclEntries.keySet().removeAll(defaultAclEntries.keySet());
 
 client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true),
 AbfsAclHelper.serializeAclSpec(aclEntries), eTag);





hadoop git commit: YARN-9001. [Submarine] Use AppAdminClient instead of ServiceClient to submit jobs. (Zac Zhou via wangda)

2018-11-13 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9da6054ca -> fcbd205cc


YARN-9001. [Submarine] Use AppAdminClient instead of ServiceClient to submit jobs. (Zac Zhou via wangda)

Change-Id: Ic3d6c1e439df9cdf74448b345b925343224efe51


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fcbd205c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fcbd205c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fcbd205c

Branch: refs/heads/trunk
Commit: fcbd205cc35e7411ac33860c78b9e1e70697bb4a
Parents: 9da6054
Author: Wangda Tan 
Authored: Tue Nov 13 13:13:27 2018 -0800
Committer: Wangda Tan 
Committed: Tue Nov 13 13:13:27 2018 -0800

--
 .../yarn/service/client/ServiceClient.java  |  1 +
 .../submarine/runtimes/common/JobMonitor.java   |  6 +++
 .../yarnservice/YarnServiceJobMonitor.java  | 27 +
 .../yarnservice/YarnServiceJobSubmitter.java| 42 ++--
 .../runtimes/yarnservice/YarnServiceUtils.java  | 15 +++
 .../yarnservice/TestYarnServiceRunJobCli.java   | 12 +++---
 .../submarine/common/MockClientContext.java |  1 -
 7 files changed, 80 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcbd205c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 0bc5a2c..713d890 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -1552,6 +1552,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
   LOG.info("Service {} does not have an application ID", serviceName);
   return appSpec;
 }
+appSpec.setId(currentAppId.toString());
 ApplicationReport appReport = yarnClient.getApplicationReport(currentAppId);
 appSpec.setState(convertState(appReport.getYarnApplicationState()));
 ApplicationTimeout lifetime =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcbd205c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/JobMonitor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/JobMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/JobMonitor.java
index c81393b..35e21fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/JobMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/JobMonitor.java
@@ -48,6 +48,11 @@ public abstract class JobMonitor {
   throws IOException, YarnException;
 
   /**
+   * Cleanup AppAdminClient, etc.
+   */
+  public void cleanup() throws IOException {}
+
+  /**
* Continue wait and print status if job goes to ready or final state.
* @param jobName
* @throws IOException
@@ -80,5 +85,6 @@ public abstract class JobMonitor {
 throw new IOException(e);
   }
 }
+cleanup();
   }
 }
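
[Editor's note] The new cleanup() is a template-method hook: the base monitor
loop calls it once waiting finishes, and subclasses such as
YarnServiceJobMonitor override it to release their client. A reduced sketch of
the shape; the method names, poll interval, and loop condition are simplified
assumptions, not the real Submarine API:

    import java.io.IOException;

    abstract class MonitorSketch {
      abstract boolean jobReachedFinalState(String jobName) throws IOException;

      /** Subclasses release per-job resources here; default is a no-op. */
      public void cleanup() throws IOException {}

      public void waitAndPrintJobStatus(String jobName) throws IOException {
        try {
          while (!jobReachedFinalState(jobName)) {
            Thread.sleep(1000L);  // illustrative poll interval
          }
        } catch (InterruptedException e) {
          throw new IOException(e);
        }
        cleanup();  // the call the patch adds at the end of the wait loop
      }
    }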

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcbd205c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobMonitor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobMonitor.java
index fab018a..ee68ddb 

hadoop git commit: Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is set. Contributed by Zsolt Venczel."

2018-11-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 abd9d93a5 -> 8ab6aa1b4


Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is 
set. Contributed by Zsolt Venczel."

This reverts commit 0424715207cd07debeee5c624973e9db90d36fb6.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ab6aa1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ab6aa1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ab6aa1b

Branch: refs/heads/branch-3.1
Commit: 8ab6aa1b4274a0d3bae3a4ab3b7e6ca252227e39
Parents: abd9d93
Author: Xiao Chen 
Authored: Tue Nov 13 12:45:35 2018 -0800
Committer: Xiao Chen 
Committed: Tue Nov 13 12:46:03 2018 -0800

--
 .../main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java | 12 ++--
 .../src/test/resources/testErasureCodingConf.xml|  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ab6aa1b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 56d453b..56706b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -357,16 +357,16 @@ public class ECAdmin extends Configured implements Tool {
   final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
   try {
 dfs.setErasureCodingPolicy(p, ecPolicyName);
-
-String actualECPolicyName = dfs.getErasureCodingPolicy(p).getName();
-
-System.out.println("Set " + actualECPolicyName +
-" erasure coding policy on "+ path);
+if (ecPolicyName == null){
+  ecPolicyName = "default";
+}
+System.out.println("Set " + ecPolicyName + " erasure coding policy on" +
+" " + path);
 RemoteIterator<FileStatus> dirIt = dfs.listStatusIterator(p);
 if (dirIt.hasNext()) {
   System.out.println("Warning: setting erasure coding policy on a " +
   "non-empty directory will not automatically convert existing " +
-  "files to " + actualECPolicyName + " erasure coding policy");
+  "files to " + ecPolicyName + " erasure coding policy");
 }
   } catch (Exception e) {
 System.err.println(AdminHelper.prettifyException(e));
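
[Editor's note] Functionally, the revert changes what ECAdmin prints when no
policy is named: instead of resolving and printing the effective policy
(e.g. RS-6-3-1024k), it falls back to the literal word "default". The restored
branch in isolation, with the path hard-coded for illustration:

    public class EcPrintDemo {
      public static void main(String[] args) {
        // As if the user ran the setPolicy command without naming a policy.
        String ecPolicyName = args.length > 0 ? args[0] : null;
        if (ecPolicyName == null) {
          ecPolicyName = "default";
        }
        // With no argument, prints: Set default erasure coding policy on /ecdir
        System.out.println("Set " + ecPolicyName + " erasure coding policy on"
            + " " + "/ecdir");
      }
    }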

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ab6aa1b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index b47d50f..9070367 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -710,7 +710,7 @@
   <comparators>
     <comparator>
       <type>SubstringComparator</type>
-      <expected-output>Set RS-6-3-1024k erasure coding policy on /ecdir</expected-output>
+      <expected-output>Set default erasure coding policy on /ecdir</expected-output>
     </comparator>
   </comparators>
 
@@ -728,11 +728,11 @@
   <comparators>
     <comparator>
       <type>SubstringComparator</type>
-      <expected-output>Set RS-6-3-1024k erasure coding policy on /ecdir</expected-output>
+      <expected-output>Set default erasure coding policy on /ecdir</expected-output>
     </comparator>
     <comparator>
       <type>SubstringComparator</type>
-      <expected-output>Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to RS-6-3-1024k erasure coding policy</expected-output>
+      <expected-output>Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to default erasure coding policy</expected-output>
     </comparator>
   </comparators>
 





hadoop git commit: Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is set. Contributed by Zsolt Venczel."

2018-11-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 f214af74b -> 5afd7efe2


Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is 
set. Contributed by Zsolt Venczel."

This reverts commit 7dc79a8b5b7af0bf37d25a221be8ed446b0edb74.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5afd7efe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5afd7efe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5afd7efe

Branch: refs/heads/branch-3.2
Commit: 5afd7efe24e3c40bb01f3b6baa9c25f72797e42d
Parents: f214af7
Author: Xiao Chen 
Authored: Tue Nov 13 12:45:02 2018 -0800
Committer: Xiao Chen 
Committed: Tue Nov 13 12:45:55 2018 -0800

--
 .../main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java | 12 ++--
 .../src/test/resources/testErasureCodingConf.xml|  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5afd7efe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 903a1e2..5f8626e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -358,16 +358,16 @@ public class ECAdmin extends Configured implements Tool {
   final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
   try {
 dfs.setErasureCodingPolicy(p, ecPolicyName);
-
-String actualECPolicyName = dfs.getErasureCodingPolicy(p).getName();
-
-System.out.println("Set " + actualECPolicyName +
-" erasure coding policy on "+ path);
+if (ecPolicyName == null){
+  ecPolicyName = "default";
+}
+System.out.println("Set " + ecPolicyName + " erasure coding policy on" +
+" " + path);
 RemoteIterator<FileStatus> dirIt = dfs.listStatusIterator(p);
 if (dirIt.hasNext()) {
   System.out.println("Warning: setting erasure coding policy on a " +
   "non-empty directory will not automatically convert existing " +
-  "files to " + actualECPolicyName + " erasure coding policy");
+  "files to " + ecPolicyName + " erasure coding policy");
 }
   } catch (Exception e) {
 System.err.println(AdminHelper.prettifyException(e));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5afd7efe/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 34f5176..6411fe6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -734,7 +734,7 @@
   <comparators>
     <comparator>
       <type>SubstringComparator</type>
-      <expected-output>Set RS-6-3-1024k erasure coding policy on /ecdir</expected-output>
+      <expected-output>Set default erasure coding policy on /ecdir</expected-output>
     </comparator>
   </comparators>
 
@@ -752,11 +752,11 @@
   <comparators>
     <comparator>
       <type>SubstringComparator</type>
-      <expected-output>Set RS-6-3-1024k erasure coding policy on /ecdir</expected-output>
+      <expected-output>Set default erasure coding policy on /ecdir</expected-output>
     </comparator>
     <comparator>
      <type>SubstringComparator</type>
-      <expected-output>Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to RS-6-3-1024k erasure coding policy</expected-output>
+      <expected-output>Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to default erasure coding policy</expected-output>
     </comparator>
   </comparators>
 





hadoop git commit: Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is set. Contributed by Zsolt Venczel."

2018-11-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 762a56cc6 -> 9da6054ca


Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is 
set. Contributed by Zsolt Venczel."

This reverts commit 7dc79a8b5b7af0bf37d25a221be8ed446b0edb74.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9da6054c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9da6054c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9da6054c

Branch: refs/heads/trunk
Commit: 9da6054ca4ff6f8bb19506d80685b17d2c79
Parents: 762a56c
Author: Xiao Chen 
Authored: Tue Nov 13 12:43:58 2018 -0800
Committer: Xiao Chen 
Committed: Tue Nov 13 12:44:25 2018 -0800

--
 .../main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java | 12 ++--
 .../src/test/resources/testErasureCodingConf.xml|  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9da6054c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 903a1e2..5f8626e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -358,16 +358,16 @@ public class ECAdmin extends Configured implements Tool {
   final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
   try {
 dfs.setErasureCodingPolicy(p, ecPolicyName);
-
-String actualECPolicyName = dfs.getErasureCodingPolicy(p).getName();
-
-System.out.println("Set " + actualECPolicyName +
-" erasure coding policy on "+ path);
+if (ecPolicyName == null){
+  ecPolicyName = "default";
+}
+System.out.println("Set " + ecPolicyName + " erasure coding policy on" +
+" " + path);
 RemoteIterator<FileStatus> dirIt = dfs.listStatusIterator(p);
 if (dirIt.hasNext()) {
   System.out.println("Warning: setting erasure coding policy on a " +
   "non-empty directory will not automatically convert existing " +
-  "files to " + actualECPolicyName + " erasure coding policy");
+  "files to " + ecPolicyName + " erasure coding policy");
 }
   } catch (Exception e) {
 System.err.println(AdminHelper.prettifyException(e));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9da6054c/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 34f5176..6411fe6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -734,7 +734,7 @@
   <comparators>
     <comparator>
       <type>SubstringComparator</type>
-      <expected-output>Set RS-6-3-1024k erasure coding policy on /ecdir</expected-output>
+      <expected-output>Set default erasure coding policy on /ecdir</expected-output>
     </comparator>
   </comparators>
 
@@ -752,11 +752,11 @@
   <comparators>
     <comparator>
       <type>SubstringComparator</type>
-      <expected-output>Set RS-6-3-1024k erasure coding policy on /ecdir</expected-output>
+      <expected-output>Set default erasure coding policy on /ecdir</expected-output>
     </comparator>
     <comparator>
       <type>SubstringComparator</type>
-      <expected-output>Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to RS-6-3-1024k erasure coding policy</expected-output>
+      <expected-output>Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to default erasure coding policy</expected-output>
     </comparator>
   </comparators>
 





hadoop git commit: MAPREDUCE-7158. Inefficient Flush Logic in JobHistory EventWriter. (Zichen Sun via wangda)

2018-11-13 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 c97f8b6d9 -> abd9d93a5


MAPREDUCE-7158. Inefficient Flush Logic in JobHistory EventWriter. (Zichen Sun 
via wangda)

Change-Id: I99ace87980da03bb35a8012cea7218d602a8817a
(cherry picked from commit 762a56cc64bc07d57f94e253920534b8e049f238)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abd9d93a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abd9d93a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abd9d93a

Branch: refs/heads/branch-3.1
Commit: abd9d93a5544a3172c9d9a48385c2ce7fc13fa34
Parents: c97f8b6
Author: Wangda Tan 
Authored: Tue Nov 13 11:25:41 2018 -0800
Committer: Wangda Tan 
Committed: Tue Nov 13 11:36:04 2018 -0800

--
 .../java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/abd9d93a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
index 46e4f1a..b07c676 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
@@ -93,7 +93,6 @@ public class EventWriter {
 wrapper.setType(event.getEventType());
 wrapper.setEvent(event.getDatum());
 writer.write(wrapper, encoder);
-encoder.flush();
 if (this.jsonOutput) {
   out.writeBytes("\n");
 }
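
[Editor's note] Removing the per-event encoder.flush() lets the Avro encoder
buffer many small event records and flush only when EventWriter's own flush()
or close() runs. The same buffered-sink pattern in a generic, runnable form;
this is an analogy, not the actual EventWriter:

    import java.io.BufferedWriter;
    import java.io.IOException;
    import java.io.Writer;

    class BufferedEventSink {
      private final BufferedWriter out;

      BufferedEventSink(Writer raw) {
        this.out = new BufferedWriter(raw);
      }

      void write(String event) throws IOException {
        out.write(event);
        out.write('\n');
        // No flush here: forcing a flush per event is the inefficiency
        // MAPREDUCE-7158 removes.
      }

      void flush() throws IOException { out.flush(); }  // caller decides when
      void close() throws IOException { out.close(); }  // close() also flushes
    }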





[2/2] hadoop git commit: MAPREDUCE-7158. Inefficient Flush Logic in JobHistory EventWriter. (Zichen Sun via wangda)

2018-11-13 Thread wangda
MAPREDUCE-7158. Inefficient Flush Logic in JobHistory EventWriter. (Zichen Sun 
via wangda)

Change-Id: I99ace87980da03bb35a8012cea7218d602a8817a
(cherry picked from commit 762a56cc64bc07d57f94e253920534b8e049f238)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f214af74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f214af74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f214af74

Branch: refs/heads/branch-3.2
Commit: f214af74b29beb4aa76fde30d81cfb6e8c59f694
Parents: ea75304
Author: Wangda Tan 
Authored: Tue Nov 13 11:25:41 2018 -0800
Committer: Wangda Tan 
Committed: Tue Nov 13 11:35:13 2018 -0800

--
 .../java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f214af74/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
index 46e4f1a..b07c676 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
@@ -93,7 +93,6 @@ public class EventWriter {
 wrapper.setType(event.getEventType());
 wrapper.setEvent(event.getDatum());
 writer.write(wrapper, encoder);
-encoder.flush();
 if (this.jsonOutput) {
   out.writeBytes("\n");
 }





[1/2] hadoop git commit: YARN-8918. [Submarine] Correct method usage of str.subString in CliUtils. (Zhankun Tang via wangda)

2018-11-13 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 86deff0a0 -> f214af74b


YARN-8918. [Submarine] Correct method usage of str.subString in CliUtils. 
(Zhankun Tang via wangda)

Change-Id: Id1f11dbab3aa838dee3a0ec8b8fd5dc32f5dd946
(cherry picked from commit 076b795b2e82d7f89cc91e0a8513c7081ee8b930)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea753046
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea753046
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea753046

Branch: refs/heads/branch-3.2
Commit: ea753046b0f418dc850db618f1e2d8b4edeb3f81
Parents: 86deff0
Author: Wangda Tan 
Authored: Tue Nov 13 11:24:15 2018 -0800
Committer: Wangda Tan 
Committed: Tue Nov 13 11:35:06 2018 -0800

--
 .../java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea753046/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
index 546c6eb..bfdfa9a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
@@ -72,7 +72,7 @@ public class CliUtils {
   resourcesStr = resourcesStr.substring(1);
 }
 if (resourcesStr.endsWith("]")) {
-  resourcesStr = resourcesStr.substring(0, resourcesStr.length());
+  resourcesStr = resourcesStr.substring(0, resourcesStr.length() - 1);
 }
 
 for (String resource : resourcesStr.trim().split(",")) {
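
[Editor's note] substring(0, s.length()) returns the whole string, so the old
line never stripped the trailing "]"; subtracting one drops the last
character. A quick standalone check, with a made-up resource string:

    String resourcesStr = "[memory=2G,vcores=2]".substring(1);  // strip "["
    // Before the fix: substring(0, resourcesStr.length()) was a no-op,
    // leaving "memory=2G,vcores=2]".
    if (resourcesStr.endsWith("]")) {
      resourcesStr = resourcesStr.substring(0, resourcesStr.length() - 1);
    }
    System.out.println(resourcesStr);  // memory=2G,vcores=2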





[2/2] hadoop git commit: MAPREDUCE-7158. Inefficient Flush Logic in JobHistory EventWriter. (Zichen Sun via wangda)

2018-11-13 Thread wangda
MAPREDUCE-7158. Inefficient Flush Logic in JobHistory EventWriter. (Zichen Sun 
via wangda)

Change-Id: I99ace87980da03bb35a8012cea7218d602a8817a


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/762a56cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/762a56cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/762a56cc

Branch: refs/heads/trunk
Commit: 762a56cc64bc07d57f94e253920534b8e049f238
Parents: 076b795
Author: Wangda Tan 
Authored: Tue Nov 13 11:25:41 2018 -0800
Committer: Wangda Tan 
Committed: Tue Nov 13 11:25:41 2018 -0800

--
 .../java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/762a56cc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
index 46e4f1a..b07c676 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
@@ -93,7 +93,6 @@ public class EventWriter {
 wrapper.setType(event.getEventType());
 wrapper.setEvent(event.getDatum());
 writer.write(wrapper, encoder);
-encoder.flush();
 if (this.jsonOutput) {
   out.writeBytes("\n");
 }
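
The removed encoder.flush() forced a flush on every event written; the
class's own flush()/close() path still flushes the encoder (not shown in
this hunk), so events now batch in the encoder's buffer between explicit
flushes. A simplified sketch of that batching principle, with assumed names
(this is not the real EventWriter, which writes Avro-encoded events):

    import java.io.BufferedOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    class BatchedEventWriter implements AutoCloseable {
      private final OutputStream out;

      BatchedEventWriter(OutputStream raw) {
        this.out = new BufferedOutputStream(raw);
      }

      void writeEvent(byte[] encodedEvent) throws IOException {
        out.write(encodedEvent);
        // No per-event flush: the buffer spills only when full.
      }

      void flush() throws IOException {
        out.flush(); // callers can still force a checkpoint
      }

      @Override
      public void close() throws IOException {
        flush();
        out.close();
      }
    }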





[1/2] hadoop git commit: YARN-8918. [Submarine] Correct method usage of str.subString in CliUtils. (Zhankun Tang via wangda)

2018-11-13 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 671fd6524 -> 762a56cc6


YARN-8918. [Submarine] Correct method usage of str.subString in CliUtils. 
(Zhankun Tang via wangda)

Change-Id: Id1f11dbab3aa838dee3a0ec8b8fd5dc32f5dd946


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/076b795b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/076b795b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/076b795b

Branch: refs/heads/trunk
Commit: 076b795b2e82d7f89cc91e0a8513c7081ee8b930
Parents: 671fd65
Author: Wangda Tan 
Authored: Tue Nov 13 11:24:15 2018 -0800
Committer: Wangda Tan 
Committed: Tue Nov 13 11:24:15 2018 -0800

--
 .../java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/076b795b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
index 05e830f..f85c82a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
@@ -72,7 +72,7 @@ public class CliUtils {
   resourcesStr = resourcesStr.substring(1);
 }
 if (resourcesStr.endsWith("]")) {
-  resourcesStr = resourcesStr.substring(0, resourcesStr.length());
+  resourcesStr = resourcesStr.substring(0, resourcesStr.length() - 1);
 }
 
 for (String resource : resourcesStr.trim().split(",")) {





hadoop git commit: HDFS-13852. RBF: The DN_REPORT_TIME_OUT and DN_REPORT_CACHE_EXPIRE should be configured in RBFConfigKeys. Contributed by yanghuafeng.

2018-11-13 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-13891 053662e9f -> 2bd2e6c31


HDFS-13852. RBF: The DN_REPORT_TIME_OUT and DN_REPORT_CACHE_EXPIRE should be 
configured in RBFConfigKeys. Contributed by yanghuafeng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bd2e6c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bd2e6c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bd2e6c3

Branch: refs/heads/HDFS-13891
Commit: 2bd2e6c31cee34f8edcb197779743b6276a7da88
Parents: 053662e
Author: Inigo Goiri 
Authored: Tue Nov 13 10:14:35 2018 -0800
Committer: Inigo Goiri 
Committed: Tue Nov 13 10:14:35 2018 -0800

--
 .../federation/metrics/FederationMetrics.java   | 12 +--
 .../federation/metrics/NamenodeBeanMetrics.java | 22 
 .../server/federation/router/RBFConfigKeys.java |  7 +++
 .../src/main/resources/hdfs-rbf-default.xml | 17 +++
 .../router/TestRouterRPCClientRetries.java  |  2 +-
 .../server/federation/router/TestRouterRpc.java |  2 +-
 6 files changed, 40 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bd2e6c3/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
index 23f62b6..6a0a46e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
@@ -47,12 +47,14 @@ import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
 import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.Router;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
 import org.apache.hadoop.hdfs.server.federation.store.MembershipStore;
@@ -95,7 +97,7 @@ public class FederationMetrics implements FederationMBean {
   private static final String DATE_FORMAT = "/MM/dd HH:mm:ss";
 
   /** Prevent holding the page from load too long. */
-  private static final long TIME_OUT = TimeUnit.SECONDS.toMillis(1);
+  private final long timeOut;
 
 
   /** Router interface. */
@@ -143,6 +145,12 @@ public class FederationMetrics implements FederationMBean {
   this.routerStore = stateStore.getRegisteredRecordStore(
   RouterStore.class);
 }
+
+// Initialize the cache for the DN reports
+Configuration conf = router.getConfig();
+this.timeOut = conf.getTimeDuration(RBFConfigKeys.DN_REPORT_TIME_OUT,
+RBFConfigKeys.DN_REPORT_TIME_OUT_MS_DEFAULT, TimeUnit.MILLISECONDS);
+
   }
 
   /**
@@ -434,7 +442,7 @@ public class FederationMetrics implements FederationMBean {
 try {
   RouterRpcServer rpcServer = this.router.getRpcServer();
   DatanodeInfo[] live = rpcServer.getDatanodeReport(
-  DatanodeReportType.LIVE, false, TIME_OUT);
+  DatanodeReportType.LIVE, false, timeOut);
 
   if (live.length > 0) {
 float totalDfsUsed = 0;
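
The hard-coded one-second TIME_OUT becomes a value read through
Configuration.getTimeDuration, so the wait for datanode reports is now
tunable per deployment. A minimal sketch of that API pattern; the key name
and default below are illustrative assumptions (the real ones live in
RBFConfigKeys and hdfs-rbf-default.xml):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public class TimeDurationDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // getTimeDuration accepts suffixed values such as "1s" or "500ms"
        // in config files and converts them to the requested unit.
        long timeOutMs = conf.getTimeDuration(
            "dfs.federation.router.dn-report.time-out", // assumed key name
            1000, TimeUnit.MILLISECONDS);
        System.out.println("DN report timeout: " + timeOutMs + " ms");
      }
    }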

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bd2e6c3/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
index e8ebf0d..da9a927 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMe

[2/2] hadoop git commit: HDDS-675. Add blocking buffer and use watchApi for flush/close in OzoneClient. Contributed by Shashikant Banerjee.

2018-11-13 Thread shashikant
HDDS-675. Add blocking buffer and use watchApi for flush/close in OzoneClient. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/671fd652
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/671fd652
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/671fd652

Branch: refs/heads/trunk
Commit: 671fd6524b2640474de2bc3b8dbaa0a3cf7fcf01
Parents: 75291e6
Author: Shashikant Banerjee 
Authored: Tue Nov 13 23:39:14 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Nov 13 23:39:14 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |  28 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java |  65 ++-
 .../hdds/scm/storage/ChunkOutputStream.java | 448 +++
 .../hdds/scm/XceiverClientAsyncReply.java   |  98 
 .../hadoop/hdds/scm/XceiverClientSpi.java   |  12 +-
 .../scm/storage/ContainerProtocolCalls.java |  57 ++-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  24 +-
 .../common/src/main/resources/ozone-default.xml |  26 +-
 .../keyvalue/impl/BlockManagerImpl.java |   3 +
 .../hadoop/ozone/client/OzoneClientUtils.java   |  27 --
 .../ozone/client/io/ChunkGroupOutputStream.java | 337 +++---
 .../hadoop/ozone/client/rpc/RpcClient.java  |  27 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  45 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  19 +
 .../apache/hadoop/ozone/RatisTestHelper.java|   2 +-
 .../rpc/TestCloseContainerHandlingByClient.java | 252 +++
 .../rpc/TestContainerStateMachineFailures.java  |  20 +-
 .../client/rpc/TestFailureHandlingByClient.java | 213 +
 .../ozone/container/ContainerTestHelper.java|  34 ++
 .../container/ozoneimpl/TestOzoneContainer.java |   2 +-
 .../ozone/scm/TestXceiverClientMetrics.java |   3 +-
 .../ozone/web/TestOzoneRestWithMiniCluster.java |   2 +-
 .../web/storage/DistributedStorageHandler.java  |  42 +-
 .../hadoop/ozone/freon/TestDataValidate.java|   6 +
 .../ozone/freon/TestRandomKeyGenerator.java |   6 +
 25 files changed, 1248 insertions(+), 550 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/671fd652/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index cc34e27..9acd832 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdds.scm;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
@@ -47,6 +48,7 @@ import java.util.Map;
 import java.util.HashMap;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 
 /**
  * A Client for the storageContainer protocol.
@@ -163,7 +165,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 // In case the command gets retried on a 2nd datanode,
 // sendCommandAsyncCall will create a new channel and async stub
 // in case these don't exist for the specific datanode.
-responseProto = sendCommandAsync(request, dn).get();
+responseProto = sendCommandAsync(request, dn).getResponse().get();
 if (responseProto.getResult() == ContainerProtos.Result.SUCCESS) {
   break;
 }
@@ -197,13 +199,23 @@ public class XceiverClientGrpc extends XceiverClientSpi {
* @throws IOException
*/
   @Override
-  public CompletableFuture<ContainerCommandResponseProto> sendCommandAsync(
+  public XceiverClientAsyncReply sendCommandAsync(
   ContainerCommandRequestProto request)
   throws IOException, ExecutionException, InterruptedException {
-return sendCommandAsync(request, pipeline.getFirstNode());
+XceiverClientAsyncReply asyncReply =
+sendCommandAsync(request, pipeline.getFirstNode());
+
+// TODO : for now make this API sync in nature as async requests are
+// served out of order over XceiverClientGrpc. This needs to be fixed
+// if this API is to be used for I/O path. Currently, this is not
+// used for Read/Write Operation but for tests.
+if (!HddsUtils.isReadOnly(request)) {
+  
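
The hunk above is cut off in the archive, but the visible signature change
is the point: sendCommandAsync now returns an XceiverClientAsyncReply that
wraps the response future, and callers block via getResponse().get(). A
hedged reconstruction of that wrapper idea; the class body here is an
illustration, not the committed code:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;

    class AsyncReplyDemo {
      // Holder around the future, mirroring XceiverClientAsyncReply's role.
      static class Reply<T> {
        private final CompletableFuture<T> response;
        Reply(CompletableFuture<T> response) { this.response = response; }
        CompletableFuture<T> getResponse() { return response; }
      }

      public static void main(String[] args)
          throws ExecutionException, InterruptedException {
        Reply<String> reply =
            new Reply<>(CompletableFuture.completedFuture("SUCCESS"));
        // Same call shape as the diff: sendCommandAsync(...).getResponse().get()
        System.out.println(reply.getResponse().get());
      }
    }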

[1/2] hadoop git commit: HDDS-675. Add blocking buffer and use watchApi for flush/close in OzoneClient. Contributed by Shashikant Banerjee.

2018-11-13 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 75291e6d5 -> 671fd6524


http://git-wip-us.apache.org/repos/asf/hadoop/blob/671fd652/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index 43517ae..935423d 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.ozone.client.rpc;
 
-import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -27,11 +26,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.hdds.scm.container.common.helpers.
-StorageContainerException;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -55,15 +49,17 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
-import org.slf4j.event.Level;
 
 import java.io.IOException;
-import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 
 /**
  * Tests Close Container Exception handling by Ozone Client.
@@ -79,7 +75,6 @@ public class TestCloseContainerHandlingByClient {
   private static String volumeName;
   private static String bucketName;
   private static String keyString;
-  private static int maxRetries;
 
   /**
* Create a MiniDFSCluster for testing.
@@ -91,15 +86,14 @@ public class TestCloseContainerHandlingByClient {
   @BeforeClass
   public static void init() throws Exception {
 conf = new OzoneConfiguration();
-maxRetries = 100;
-conf.setInt(OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES, maxRetries);
-conf.set(OzoneConfigKeys.OZONE_CLIENT_RETRY_INTERVAL, "200ms");
 chunkSize = (int) OzoneConsts.MB;
 blockSize = 4 * chunkSize;
-conf.setInt(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, chunkSize);
+conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms");
+conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
+conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
+conf.setQuietMode(false);
 conf.setLong(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB, (4));
-cluster = MiniOzoneCluster.newBuilder(conf)
-.setNumDatanodes(3).build();
+cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(7).build();
 cluster.waitForClusterToBeReady();
 //the easiest way to create an open container is creating a key
 client = OzoneClientFactory.getClient(conf);
@@ -121,44 +115,29 @@ public class TestCloseContainerHandlingByClient {
 }
   }
 
-  private static String fixedLengthString(String string, int length) {
-return String.format("%1$" + length + "s", string);
-  }
-
   @Test
   public void testBlockWritesWithFlushAndClose() throws Exception {
 String keyName = "standalone";
-OzoneOutputStream key =
-createKey(keyName, ReplicationType.STAND_ALONE, 0);
+OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
 // write data more than 1 chunk
-byte[] data =
-fixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
+byte[] data = ContainerTestHelper
+.getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
 key.write(data);
 
 Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
 //get the name of a valid container
 OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-.setBucketName(bucketName)
-.setType(HddsProtos.ReplicationType.STAND_ALONE)
+.setBucke
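
The local fixedLengthString helper removed earlier in this hunk relied on
String.format padding to build test payloads of an exact size; the patch
switches callers to ContainerTestHelper.getFixedLengthString. A quick
illustration of what that format string does (demo values are assumptions):

    public class FixedLengthDemo {
      // Right-justifies s in a field `length` characters wide, padding
      // with spaces, like the helper removed from the test above.
      static String fixedLengthString(String s, int length) {
        return String.format("%1$" + length + "s", s);
      }

      public static void main(String[] args) {
        System.out.println("[" + fixedLengthString("key", 8) + "]");
        // prints: [     key]
      }
    }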

hadoop git commit: HADOOP-15869. BlockDecompressorStream#decompress should not return -1 in case of IOException. Contributed by Surendra Singh Lilhore

2018-11-13 Thread surendralilhore
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 d72c13488 -> c97f8b6d9


HADOOP-15869. BlockDecompressorStream#decompress should not return -1 in case 
of IOException. Contributed by Surendra Singh Lilhore

(cherry picked from commit 75291e6d53c13debf45493a870a898b63779914b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c97f8b6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c97f8b6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c97f8b6d

Branch: refs/heads/branch-3.1
Commit: c97f8b6d95979cb61b3d564adf3c3c1e3fc7675e
Parents: d72c134
Author: Surendra Singh Lilhore 
Authored: Tue Nov 13 20:22:58 2018 +0530
Committer: Surendra Singh Lilhore 
Committed: Tue Nov 13 21:40:00 2018 +0530

--
 .../io/compress/BlockDecompressorStream.java|  4 +--
 .../compress/TestBlockDecompressorStream.java   | 29 
 2 files changed, 31 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c97f8b6d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
index 72509c7..de457d1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
@@ -71,8 +71,8 @@ public class BlockDecompressorStream extends DecompressorStream {
 if (noUncompressedBytes == originalBlockSize) {
   // Get original data size
   try {
-originalBlockSize =  rawReadInt();
-  } catch (IOException ioe) {
+originalBlockSize = rawReadInt();
+  } catch (EOFException e) {
 return -1;
   }
   noUncompressedBytes = 0;
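
Catching the broad IOException made any read failure (for example, missing
blocks) look like a normal end of stream and return -1; narrowing the catch
to EOFException lets real errors reach the caller. A small self-contained
sketch of the distinction (names are assumptions, not the patched class):

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.EOFException;
    import java.io.IOException;

    public class EofVsIoDemo {
      // Only a clean end-of-stream maps to -1; any other IOException
      // propagates instead of masquerading as EOF.
      static int readBlockSize(DataInputStream in) throws IOException {
        try {
          return in.readInt();
        } catch (EOFException e) {
          return -1; // legitimate end of input
        }
      }

      public static void main(String[] args) throws IOException {
        DataInputStream empty =
            new DataInputStream(new ByteArrayInputStream(new byte[0]));
        System.out.println(readBlockSize(empty)); // -1: clean EOF
      }
    }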

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c97f8b6d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
index c976572..cdab772 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
@@ -18,11 +18,15 @@
 package org.apache.hadoop.io.compress;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.nio.ByteBuffer;
 
 import org.junit.Test;
@@ -74,4 +78,29 @@ public class TestBlockDecompressorStream {
   fail("unexpected IOException : " + e);
 }
   }
+
+  @Test
+  public void testReadWhenIoExceptionOccure() throws IOException {
+File file = new File("testReadWhenIOException");
+try {
+  file.createNewFile();
+  InputStream io = new FileInputStream(file) {
+@Override
+public int read() throws IOException {
+  throw new IOException("File blocks missing");
+}
+  };
+
+  try (BlockDecompressorStream blockDecompressorStream =
+  new BlockDecompressorStream(io, new FakeDecompressor(), 1024)) {
+int byteRead = blockDecompressorStream.read();
+fail("Should not return -1 in case of IOException. Byte read "
++ byteRead);
+  } catch (IOException e) {
+assertTrue(e.getMessage().contains("File blocks missing"));
+  }
+} finally {
+  file.delete();
+}
+  }
 }
\ No newline at end of file





hadoop git commit: HADOOP-15869. BlockDecompressorStream#decompress should not return -1 in case of IOException. Contributed by Surendra Singh Lilhore

2018-11-13 Thread surendralilhore
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 5e8664126 -> 86deff0a0


HADOOP-15869. BlockDecompressorStream#decompress should not return -1 in case 
of IOException. Contributed by Surendra Singh Lilhore

(cherry picked from commit 75291e6d53c13debf45493a870a898b63779914b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86deff0a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86deff0a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86deff0a

Branch: refs/heads/branch-3.2
Commit: 86deff0a06958ca4670900f96bd0340ad2dad9db
Parents: 5e86641
Author: Surendra Singh Lilhore 
Authored: Tue Nov 13 20:22:58 2018 +0530
Committer: Surendra Singh Lilhore 
Committed: Tue Nov 13 21:26:22 2018 +0530

--
 .../io/compress/BlockDecompressorStream.java|  4 +--
 .../compress/TestBlockDecompressorStream.java   | 29 
 2 files changed, 31 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86deff0a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
index 72509c7..de457d1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
@@ -71,8 +71,8 @@ public class BlockDecompressorStream extends DecompressorStream {
 if (noUncompressedBytes == originalBlockSize) {
   // Get original data size
   try {
-originalBlockSize =  rawReadInt();
-  } catch (IOException ioe) {
+originalBlockSize = rawReadInt();
+  } catch (EOFException e) {
 return -1;
   }
   noUncompressedBytes = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86deff0a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
index c976572..cdab772 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
@@ -18,11 +18,15 @@
 package org.apache.hadoop.io.compress;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.nio.ByteBuffer;
 
 import org.junit.Test;
@@ -74,4 +78,29 @@ public class TestBlockDecompressorStream {
   fail("unexpected IOException : " + e);
 }
   }
+
+  @Test
+  public void testReadWhenIoExceptionOccure() throws IOException {
+File file = new File("testReadWhenIOException");
+try {
+  file.createNewFile();
+  InputStream io = new FileInputStream(file) {
+@Override
+public int read() throws IOException {
+  throw new IOException("File blocks missing");
+}
+  };
+
+  try (BlockDecompressorStream blockDecompressorStream =
+  new BlockDecompressorStream(io, new FakeDecompressor(), 1024)) {
+int byteRead = blockDecompressorStream.read();
+fail("Should not return -1 in case of IOException. Byte read "
++ byteRead);
+  } catch (IOException e) {
+assertTrue(e.getMessage().contains("File blocks missing"));
+  }
+} finally {
+  file.delete();
+}
+  }
 }
\ No newline at end of file





hadoop git commit: HADOOP-15869. BlockDecompressorStream#decompress should not return -1 in case of IOException. Contributed by Surendra Singh Lilhore

2018-11-13 Thread surendralilhore
Repository: hadoop
Updated Branches:
  refs/heads/trunk e7b63baca -> 75291e6d5


HADOOP-15869. BlockDecompressorStream#decompress should not return -1 in case 
of IOException. Contributed by Surendra Singh Lilhore


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75291e6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75291e6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75291e6d

Branch: refs/heads/trunk
Commit: 75291e6d53c13debf45493a870a898b63779914b
Parents: e7b63ba
Author: Surendra Singh Lilhore 
Authored: Tue Nov 13 20:22:58 2018 +0530
Committer: Surendra Singh Lilhore 
Committed: Tue Nov 13 20:22:58 2018 +0530

--
 .../io/compress/BlockDecompressorStream.java|  4 +--
 .../compress/TestBlockDecompressorStream.java   | 29 
 2 files changed, 31 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75291e6d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
index 72509c7..de457d1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
@@ -71,8 +71,8 @@ public class BlockDecompressorStream extends DecompressorStream {
 if (noUncompressedBytes == originalBlockSize) {
   // Get original data size
   try {
-originalBlockSize =  rawReadInt();
-  } catch (IOException ioe) {
+originalBlockSize = rawReadInt();
+  } catch (EOFException e) {
 return -1;
   }
   noUncompressedBytes = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75291e6d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
index c976572..cdab772 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
@@ -18,11 +18,15 @@
 package org.apache.hadoop.io.compress;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.nio.ByteBuffer;
 
 import org.junit.Test;
@@ -74,4 +78,29 @@ public class TestBlockDecompressorStream {
   fail("unexpected IOException : " + e);
 }
   }
+
+  @Test
+  public void testReadWhenIoExceptionOccure() throws IOException {
+File file = new File("testReadWhenIOException");
+try {
+  file.createNewFile();
+  InputStream io = new FileInputStream(file) {
+@Override
+public int read() throws IOException {
+  throw new IOException("File blocks missing");
+}
+  };
+
+  try (BlockDecompressorStream blockDecompressorStream =
+  new BlockDecompressorStream(io, new FakeDecompressor(), 1024)) {
+int byteRead = blockDecompressorStream.read();
+fail("Should not return -1 in case of IOException. Byte read "
++ byteRead);
+  } catch (IOException e) {
+assertTrue(e.getMessage().contains("File blocks missing"));
+  }
+} finally {
+  file.delete();
+}
+  }
 }
\ No newline at end of file





[hadoop] Git Push Summary

2018-11-13 Thread elek
Repository: hadoop
Updated Tags:  refs/tags/ozone-0.3.0-alpha-RC0 [created] 3fbd1f15b
