[2/2] hadoop git commit: YARN-8679. [ATSv2] If HBase cluster is down for long time, high chances that NM ContainerManager dispatcher get blocked. Contributed by Wangda Tan.

2018-08-17 Thread rohithsharmaks
YARN-8679. [ATSv2] If HBase cluster is down for long time, high chances that NM 
ContainerManager dispatcher get blocked. Contributed by Wangda Tan.

(cherry picked from commit 4aacbfff605262aaf3dbd926258afcadc86c72c0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/675aa2bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/675aa2bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/675aa2bb

Branch: refs/heads/branch-3.0
Commit: 675aa2bbc05e2f900403f9b862d38ba0cf77c208
Parents: 8118b14
Author: Rohith Sharma K S 
Authored: Sat Aug 18 10:26:55 2018 +0530
Committer: Rohith Sharma K S 
Committed: Sat Aug 18 11:06:14 2018 +0530

--
 .../applicationsmanager/TestAMLaunchFailure.java   |  2 +-
 .../applicationsmanager/TestSchedulerNegotiator.java   |  2 +-
 .../TestTimelineServiceClientIntegration.java  |  3 ++-
 .../security/TestTimelineAuthFilterForV2.java  |  2 +-
 .../collector/PerNodeTimelineCollectorsAuxService.java | 13 +
 5 files changed, 14 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/675aa2bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
index c0009dd..ad39099 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
@@ -90,7 +90,7 @@ public class TestAMLaunchFailure {
 //}
 //
 //@Override
-//public void addApplication(ApplicationId applicationId,
+//public void addApplicationIfAbsent(ApplicationId applicationId,
 //ApplicationMaster master, String user, String queue, Priority 
priority
 //, ApplicationStore appStore)
 //throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/675aa2bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
index 7d06e55..fedbf2b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
@@ -67,7 +67,7 @@ public class TestSchedulerNegotiator {
 //  return null;
 //}
 //@Override
-//public void addApplication(ApplicationId applicationId,
+//public void addApplicationIfAbsent(ApplicationId applicationId,
 //ApplicationMaster master, String user, String queue, Priority 
priority,
 //ApplicationStore store)
 //throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/675aa2bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
 

hadoop git commit: YARN-8679. [ATSv2] If HBase cluster is down for long time, high chances that NM ContainerManager dispatcher get blocked. Contributed by Wangda Tan.

2018-08-17 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 7556b09e9 -> a3d4a25bb


YARN-8679. [ATSv2] If HBase cluster is down for long time, high chances that NM 
ContainerManager dispatcher get blocked. Contributed by Wangda Tan.

(cherry picked from commit 4aacbfff605262aaf3dbd926258afcadc86c72c0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3d4a25b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3d4a25b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3d4a25b

Branch: refs/heads/branch-3.1
Commit: a3d4a25bbfe5e41393f790e77b8e457f13c8424d
Parents: 7556b09
Author: Rohith Sharma K S 
Authored: Sat Aug 18 10:26:55 2018 +0530
Committer: Rohith Sharma K S 
Committed: Sat Aug 18 11:04:09 2018 +0530

--
 .../applicationsmanager/TestAMLaunchFailure.java   |  2 +-
 .../applicationsmanager/TestSchedulerNegotiator.java   |  2 +-
 .../TestTimelineServiceClientIntegration.java  |  3 ++-
 .../security/TestTimelineAuthFilterForV2.java  |  2 +-
 .../collector/PerNodeTimelineCollectorsAuxService.java | 13 +
 5 files changed, 14 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d4a25b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
index c0009dd..ad39099 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
@@ -90,7 +90,7 @@ public class TestAMLaunchFailure {
 //}
 //
 //@Override
-//public void addApplication(ApplicationId applicationId,
+//public void addApplicationIfAbsent(ApplicationId applicationId,
 //ApplicationMaster master, String user, String queue, Priority 
priority
 //, ApplicationStore appStore)
 //throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d4a25b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
index 7d06e55..fedbf2b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
@@ -67,7 +67,7 @@ public class TestSchedulerNegotiator {
 //  return null;
 //}
 //@Override
-//public void addApplication(ApplicationId applicationId,
+//public void addApplicationIfAbsent(ApplicationId applicationId,
 //ApplicationMaster master, String user, String queue, Priority 
priority,
 //ApplicationStore store)
 //throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d4a25b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
 

[1/2] hadoop git commit: YARN-7835. Race condition in NM while publishing events if second attempt is launched on the same node. (Rohith Sharma K S via Haibo Chen)

2018-08-17 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 a41f18098 -> 675aa2bbc


YARN-7835. Race condition in NM while publishing events if second attempt is 
launched on the same node. (Rohith Sharma K S via Haibo Chen)

(cherry picked from commit d1274c3b71549cb000868500c293cafd880b3713)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8118b14d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8118b14d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8118b14d

Branch: refs/heads/branch-3.0
Commit: 8118b14db84f8a4b5c955894b65f2f8d22be5254
Parents: a41f180
Author: Haibo Chen 
Authored: Wed Feb 28 21:06:42 2018 -0800
Committer: Rohith Sharma K S 
Committed: Sat Aug 18 11:06:02 2018 +0530

--
 .../PerNodeTimelineCollectorsAuxService.java| 51 +--
 ...TestPerNodeTimelineCollectorsAuxService.java | 93 
 2 files changed, 120 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8118b14d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
index 66f9aab..c15f99d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
@@ -19,7 +19,12 @@
 package org.apache.hadoop.yarn.server.timelineservice.collector;
 
 import java.nio.ByteBuffer;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
@@ -31,6 +36,7 @@ import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
@@ -59,6 +65,8 @@ public class PerNodeTimelineCollectorsAuxService extends 
AuxiliaryService {
   private final NodeTimelineCollectorManager collectorManager;
   private long collectorLingerPeriod;
   private ScheduledExecutorService scheduler;
  private Map<ApplicationId, Set<ContainerId>> appIdToContainerId =
  new ConcurrentHashMap<>();
 
   public PerNodeTimelineCollectorsAuxService() {
 this(new NodeTimelineCollectorManager(true));
@@ -148,7 +156,15 @@ public class PerNodeTimelineCollectorsAuxService extends 
AuxiliaryService {
 if (context.getContainerType() == ContainerType.APPLICATION_MASTER) {
   ApplicationId appId = context.getContainerId().
   getApplicationAttemptId().getApplicationId();
-  addApplication(appId, context.getUser());
+  synchronized (appIdToContainerId) {
+Set<ContainerId> masterContainers = appIdToContainerId.get(appId);
+if (masterContainers == null) {
+  masterContainers = new HashSet<>();
+  appIdToContainerId.put(appId, masterContainers);
+}
+masterContainers.add(context.getContainerId());
+addApplication(appId, context.getUser());
+  }
 }
   }
 
@@ -162,17 +178,36 @@ public class PerNodeTimelineCollectorsAuxService extends 
AuxiliaryService {
 // intercept the event of the AM container being stopped and remove the app
 // level collector service
 if (context.getContainerType() == ContainerType.APPLICATION_MASTER) {
-  final ApplicationId appId =
-  
context.getContainerId().getApplicationAttemptId().getApplicationId();
-  scheduler.schedule(new Runnable() {
-public void run() {
-  removeApplication(appId);
-}
-  }, collectorLingerPeriod, TimeUnit.MILLISECONDS);
+  final ContainerId containerId = context.getContainerId();
+  

hadoop git commit: YARN-8679. [ATSv2] If HBase cluster is down for long time, high chances that NM ContainerManager dispatcher get blocked. Contributed by Wangda Tan.

2018-08-17 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 805647287 -> c68d1d49c


YARN-8679. [ATSv2] If HBase cluster is down for long time, high chances that NM 
ContainerManager dispatcher get blocked. Contributed by Wangda Tan.

(cherry picked from commit 4aacbfff605262aaf3dbd926258afcadc86c72c0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c68d1d49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c68d1d49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c68d1d49

Branch: refs/heads/branch-2
Commit: c68d1d49ca05eb9da9fd31b93c8c6ebd7717cce3
Parents: 8056472
Author: Rohith Sharma K S 
Authored: Sat Aug 18 10:26:55 2018 +0530
Committer: Rohith Sharma K S 
Committed: Sat Aug 18 11:04:20 2018 +0530

--
 .../applicationsmanager/TestAMLaunchFailure.java   |  2 +-
 .../applicationsmanager/TestSchedulerNegotiator.java   |  2 +-
 .../TestTimelineServiceClientIntegration.java  |  3 ++-
 .../security/TestTimelineAuthFilterForV2.java  |  2 +-
 .../collector/PerNodeTimelineCollectorsAuxService.java | 13 +
 5 files changed, 14 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c68d1d49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
index c0009dd..ad39099 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
@@ -90,7 +90,7 @@ public class TestAMLaunchFailure {
 //}
 //
 //@Override
-//public void addApplication(ApplicationId applicationId,
+//public void addApplicationIfAbsent(ApplicationId applicationId,
 //ApplicationMaster master, String user, String queue, Priority 
priority
 //, ApplicationStore appStore)
 //throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c68d1d49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
index 7d06e55..fedbf2b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
@@ -67,7 +67,7 @@ public class TestSchedulerNegotiator {
 //  return null;
 //}
 //@Override
-//public void addApplication(ApplicationId applicationId,
+//public void addApplicationIfAbsent(ApplicationId applicationId,
 //ApplicationMaster master, String user, String queue, Priority 
priority,
 //ApplicationStore store)
 //throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c68d1d49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
 

hadoop git commit: YARN-8679. [ATSv2] If HBase cluster is down for long time, high chances that NM ContainerManager dispatcher get blocked. Contributed by Wangda Tan.

2018-08-17 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk 79c97f6a0 -> 4aacbfff6


YARN-8679. [ATSv2] If HBase cluster is down for long time, high chances that NM 
ContainerManager dispatcher get blocked. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4aacbfff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4aacbfff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4aacbfff

Branch: refs/heads/trunk
Commit: 4aacbfff605262aaf3dbd926258afcadc86c72c0
Parents: 79c97f6
Author: Rohith Sharma K S 
Authored: Sat Aug 18 10:26:55 2018 +0530
Committer: Rohith Sharma K S 
Committed: Sat Aug 18 10:26:55 2018 +0530

--
 .../applicationsmanager/TestAMLaunchFailure.java   |  2 +-
 .../applicationsmanager/TestSchedulerNegotiator.java   |  2 +-
 .../TestTimelineServiceClientIntegration.java  |  3 ++-
 .../security/TestTimelineAuthFilterForV2.java  |  2 +-
 .../collector/PerNodeTimelineCollectorsAuxService.java | 13 +
 5 files changed, 14 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aacbfff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
index c0009dd..ad39099 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
@@ -90,7 +90,7 @@ public class TestAMLaunchFailure {
 //}
 //
 //@Override
-//public void addApplication(ApplicationId applicationId,
+//public void addApplicationIfAbsent(ApplicationId applicationId,
 //ApplicationMaster master, String user, String queue, Priority 
priority
 //, ApplicationStore appStore)
 //throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aacbfff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
index 7d06e55..fedbf2b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestSchedulerNegotiator.java
@@ -67,7 +67,7 @@ public class TestSchedulerNegotiator {
 //  return null;
 //}
 //@Override
-//public void addApplication(ApplicationId applicationId,
+//public void addApplicationIfAbsent(ApplicationId applicationId,
 //ApplicationMaster master, String user, String queue, Priority 
priority,
 //ApplicationStore store)
 //throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aacbfff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
 

hadoop git commit: HADOOP-14624. Add GenericTestUtils.DelayAnswer that accept slf4j logger API. Contributed by Ian Pickering and Wenxin He.

2018-08-17 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/trunk ab37423ad -> 79c97f6a0


HADOOP-14624. Add GenericTestUtils.DelayAnswer that accept slf4j logger API. 
Contributed by Ian Pickering and Wenxin He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79c97f6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79c97f6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79c97f6a

Branch: refs/heads/trunk
Commit: 79c97f6a0bebc95ff81a8ef9b07d3619f05ed583
Parents: ab37423
Author: Giovanni Matteo Fumarola 
Authored: Fri Aug 17 14:40:00 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Fri Aug 17 14:40:00 2018 -0700

--
 .../java/org/apache/hadoop/test/GenericTestUtils.java  |  8 
 .../org/apache/hadoop/hdfs/TestDFSClientFailover.java  |  6 --
 .../java/org/apache/hadoop/hdfs/TestFileAppend4.java   |  6 +++---
 .../java/org/apache/hadoop/hdfs/TestReplication.java   |  6 --
 .../hdfs/qjournal/client/TestIPCLoggerChannel.java |  6 +++---
 .../hdfs/server/datanode/BlockReportTestBase.java  |  8 
 .../hadoop/hdfs/server/namenode/FSImageTestUtil.java   | 13 +
 .../hadoop/hdfs/server/namenode/TestCheckpoint.java|  4 +++-
 .../hadoop/hdfs/server/namenode/TestDeleteRace.java| 10 +-
 .../hadoop/hdfs/server/namenode/TestSaveNamespace.java | 10 +-
 .../hadoop/hdfs/server/namenode/ha/TestDNFencing.java  |  6 +++---
 .../hdfs/server/namenode/ha/TestPipelinesFailover.java | 10 +-
 .../server/namenode/ha/TestStandbyCheckpoints.java |  6 +++---
 13 files changed, 59 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79c97f6a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 0112894..d68f4e2 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -501,7 +501,7 @@ public abstract class GenericTestUtils {
* method is called, then waits on another before continuing.
*/
  public static class DelayAnswer implements Answer<Object> {
-private final Log LOG;
+private final org.slf4j.Logger LOG;
 
 private final CountDownLatch fireLatch = new CountDownLatch(1);
 private final CountDownLatch waitLatch = new CountDownLatch(1);
@@ -514,7 +514,7 @@ public abstract class GenericTestUtils {
 private volatile Throwable thrown;
 private volatile Object returnValue;
 
-public DelayAnswer(Log log) {
+public DelayAnswer(org.slf4j.Logger log) {
   this.LOG = log;
 }
 
@@ -611,13 +611,13 @@ public abstract class GenericTestUtils {
*/
  public static class DelegateAnswer implements Answer<Object> {
 private final Object delegate;
-private final Log log;
+private final org.slf4j.Logger log;
 
 public DelegateAnswer(Object delegate) {
   this(null, delegate);
 }
 
-public DelegateAnswer(Log log, Object delegate) {
+public DelegateAnswer(org.slf4j.Logger log, Object delegate) {
   this.log = log;
   this.delegate = delegate;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79c97f6a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
index c14ebb4..f9d0460 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
@@ -34,6 +34,8 @@ import java.util.List;
 
 import javax.net.SocketFactory;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -67,7 +69,7 @@ import sun.net.spi.nameservice.NameService;
 
 public class TestDFSClientFailover {
   
-  private static final Log LOG = 
LogFactory.getLog(TestDFSClientFailover.class);
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestDFSClientFailover.class);
   
   private static final Path TEST_FILE = new Path("/tmp/failover-test-file");
   private static final int 

hadoop git commit: YARN-8640. Restore previous state in container-executor after failure. Contributed by Jim Brennan

2018-08-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 2ed1a5d00 -> a9d86c526


YARN-8640. Restore previous state in container-executor after failure. 
Contributed by Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9d86c52
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9d86c52
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9d86c52

Branch: refs/heads/branch-2.7
Commit: a9d86c526534f6df29318491bcf6df0fdf431b8d
Parents: 2ed1a5d
Author: Jason Lowe 
Authored: Fri Aug 17 15:21:00 2018 -0500
Committer: Jason Lowe 
Committed: Fri Aug 17 15:21:00 2018 -0500

--
 .../impl/container-executor.c   | 40 
 .../test/test-container-executor.c  |  5 ++-
 2 files changed, 27 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9d86c52/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 474900b..d04179a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -165,10 +165,12 @@ static int change_effective_user(uid_t user, gid_t group) 
{
  * cgroup_file: Path to cgroup file where pid needs to be written to.
  */
 static int write_pid_to_cgroup_as_root(const char* cgroup_file, pid_t pid) {
+  int rc = 0;
   uid_t user = geteuid();
   gid_t group = getegid();
   if (change_effective_user(0, 0) != 0) {
-return -1;
+rc = -1;
+goto cleanup;
   }
 
   // open
@@ -176,7 +178,8 @@ static int write_pid_to_cgroup_as_root(const char* 
cgroup_file, pid_t pid) {
   if (cgroup_fd == -1) {
 fprintf(LOGFILE, "Can't open file %s as node manager - %s\n", cgroup_file,
strerror(errno));
-return -1;
+rc = -1;
+goto cleanup;
   }
 
   // write pid
@@ -187,15 +190,17 @@ static int write_pid_to_cgroup_as_root(const char* 
cgroup_file, pid_t pid) {
   if (written == -1) {
 fprintf(LOGFILE, "Failed to write pid to file %s - %s\n",
cgroup_file, strerror(errno));
-return -1;
+rc = -1;
+goto cleanup;
   }
 
+cleanup:
   // Revert back to the calling user.
   if (change_effective_user(user, group)) {
-return -1;
+rc = -1;
   }
 
-  return 0;
+  return rc;
 }
 #endif
 
@@ -204,21 +209,24 @@ static int write_pid_to_cgroup_as_root(const char* 
cgroup_file, pid_t pid) {
  * pid_file: Path to pid file where pid needs to be written to
  */
 static int write_pid_to_file_as_nm(const char* pid_file, pid_t pid) {
+  char *temp_pid_file = NULL;
+  int rc = 0;
   uid_t user = geteuid();
   gid_t group = getegid();
   if (change_effective_user(nm_uid, nm_gid) != 0) {
-return -1;
+rc = -1;
+goto cleanup;
   }
 
-  char *temp_pid_file = concatenate("%s.tmp", "pid_file_path", 1, pid_file);
+  temp_pid_file = concatenate("%s.tmp", "pid_file_path", 1, pid_file);
 
   // create with 700
   int pid_fd = open(temp_pid_file, O_WRONLY|O_CREAT|O_EXCL, S_IRWXU);
   if (pid_fd == -1) {
 fprintf(LOGFILE, "Can't open file %s as node manager - %s\n", 
temp_pid_file,
strerror(errno));
-free(temp_pid_file);
-return -1;
+rc = -1;
+goto cleanup;
   }
 
   // write pid to temp file
@@ -229,8 +237,8 @@ static int write_pid_to_file_as_nm(const char* pid_file, 
pid_t pid) {
   if (written == -1) {
 fprintf(LOGFILE, "Failed to write pid to file %s as node manager - %s\n",
temp_pid_file, strerror(errno));
-free(temp_pid_file);
-return -1;
+rc = -1;
+goto cleanup;
   }
 
   // rename temp file to actual pid file
@@ -239,18 +247,18 @@ static int write_pid_to_file_as_nm(const char* pid_file, 
pid_t pid) {
 fprintf(LOGFILE, "Can't move pid file from %s to %s as node manager - 
%s\n",
 temp_pid_file, pid_file, strerror(errno));
 unlink(temp_pid_file);
-free(temp_pid_file);
-return -1;
+rc = -1;
+goto cleanup;
   }
 
+cleanup:
   // Revert back to the calling user.
   if (change_effective_user(user, group)) {
-   free(temp_pid_file);
-return -1;
+rc = -1;
   }
 
   free(temp_pid_file);
-  return 0;
+  return rc;
 }
 
 /**


hadoop git commit: YARN-8640. Restore previous state in container-executor after failure. Contributed by Jim Brennan

2018-08-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 5232653ec -> a896a011d


YARN-8640. Restore previous state in container-executor after failure. 
Contributed by Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a896a011
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a896a011
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a896a011

Branch: refs/heads/branch-2.8
Commit: a896a011d7e62f413db9bc7bd108b27c05d7be9a
Parents: 5232653
Author: Jason Lowe 
Authored: Fri Aug 17 15:13:02 2018 -0500
Committer: Jason Lowe 
Committed: Fri Aug 17 15:13:02 2018 -0500

--
 .../impl/container-executor.c   | 41 
 1 file changed, 24 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a896a011/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 28a924a..37cc659 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -199,10 +199,12 @@ static int change_effective_user(uid_t user, gid_t group) 
{
  * cgroup_file: Path to cgroup file where pid needs to be written to.
  */
 static int write_pid_to_cgroup_as_root(const char* cgroup_file, pid_t pid) {
+  int rc = 0;
   uid_t user = geteuid();
   gid_t group = getegid();
   if (change_effective_user(0, 0) != 0) {
-return -1;
+rc = -1;
+goto cleanup;
   }
 
   // open
@@ -210,7 +212,8 @@ static int write_pid_to_cgroup_as_root(const char* 
cgroup_file, pid_t pid) {
   if (cgroup_fd == -1) {
 fprintf(LOGFILE, "Can't open file %s as node manager - %s\n", cgroup_file,
strerror(errno));
-return -1;
+rc = -1;
+goto cleanup;
   }
 
   // write pid
@@ -221,15 +224,17 @@ static int write_pid_to_cgroup_as_root(const char* 
cgroup_file, pid_t pid) {
   if (written == -1) {
 fprintf(LOGFILE, "Failed to write pid to file %s - %s\n",
cgroup_file, strerror(errno));
-return -1;
+rc = -1;
+goto cleanup;
   }
 
+cleanup:
   // Revert back to the calling user.
   if (change_effective_user(user, group)) {
-return -1;
+rc = -1;
   }
 
-  return 0;
+  return rc;
 }
 #endif
 
@@ -238,15 +243,18 @@ static int write_pid_to_cgroup_as_root(const char* 
cgroup_file, pid_t pid) {
  * pid_file: Path to pid file where pid needs to be written to
  */
 static int write_pid_to_file_as_nm(const char* pid_file, pid_t pid) {
+  char *temp_pid_file = NULL;
+  int rc = 0;
   uid_t user = geteuid();
   gid_t group = getegid();
   if (change_effective_user(nm_uid, nm_gid) != 0) {
 fprintf(ERRORFILE, "Could not change to effective users %d, %d\n", nm_uid, 
nm_gid);
 fflush(ERRORFILE);
-return -1;
+rc = -1;
+goto cleanup;
   }
 
-  char *temp_pid_file = concatenate("%s.tmp", "pid_file_path", 1, pid_file);
+  temp_pid_file = concatenate("%s.tmp", "pid_file_path", 1, pid_file);
   fprintf(LOGFILE, "Writing to tmp file %s\n", temp_pid_file);
   fflush(LOGFILE);
   // create with 700
@@ -255,8 +263,8 @@ static int write_pid_to_file_as_nm(const char* pid_file, 
pid_t pid) {
 fprintf(LOGFILE, "Can't open file %s as node manager - %s\n", 
temp_pid_file,
strerror(errno));
 fflush(LOGFILE);
-free(temp_pid_file);
-return -1;
+rc = -1;
+goto cleanup;
   }
 
   // write pid to temp file
@@ -268,8 +276,8 @@ static int write_pid_to_file_as_nm(const char* pid_file, 
pid_t pid) {
 fprintf(LOGFILE, "Failed to write pid to file %s as node manager - %s\n",
temp_pid_file, strerror(errno));
 fflush(LOGFILE);
-free(temp_pid_file);
-return -1;
+rc = -1;
+goto cleanup;
   }
 
   // rename temp file to actual pid file
@@ -279,18 +287,18 @@ static int write_pid_to_file_as_nm(const char* pid_file, 
pid_t pid) {
 temp_pid_file, pid_file, strerror(errno));
 fflush(LOGFILE);
 unlink(temp_pid_file);
-free(temp_pid_file);
-return -1;
+rc = -1;
+goto cleanup;
   }
 
+cleanup:
   // Revert back to the calling user.
   if (change_effective_user(user, group)) {
-   free(temp_pid_file);
-return -1;
+rc = -1;
   }
 
   

hadoop git commit: HDDS-355. Disable OpenKeyDeleteService and DeleteKeysService. Contributed by Anu Engineer.

2018-08-17 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 60ffec9f7 -> ab37423ad


HDDS-355. Disable OpenKeyDeleteService and DeleteKeysService.
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab37423a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab37423a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab37423a

Branch: refs/heads/trunk
Commit: ab37423ad8debe2f050133ad97b686083531c2ea
Parents: 60ffec9
Author: Anu Engineer 
Authored: Fri Aug 17 11:50:46 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 17 11:50:46 2018 -0700

--
 .../commandhandler/TestBlockDeletion.java   |  2 ++
 .../hadoop/ozone/om/TestOzoneManager.java   | 11 +++---
 .../apache/hadoop/ozone/om/package-info.java| 22 
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  | 37 
 4 files changed, 36 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab37423a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index badd435..45659bd 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.utils.MetadataStore;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.io.File;
@@ -102,6 +103,7 @@ public class TestBlockDeletion {
   }
 
   @Test(timeout = 6)
+  @Ignore("Until delete background service is fixed.")
   public void testBlockDeletion()
   throws IOException, InterruptedException {
 String volumeName = UUID.randomUUID().toString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab37423a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
index 7c8595c..5109453 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
@@ -56,12 +56,12 @@ import org.apache.hadoop.ozone.web.response.ListBuckets;
 import org.apache.hadoop.ozone.web.response.ListKeys;
 import org.apache.hadoop.ozone.web.response.ListVolumes;
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BackgroundService;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -1188,10 +1188,11 @@ public class TestOzoneManager {
   }
 
 
-  @Test
+  //Disabling this test
+  @Ignore("Disabling this test until Open Key is fixed.")
   public void testExpiredOpenKey() throws Exception {
-BackgroundService openKeyCleanUpService = ((KeyManagerImpl)cluster
-.getOzoneManager().getKeyManager()).getOpenKeyCleanupService();
+//BackgroundService openKeyCleanUpService = ((KeyManagerImpl)cluster
+//.getOzoneManager().getKeyManager()).getOpenKeyCleanupService();
 
 String userName = "user" + RandomStringUtils.randomNumeric(5);
 String adminName = "admin" + RandomStringUtils.randomNumeric(5);
@@ -1252,7 +1253,7 @@ public class TestOzoneManager {
 KeyArgs keyArgs5 = new KeyArgs("testKey5", bucketArgs);
 storageHandler.newKeyWriter(keyArgs5);
 
-openKeyCleanUpService.triggerBackgroundTaskForTesting();
+//openKeyCleanUpService.triggerBackgroundTaskForTesting();
 Thread.sleep(2000);
 // now all k1-k4 should have been removed by the clean-up task, only k5
 // should be present in ExpiredOpenKeys.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab37423a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/package-info.java

hadoop git commit: HADOOP-9214. Create a new touch command to allow modifying atime and mtime. Contributed by Hrishikesh Gadre.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk a17eed1b8 -> 60ffec9f7


HADOOP-9214. Create a new touch command to allow modifying atime and mtime. 
Contributed by Hrishikesh Gadre.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60ffec9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60ffec9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60ffec9f

Branch: refs/heads/trunk
Commit: 60ffec9f7921a50aff20434c1042b16fa59240f7
Parents: a17eed1
Author: Xiao Chen 
Authored: Fri Aug 17 10:53:22 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 11:18:09 2018 -0700

--
 .../org/apache/hadoop/fs/shell/FsCommand.java   |   2 +-
 .../java/org/apache/hadoop/fs/shell/Touch.java  |  85 
 .../apache/hadoop/fs/shell/TouchCommands.java   | 198 +++
 .../src/site/markdown/FileSystemShell.md|  32 +++
 .../org/apache/hadoop/fs/TestFsShellTouch.java  | 103 ++
 .../src/test/resources/testConf.xml |  51 +
 6 files changed, 385 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60ffec9f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
index 4a13414..784bbf3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
@@ -66,7 +66,7 @@ abstract public class FsCommand extends Command {
 factory.registerCommands(Tail.class);
 factory.registerCommands(Head.class);
 factory.registerCommands(Test.class);
-factory.registerCommands(Touch.class);
+factory.registerCommands(TouchCommands.class);
 factory.registerCommands(Truncate.class);
 factory.registerCommands(SnapshotCommands.class);
 factory.registerCommands(XAttrCommands.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60ffec9f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
deleted file mode 100644
index a6c751e..000
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.shell;
-
-import java.io.IOException;
-import java.util.LinkedList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.PathIOException;
-import org.apache.hadoop.fs.PathIsDirectoryException;
-import org.apache.hadoop.fs.PathNotFoundException;
-
-/**
- * Unix touch like commands
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-
-class Touch extends FsCommand {
-  public static void registerCommands(CommandFactory factory) {
-factory.addClass(Touchz.class, "-touchz");
-  }
-
-  /**
-   * (Re)create zero-length file at the specified path.
-   * This will be replaced by a more UNIX-like touch when files may be
-   * modified.
-   */
-  public static class Touchz extends Touch {
-public static final String NAME = "touchz";
-public static final String USAGE = " ...";
-public static final String DESCRIPTION =
-  "Creates a file of zero length " +
-  "at  with current time as the timestamp of that . " +
-  "An error is returned if the file exists with non-zero length\n";
-
-@Override
-protected void processOptions(LinkedList args) {
-  

hadoop git commit: HADOOP-14154 Persist isAuthoritative bit in DynamoDBMetaStore (Contributed by Gabor Bota)

2018-08-17 Thread fabbri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8d7c93186 -> d7232857d


HADOOP-14154 Persist isAuthoritative bit in DynamoDBMetaStore (Contributed by 
Gabor Bota)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7232857
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7232857
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7232857

Branch: refs/heads/trunk
Commit: d7232857d8d1e10cdac171acdc931187e45fd6be
Parents: 8d7c931
Author: Aaron Fabbri 
Authored: Fri Aug 17 10:08:30 2018 -0700
Committer: Aaron Fabbri 
Committed: Fri Aug 17 10:15:39 2018 -0700

--
 .../hadoop/fs/s3a/s3guard/DDBPathMetadata.java  |  77 +++
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   | 130 +++
 .../PathMetadataDynamoDBTranslation.java|  71 --
 .../apache/hadoop/fs/s3a/s3guard/S3Guard.java   |   4 +
 .../site/markdown/tools/hadoop-aws/s3guard.md   |   5 +-
 .../fs/s3a/s3guard/MetadataStoreTestBase.java   |  49 +++
 .../TestPathMetadataDynamoDBTranslation.java|  47 ++-
 7 files changed, 337 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7232857/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DDBPathMetadata.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DDBPathMetadata.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DDBPathMetadata.java
new file mode 100644
index 000..a67fc4e
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DDBPathMetadata.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.s3guard;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.s3a.Tristate;
+
+/**
+ * {@code DDBPathMetadata} wraps {@link PathMetadata} and adds the
+ * isAuthoritativeDir flag to provide support for authoritative directory
+ * listings in {@link DynamoDBMetadataStore}.
+ */
+public class DDBPathMetadata extends PathMetadata {
+
+  private boolean isAuthoritativeDir;
+
+  public DDBPathMetadata(PathMetadata pmd, boolean isAuthoritativeDir) {
+super(pmd.getFileStatus(), pmd.isEmptyDirectory(), pmd.isDeleted());
+this.isAuthoritativeDir = isAuthoritativeDir;
+  }
+
+  public DDBPathMetadata(PathMetadata pmd) {
+super(pmd.getFileStatus(), pmd.isEmptyDirectory(), pmd.isDeleted());
+this.isAuthoritativeDir = false;
+  }
+
+  public DDBPathMetadata(FileStatus fileStatus) {
+super(fileStatus);
+this.isAuthoritativeDir = false;
+  }
+
+  public DDBPathMetadata(FileStatus fileStatus, Tristate isEmptyDir,
+  boolean isDeleted) {
+super(fileStatus, isEmptyDir, isDeleted);
+this.isAuthoritativeDir = false;
+  }
+
+  public DDBPathMetadata(FileStatus fileStatus, Tristate isEmptyDir,
+  boolean isDeleted, boolean isAuthoritativeDir) {
+super(fileStatus, isEmptyDir, isDeleted);
+this.isAuthoritativeDir = isAuthoritativeDir;
+  }
+
+  public boolean isAuthoritativeDir() {
+return isAuthoritativeDir;
+  }
+
+  public void setAuthoritativeDir(boolean authoritativeDir) {
+isAuthoritativeDir = authoritativeDir;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+return super.equals(o);
+  }
+
+  @Override public int hashCode() {
+return super.hashCode();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7232857/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index ba80b88..ddb493f 100644
--- 

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 4845464ba -> 2ed1a5d00


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.

(cherry picked from commit 8d7c93186e3090b19aa59006bb6b32ba929bd8e6)

 Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java

(cherry picked from commit 5232653ec0bf8d08187f41930eec073e1b7b1df2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ed1a5d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ed1a5d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ed1a5d0

Branch: refs/heads/branch-2.7
Commit: 2ed1a5d000e3c18003a51dae3d161dcc19f0a307
Parents: 4845464
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:21:10 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 61 ++--
 1 file changed, 57 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ed1a5d0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 0436cc0..cdeb3b5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -32,8 +32,6 @@ import javax.net.ssl.SSLHandshakeException;
 import javax.net.ssl.SSLSocket;
 import javax.net.ssl.SSLSocketFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -43,10 +41,12 @@ import org.apache.hadoop.security.ssl.SSLFactory;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -54,12 +54,17 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
   private static final String BASEDIR = System.getProperty("test.build.dir",
   "target/test-dir") + "/" + TestSSLHttpServer.class.getSimpleName();
 
-  private static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -80,6 +85,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
 
@@ -125,6 +133,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuites);
+  cipherSuitesPropertyValue = cipherSuites;
+ 

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 185c8f2ab -> a41f18098


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.

(cherry picked from commit 8d7c93186e3090b19aa59006bb6b32ba929bd8e6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a41f1809
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a41f1809
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a41f1809

Branch: refs/heads/branch-3.0
Commit: a41f18098b849ba2ccbae824a5a57bfe7b6ad44e
Parents: 185c8f2
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:20:20 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 54 +++-
 1 file changed, 53 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a41f1809/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 5af6d6f..2166464 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -58,11 +58,15 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -83,6 +87,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
 
@@ -127,6 +134,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuites);
+  cipherSuitesPropertyValue = cipherSuites;
+}
+System.clearProperty(HTTPS_CIPHER_SUITES_KEY);
+  }
+
+  private static void restoreHttpsCipherSuites() {
+if (cipherSuitesPropertyValue != null) {
+  LOG.info("Restoring property {} to value: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuitesPropertyValue);
+  System.setProperty(HTTPS_CIPHER_SUITES_KEY, cipherSuitesPropertyValue);
+  cipherSuitesPropertyValue = null;
+}
+  }
+
+  private static void turnOnSSLDebugLogging() {
+String sslDebug = System.getProperty(JAVAX_NET_DEBUG_KEY);
+if (sslDebug != null) {
+  sslDebugPropertyValue = sslDebug;
+}
+System.setProperty(JAVAX_NET_DEBUG_KEY, "all");
+  }
+
+  private static void restoreSSLDebugLogging() {
+if (sslDebugPropertyValue != null) {
+  System.setProperty(JAVAX_NET_DEBUG_KEY, sslDebugPropertyValue);
+  sslDebugPropertyValue = null;
+} else {
+  

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 50ba2272e -> 42c47971d


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.

(cherry picked from commit 8d7c93186e3090b19aa59006bb6b32ba929bd8e6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42c47971
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42c47971
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42c47971

Branch: refs/heads/branch-2.9
Commit: 42c47971d8bb2ce2ea06b4e94d7c12b4b61870cd
Parents: 50ba227
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:20:37 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 54 +++-
 1 file changed, 53 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42c47971/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 3c68986..38fd926 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -58,11 +58,15 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -83,6 +87,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
 
@@ -127,6 +134,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuites);
+  cipherSuitesPropertyValue = cipherSuites;
+}
+System.clearProperty(HTTPS_CIPHER_SUITES_KEY);
+  }
+
+  private static void restoreHttpsCipherSuites() {
+if (cipherSuitesPropertyValue != null) {
+  LOG.info("Restoring property {} to value: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuitesPropertyValue);
+  System.setProperty(HTTPS_CIPHER_SUITES_KEY, cipherSuitesPropertyValue);
+  cipherSuitesPropertyValue = null;
+}
+  }
+
+  private static void turnOnSSLDebugLogging() {
+String sslDebug = System.getProperty(JAVAX_NET_DEBUG_KEY);
+if (sslDebug != null) {
+  sslDebugPropertyValue = sslDebug;
+}
+System.setProperty(JAVAX_NET_DEBUG_KEY, "all");
+  }
+
+  private static void restoreSSLDebugLogging() {
+if (sslDebugPropertyValue != null) {
+  System.setProperty(JAVAX_NET_DEBUG_KEY, sslDebugPropertyValue);
+  sslDebugPropertyValue = null;
+} else {
+  System.clearProperty(JAVAX_NET_DEBUG_KEY);

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 2a6b62655 -> 5232653ec


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.

(cherry picked from commit 8d7c93186e3090b19aa59006bb6b32ba929bd8e6)

 Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5232653e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5232653e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5232653e

Branch: refs/heads/branch-2.8
Commit: 5232653ec0bf8d08187f41930eec073e1b7b1df2
Parents: 2a6b626
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:20:48 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 61 ++--
 1 file changed, 57 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5232653e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index f52a055..38fd926 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -32,8 +32,6 @@ import javax.net.ssl.SSLHandshakeException;
 import javax.net.ssl.SSLSocket;
 import javax.net.ssl.SSLSocketFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -45,10 +43,12 @@ import org.apache.hadoop.util.StringUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -56,12 +56,17 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
   private static final String BASEDIR =
   GenericTestUtils.getTempPath(TestSSLHttpServer.class.getSimpleName());
 
-  private static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -82,6 +87,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
 
@@ -126,6 +134,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuites);
+  cipherSuitesPropertyValue = cipherSuites;
+}
+System.clearProperty(HTTPS_CIPHER_SUITES_KEY);
+  }
+
+  private static void 

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e2210a517 -> 805647287


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.

(cherry picked from commit 8d7c93186e3090b19aa59006bb6b32ba929bd8e6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80564728
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80564728
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80564728

Branch: refs/heads/branch-2
Commit: 8056472879ff150011887e8f12948ed2ce7534ca
Parents: e2210a5
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:20:29 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 54 +++-
 1 file changed, 53 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80564728/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 3c68986..38fd926 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -58,11 +58,15 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -83,6 +87,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
 
@@ -127,6 +134,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuites);
+  cipherSuitesPropertyValue = cipherSuites;
+}
+System.clearProperty(HTTPS_CIPHER_SUITES_KEY);
+  }
+
+  private static void restoreHttpsCipherSuites() {
+if (cipherSuitesPropertyValue != null) {
+  LOG.info("Restoring property {} to value: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuitesPropertyValue);
+  System.setProperty(HTTPS_CIPHER_SUITES_KEY, cipherSuitesPropertyValue);
+  cipherSuitesPropertyValue = null;
+}
+  }
+
+  private static void turnOnSSLDebugLogging() {
+String sslDebug = System.getProperty(JAVAX_NET_DEBUG_KEY);
+if (sslDebug != null) {
+  sslDebugPropertyValue = sslDebug;
+}
+System.setProperty(JAVAX_NET_DEBUG_KEY, "all");
+  }
+
+  private static void restoreSSLDebugLogging() {
+if (sslDebugPropertyValue != null) {
+  System.setProperty(JAVAX_NET_DEBUG_KEY, sslDebugPropertyValue);
+  sslDebugPropertyValue = null;
+} else {
+  System.clearProperty(JAVAX_NET_DEBUG_KEY);
+   

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 06f0d5e25 -> 7556b09e9


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.

(cherry picked from commit 8d7c93186e3090b19aa59006bb6b32ba929bd8e6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7556b09e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7556b09e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7556b09e

Branch: refs/heads/branch-3.1
Commit: 7556b09e9a790e5bc697396b0871d99ed3cf1318
Parents: 06f0d5e
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:20:12 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 54 +++-
 1 file changed, 53 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7556b09e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 5af6d6f..2166464 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -58,11 +58,15 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -83,6 +87,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
 
@@ -127,6 +134,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuites);
+  cipherSuitesPropertyValue = cipherSuites;
+}
+System.clearProperty(HTTPS_CIPHER_SUITES_KEY);
+  }
+
+  private static void restoreHttpsCipherSuites() {
+if (cipherSuitesPropertyValue != null) {
+  LOG.info("Restoring property {} to value: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuitesPropertyValue);
+  System.setProperty(HTTPS_CIPHER_SUITES_KEY, cipherSuitesPropertyValue);
+  cipherSuitesPropertyValue = null;
+}
+  }
+
+  private static void turnOnSSLDebugLogging() {
+String sslDebug = System.getProperty(JAVAX_NET_DEBUG_KEY);
+if (sslDebug != null) {
+  sslDebugPropertyValue = sslDebug;
+}
+System.setProperty(JAVAX_NET_DEBUG_KEY, "all");
+  }
+
+  private static void restoreSSLDebugLogging() {
+if (sslDebugPropertyValue != null) {
+  System.setProperty(JAVAX_NET_DEBUG_KEY, sslDebugPropertyValue);
+  sslDebugPropertyValue = null;
+} else {
+  

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk fb5b3dce6 -> 8d7c93186


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d7c9318
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d7c9318
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d7c9318

Branch: refs/heads/trunk
Commit: 8d7c93186e3090b19aa59006bb6b32ba929bd8e6
Parents: fb5b3dc
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:09:23 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 54 +++-
 1 file changed, 53 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d7c9318/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 5af6d6f..2166464 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -58,11 +58,15 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -83,6 +87,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
 
@@ -127,6 +134,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuites);
+  cipherSuitesPropertyValue = cipherSuites;
+}
+System.clearProperty(HTTPS_CIPHER_SUITES_KEY);
+  }
+
+  private static void restoreHttpsCipherSuites() {
+if (cipherSuitesPropertyValue != null) {
+  LOG.info("Restoring property {} to value: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuitesPropertyValue);
+  System.setProperty(HTTPS_CIPHER_SUITES_KEY, cipherSuitesPropertyValue);
+  cipherSuitesPropertyValue = null;
+}
+  }
+
+  private static void turnOnSSLDebugLogging() {
+String sslDebug = System.getProperty(JAVAX_NET_DEBUG_KEY);
+if (sslDebug != null) {
+  sslDebugPropertyValue = sslDebug;
+}
+System.setProperty(JAVAX_NET_DEBUG_KEY, "all");
+  }
+
+  private static void restoreSSLDebugLogging() {
+if (sslDebugPropertyValue != null) {
+  System.setProperty(JAVAX_NET_DEBUG_KEY, sslDebugPropertyValue);
+  sslDebugPropertyValue = null;
+} else {
+  System.clearProperty(JAVAX_NET_DEBUG_KEY);
+}
   }
 
   @Test



[1/2] hadoop git commit: HDDS-218. add existing docker-compose files to the ozone release artifact. Contributed by Elek Marton.

2018-08-17 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk fa121eb66 -> fb5b3dce6


HDDS-218. add existing docker-compose files to the ozone release artifact. 
Contributed by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9dd5d5ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9dd5d5ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9dd5d5ba

Branch: refs/heads/trunk
Commit: 9dd5d5ba713240c559b102fa3172b10077f5da87
Parents: fa121eb
Author: Xiaoyu Yao 
Authored: Fri Aug 17 07:58:24 2018 -0700
Committer: Xiaoyu Yao 
Committed: Fri Aug 17 07:58:24 2018 -0700

--
 dev-support/bin/ozone-dist-layout-stitching |  2 +
 hadoop-dist/pom.xml | 17 ---
 hadoop-dist/src/main/compose/README.md  | 51 
 .../src/main/compose/ozone/docker-compose.yaml  |  6 +--
 .../main/compose/ozoneperf/docker-compose.yaml  |  6 +--
 hadoop-ozone/docs/content/GettingStarted.md |  2 +-
 6 files changed, 60 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dd5d5ba/dev-support/bin/ozone-dist-layout-stitching
--
diff --git a/dev-support/bin/ozone-dist-layout-stitching 
b/dev-support/bin/ozone-dist-layout-stitching
index c30a37d..2ba7791 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -151,6 +151,8 @@ cp 
"${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" 
./share/hadoop/ozone/webapps/ozoneManager/
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" 
./share/hadoop/hdds/webapps/scm/
 
+#Copy docker compose files
+run cp -p -r "${ROOT}/hadoop-dist/src/main/compose" .
 
 mkdir -p ./share/hadoop/mapreduce
 mkdir -p ./share/hadoop/yarn

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dd5d5ba/hadoop-dist/pom.xml
--
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 5de6759..da05015 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -277,23 +277,6 @@
 maven-resources-plugin
 
   
-copy-docker-compose
-
-  copy-resources
-
-prepare-package
-
-  ${project.build.directory}/compose
-  
-  
-
-  src/main/compose
-  true
-
-  
-
-  
-  
 copy-dockerfile
 
   copy-resources

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dd5d5ba/hadoop-dist/src/main/compose/README.md
--
diff --git a/hadoop-dist/src/main/compose/README.md 
b/hadoop-dist/src/main/compose/README.md
new file mode 100644
index 000..8189d2c
--- /dev/null
+++ b/hadoop-dist/src/main/compose/README.md
@@ -0,0 +1,51 @@
+
+
+# Docker cluster definitions
+
+This directory contains multiple docker cluster definitions to start local 
pseudo cluster with different configuration.
+
+It helps to start local (multi-node like) pseudo cluster with docker and 
docker-compose and obviously it's not for production.
+
+You may find more information in the specific subdirectories but in generic 
you can use the following commands:
+
+## Usage
+
+To start a cluster go to a subdirectory and start the cluster:
+
+```
+docker-compose up -d
+```
+
+You can check the logs of all the components with:
+
+```
+docker-compose logs
+```
+
+In case of a problem you can destroy the cluster an delete all the local state 
with:
+
+```
+docker-compose down
+```
+
+(Note: a simple docker-compose stop may not delete all the local data).
+
+You can scale up and down the components:
+
+```
+docker-compose scale datanode=5
+```
+
+Usually the key webui ports are published on the docker host.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dd5d5ba/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
--
diff --git a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml 
b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
index bb5e8dd..0a6a9d8 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
@@ -19,7 +19,7 @@ services:
datanode:
   image: apache/hadoop-runner
   volumes:
-- ../../ozone:/opt/hadoop
+- ../..:/opt/hadoop
   ports:
 - 9864
   

[2/2] hadoop git commit: Revert "HDFS-13790. RBF: Move ClientProtocol APIs to its own module. Contributed by Chao Sun."

2018-08-17 Thread xyao
Revert "HDFS-13790. RBF: Move ClientProtocol APIs to its own module. 
Contributed by Chao Sun."

This reverts commit fa121eb66bc42e9cb5586f8c2e268cfdc2ed187a.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb5b3dce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb5b3dce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb5b3dce

Branch: refs/heads/trunk
Commit: fb5b3dce6192265bce9b9d93ab663bdc5be8048e
Parents: 9dd5d5b
Author: Xiaoyu Yao 
Authored: Fri Aug 17 08:01:44 2018 -0700
Committer: Xiaoyu Yao 
Committed: Fri Aug 17 08:01:44 2018 -0700

--
 .../federation/router/RouterRpcServer.java  | 1360 --
 1 file changed, 1202 insertions(+), 158 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb5b3dce/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index fe54993..29f32a6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -33,12 +33,16 @@ import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
@@ -50,6 +54,7 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
@@ -59,6 +64,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.AddBlockFlag;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
@@ -87,6 +93,7 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
 import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
@@ -94,8 +101,8 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import 
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
@@ -160,6 +167,11 @@ public class RouterRpcServer extends AbstractService
   /** Configuration for the RPC server. */
   private Configuration conf;
 
+  /** Identifier for the super user. */
+  private final String superUser;
+  /** Identifier for the super group. */
+  private final String superGroup;
+
   /** Router using this RPC server. */
   private final Router router;
 
@@ -187,10 +199,11 @@ public class RouterRpcServer extends AbstractService
   // Modules implementing groups of RPC calls
   /** Router Quota calls. */
   private final Quota quotaCall;
+  /** Erasure coding calls. */
+  private final ErasureCoding erasureCoding;
   /** NamenodeProtocol calls. */
   private final 

hadoop git commit: HDFS-13790. RBF: Move ClientProtocol APIs to its own module. Contributed by Chao Sun.

2018-08-17 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/trunk 77b015000 -> fa121eb66


HDFS-13790. RBF: Move ClientProtocol APIs to its own module. Contributed by 
Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa121eb6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa121eb6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa121eb6

Branch: refs/heads/trunk
Commit: fa121eb66bc42e9cb5586f8c2e268cfdc2ed187a
Parents: 77b0150
Author: Brahma Reddy Battula 
Authored: Fri Aug 17 15:22:55 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Fri Aug 17 15:22:55 2018 +0530

--
 .../federation/router/RouterRpcServer.java  | 1360 ++
 1 file changed, 158 insertions(+), 1202 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa121eb6/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 29f32a6..fe54993 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -33,16 +33,12 @@ import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
@@ -54,7 +50,6 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
@@ -64,7 +59,6 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.AddBlockFlag;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
@@ -93,7 +87,6 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
-import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
 import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
@@ -101,8 +94,8 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
+import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import 
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
@@ -167,11 +160,6 @@ public class RouterRpcServer extends AbstractService
   /** Configuration for the RPC server. */
   private Configuration conf;
 
-  /** Identifier for the super user. */
-  private final String superUser;
-  /** Identifier for the super group. */
-  private final String superGroup;
-
   /** Router using this RPC server. */
   private final Router router;
 
@@ -199,11 +187,10 @@ public class RouterRpcServer extends AbstractService
   // Modules implementing groups of RPC calls
   /** Router Quota calls. */
   private final Quota quotaCall;
-  /** Erasure coding calls. */
-  private final ErasureCoding erasureCoding;
   /** NamenodeProtocol 

hadoop git commit: HADOOP-8807. Update README and website to reflect HADOOP-8662. Contributed by Andras Bokor.

2018-08-17 Thread elek
Repository: hadoop
Updated Branches:
  refs/heads/trunk c67b0650e -> 77b015000


HADOOP-8807. Update README and website to reflect HADOOP-8662. Contributed by 
Andras Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77b01500
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77b01500
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77b01500

Branch: refs/heads/trunk
Commit: 77b015000a48545209928e31630adaaf6960b4c5
Parents: c67b065
Author: Márton Elek 
Authored: Fri Aug 17 11:07:23 2018 +0200
Committer: Márton Elek 
Committed: Fri Aug 17 11:10:29 2018 +0200

--
 README.txt | 2 +-
 .../src/main/java/org/apache/hadoop/util/ServletUtil.java  | 2 +-
 .../hadoop-hdfs/src/main/native/docs/libhdfs_footer.html   | 2 +-
 hadoop-mapreduce-project/pom.xml   | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77b01500/README.txt
--
diff --git a/README.txt b/README.txt
index 148cd31..559099b 100644
--- a/README.txt
+++ b/README.txt
@@ -1,6 +1,6 @@
 For the latest information about Hadoop, please visit our website at:
 
-   http://hadoop.apache.org/core/
+   http://hadoop.apache.org/
 
 and our wiki, at:
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77b01500/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java
index 2fd9b55..9ba9e94 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java
@@ -74,7 +74,7 @@ public class ServletUtil {
   }
 
  public static final String HTML_TAIL = "<hr />\n"
-+ "<a href='http://hadoop.apache.org/core'>Hadoop</a>, "
++ "<a href='http://hadoop.apache.org'>Hadoop</a>, "
 + Calendar.getInstance().get(Calendar.YEAR) + ".\n"
 + "</body></html>";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77b01500/hadoop-hdfs-project/hadoop-hdfs/src/main/native/docs/libhdfs_footer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/docs/libhdfs_footer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/docs/libhdfs_footer.html
index cb69b63..35930c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/docs/libhdfs_footer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/docs/libhdfs_footer.html
@@ -21,5 +21,5 @@
 
 
 <a href="http://wiki.apache.org/hadoop/LibHDFS">libhdfs</a> - 
-<a href="http://hadoop.apache.org/core/">Hadoop</a>
+<a href="http://hadoop.apache.org/">Hadoop</a>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77b01500/hadoop-mapreduce-project/pom.xml
--
diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml
index e75461a..fe89729 100644
--- a/hadoop-mapreduce-project/pom.xml
+++ b/hadoop-mapreduce-project/pom.xml
@@ -25,7 +25,7 @@
   <version>3.2.0-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop MapReduce</name>
-  <url>http://hadoop.apache.org/mapreduce/</url>
+  <url>http://hadoop.apache.org/</url>
 
   
 true


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13747. Statistic for list_located_status is incremented incorrectly by listStatusIterator. Contributed by Antal Mihalyi.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 3532bd5c8 -> 06f0d5e25


HDFS-13747. Statistic for list_located_status is incremented incorrectly by 
listStatusIterator. Contributed by Antal Mihalyi.

(cherry picked from commit c67b0650ea10896c6289703595faef0d262c00b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06f0d5e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06f0d5e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06f0d5e2

Branch: refs/heads/branch-3.1
Commit: 06f0d5e257a3208f905f59019623a589825e4c8b
Parents: 3532bd5
Author: Xiao Chen 
Authored: Thu Aug 16 23:13:10 2018 -0700
Committer: Xiao Chen 
Committed: Thu Aug 16 23:15:48 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DistributedFileSystem.java| 6 +-
 .../org/apache/hadoop/hdfs/TestDistributedFileSystem.java | 7 +++
 2 files changed, 12 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06f0d5e2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 3519c60..de05f82 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1217,7 +1217,11 @@ public class DistributedFileSystem extends FileSystem
   thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME,
   needLocation);
   statistics.incrementReadOps(1);
-  storageStatistics.incrementOpCounter(OpType.LIST_LOCATED_STATUS);
+  if (needLocation) {
+storageStatistics.incrementOpCounter(OpType.LIST_LOCATED_STATUS);
+  } else {
+storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
+  }
   if (thisListing == null) { // the directory does not exist
 throw new FileNotFoundException("File " + p + " does not exist.");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06f0d5e2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 072ee9f..03e6c8a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -706,6 +706,7 @@ public class TestDistributedFileSystem {
   // Iterative ls test
   long mkdirOp = getOpStatistics(OpType.MKDIRS);
   long listStatusOp = getOpStatistics(OpType.LIST_STATUS);
+  long locatedListStatusOP = getOpStatistics(OpType.LIST_LOCATED_STATUS);
   for (int i = 0; i < 10; i++) {
 Path p = new Path(dir, Integer.toString(i));
 fs.mkdirs(p);
@@ -729,6 +730,12 @@ public class TestDistributedFileSystem {
 checkStatistics(fs, readOps, ++writeOps, largeReadOps);
 checkOpStatistics(OpType.MKDIRS, mkdirOp);
 checkOpStatistics(OpType.LIST_STATUS, listStatusOp);
+
+fs.listLocatedStatus(dir);
+locatedListStatusOP++;
+readOps++;
+checkStatistics(fs, readOps, writeOps, largeReadOps);
+checkOpStatistics(OpType.LIST_LOCATED_STATUS, locatedListStatusOP);
   }
   
   opCount = getOpStatistics(OpType.GET_STATUS);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13747. Statistic for list_located_status is incremented incorrectly by listStatusIterator. Contributed by Antal Mihalyi.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 90bf2d3b5 -> 185c8f2ab


HDFS-13747. Statistic for list_located_status is incremented incorrectly by 
listStatusIterator. Contributed by Antal Mihalyi.

(cherry picked from commit c67b0650ea10896c6289703595faef0d262c00b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/185c8f2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/185c8f2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/185c8f2a

Branch: refs/heads/branch-3.0
Commit: 185c8f2abc364e4941ca4d4522fb61b5b3f5f903
Parents: 90bf2d3
Author: Xiao Chen 
Authored: Thu Aug 16 23:13:10 2018 -0700
Committer: Xiao Chen 
Committed: Thu Aug 16 23:15:58 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DistributedFileSystem.java| 6 +-
 .../org/apache/hadoop/hdfs/TestDistributedFileSystem.java | 7 +++
 2 files changed, 12 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/185c8f2a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 84d840f..9208e66 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1143,7 +1143,11 @@ public class DistributedFileSystem extends FileSystem
   thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME,
   needLocation);
   statistics.incrementReadOps(1);
-  storageStatistics.incrementOpCounter(OpType.LIST_LOCATED_STATUS);
+  if (needLocation) {
+storageStatistics.incrementOpCounter(OpType.LIST_LOCATED_STATUS);
+  } else {
+storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
+  }
   if (thisListing == null) { // the directory does not exist
 throw new FileNotFoundException("File " + p + " does not exist.");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/185c8f2a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 072ee9f..03e6c8a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -706,6 +706,7 @@ public class TestDistributedFileSystem {
   // Iterative ls test
   long mkdirOp = getOpStatistics(OpType.MKDIRS);
   long listStatusOp = getOpStatistics(OpType.LIST_STATUS);
+  long locatedListStatusOP = getOpStatistics(OpType.LIST_LOCATED_STATUS);
   for (int i = 0; i < 10; i++) {
 Path p = new Path(dir, Integer.toString(i));
 fs.mkdirs(p);
@@ -729,6 +730,12 @@ public class TestDistributedFileSystem {
 checkStatistics(fs, readOps, ++writeOps, largeReadOps);
 checkOpStatistics(OpType.MKDIRS, mkdirOp);
 checkOpStatistics(OpType.LIST_STATUS, listStatusOp);
+
+fs.listLocatedStatus(dir);
+locatedListStatusOP++;
+readOps++;
+checkStatistics(fs, readOps, writeOps, largeReadOps);
+checkOpStatistics(OpType.LIST_LOCATED_STATUS, locatedListStatusOP);
   }
   
   opCount = getOpStatistics(OpType.GET_STATUS);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13747. Statistic for list_located_status is incremented incorrectly by listStatusIterator. Contributed by Antal Mihalyi.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1697a0230 -> c67b0650e


HDFS-13747. Statistic for list_located_status is incremented incorrectly by 
listStatusIterator. Contributed by Antal Mihalyi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c67b0650
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c67b0650
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c67b0650

Branch: refs/heads/trunk
Commit: c67b0650ea10896c6289703595faef0d262c00b3
Parents: 1697a02
Author: Xiao Chen 
Authored: Thu Aug 16 23:13:10 2018 -0700
Committer: Xiao Chen 
Committed: Thu Aug 16 23:14:21 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DistributedFileSystem.java| 6 +-
 .../org/apache/hadoop/hdfs/TestDistributedFileSystem.java | 7 +++
 2 files changed, 12 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c67b0650/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 70b3679..28c1e27 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1217,7 +1217,11 @@ public class DistributedFileSystem extends FileSystem
   thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME,
   needLocation);
   statistics.incrementReadOps(1);
-  storageStatistics.incrementOpCounter(OpType.LIST_LOCATED_STATUS);
+  if (needLocation) {
+storageStatistics.incrementOpCounter(OpType.LIST_LOCATED_STATUS);
+  } else {
+storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
+  }
   if (thisListing == null) { // the directory does not exist
 throw new FileNotFoundException("File " + p + " does not exist.");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c67b0650/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index f09255e..46323dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -706,6 +706,7 @@ public class TestDistributedFileSystem {
   // Iterative ls test
   long mkdirOp = getOpStatistics(OpType.MKDIRS);
   long listStatusOp = getOpStatistics(OpType.LIST_STATUS);
+  long locatedListStatusOP = getOpStatistics(OpType.LIST_LOCATED_STATUS);
   for (int i = 0; i < 10; i++) {
 Path p = new Path(dir, Integer.toString(i));
 fs.mkdirs(p);
@@ -729,6 +730,12 @@ public class TestDistributedFileSystem {
 checkStatistics(fs, readOps, ++writeOps, largeReadOps);
 checkOpStatistics(OpType.MKDIRS, mkdirOp);
 checkOpStatistics(OpType.LIST_STATUS, listStatusOp);
+
+fs.listLocatedStatus(dir);
+locatedListStatusOP++;
+readOps++;
+checkStatistics(fs, readOps, writeOps, largeReadOps);
+checkOpStatistics(OpType.LIST_LOCATED_STATUS, locatedListStatusOP);
   }
   
   opCount = getOpStatistics(OpType.GET_STATUS);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org