hadoop git commit: YARN-6771. Use classloader inside configuration class to make new classes. Contributed by Jongyoul Lee.

2017-09-20 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 4767d1bb1 -> f919bcadb


YARN-6771. Use classloader inside configuration class to make new
classes. Contributed by Jongyoul Lee.

(cherry picked from commit 8b336632acad10e45d029596c5e3196e1857d891)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f919bcad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f919bcad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f919bcad

Branch: refs/heads/branch-3.0
Commit: f919bcadb70f6ff1c87ae45b4fa04fa61ec8ddd7
Parents: 4767d1b
Author: Sangjin Lee 
Authored: Wed Sep 20 22:22:43 2017 -0700
Committer: Sangjin Lee 
Committed: Wed Sep 20 22:54:30 2017 -0700

--
 .../impl/pb/RpcClientFactoryPBImpl.java |  3 +-
 .../impl/pb/RpcServerFactoryPBImpl.java |  5 +-
 .../impl/pb/TestRpcClientFactoryPBImpl.java | 49 
 .../impl/pb/TestRpcServerFactoryPBImpl.java | 48 +++
 4 files changed, 100 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f919bcad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
index 062fa66..07c5e23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
@@ -45,7 +45,6 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
   private static final String PB_IMPL_CLASS_SUFFIX = "PBClientImpl";
   
   private static final RpcClientFactoryPBImpl self = new 
RpcClientFactoryPBImpl();
-  private Configuration localConf = new Configuration();
   private ConcurrentMap cache = new 
ConcurrentHashMap();
   
   public static RpcClientFactoryPBImpl get() {
@@ -62,7 +61,7 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
 if (constructor == null) {
   Class pbClazz = null;
   try {
-pbClazz = localConf.getClassByName(getPBImplClassName(protocol));
+pbClazz = conf.getClassByName(getPBImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
 + getPBImplClassName(protocol) + "]", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f919bcad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
index 60e549a..ec9a5f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
@@ -51,7 +51,6 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
   
   private static final RpcServerFactoryPBImpl self = new 
RpcServerFactoryPBImpl();
 
-  private Configuration localConf = new Configuration();
   private ConcurrentMap serviceCache = new 
ConcurrentHashMap();
   private ConcurrentMap protoCache = new 
ConcurrentHashMap();
   
@@ -80,7 +79,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (constructor == null) {
   Class pbServiceImplClazz = null;
   try {
-pbServiceImplClazz = localConf
+pbServiceImplClazz = conf
 .getClassByName(getPbServiceImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
@@ -113,7 +112,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (method == null) {
   Class protoClazz = null;
   try {
-protoClazz = localConf.getClassByName(getProtoClassName(protocol));
+protoClazz = conf.getClassByName(getProtoClassName(protocol));

[1/2] hadoop git commit: YARN-6771. Use classloader inside configuration class to make new classes. Contributed by Jongyoul Lee.

2017-09-20 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7d747df52 -> a79422f71
  refs/heads/branch-2.8 c54310a63 -> 6d6758f75


YARN-6771. Use classloader inside configuration class to make new
classes. Contributed by Jongyoul Lee.

(cherry picked from commit 8b336632acad10e45d029596c5e3196e1857d891)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a79422f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a79422f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a79422f7

Branch: refs/heads/branch-2
Commit: a79422f71727ed764fb9e89e3eaf0e079c27991e
Parents: 7d747df
Author: Sangjin Lee 
Authored: Wed Sep 20 22:22:43 2017 -0700
Committer: Sangjin Lee 
Committed: Wed Sep 20 22:46:10 2017 -0700

--
 .../impl/pb/RpcClientFactoryPBImpl.java |  3 +-
 .../impl/pb/RpcServerFactoryPBImpl.java |  5 +-
 .../impl/pb/TestRpcClientFactoryPBImpl.java | 49 
 .../impl/pb/TestRpcServerFactoryPBImpl.java | 48 +++
 4 files changed, 100 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a79422f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
index 062fa66..07c5e23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
@@ -45,7 +45,6 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
   private static final String PB_IMPL_CLASS_SUFFIX = "PBClientImpl";
   
   private static final RpcClientFactoryPBImpl self = new 
RpcClientFactoryPBImpl();
-  private Configuration localConf = new Configuration();
   private ConcurrentMap cache = new 
ConcurrentHashMap();
   
   public static RpcClientFactoryPBImpl get() {
@@ -62,7 +61,7 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
 if (constructor == null) {
   Class pbClazz = null;
   try {
-pbClazz = localConf.getClassByName(getPBImplClassName(protocol));
+pbClazz = conf.getClassByName(getPBImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
 + getPBImplClassName(protocol) + "]", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a79422f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
index 60e549a..ec9a5f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
@@ -51,7 +51,6 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
   
   private static final RpcServerFactoryPBImpl self = new 
RpcServerFactoryPBImpl();
 
-  private Configuration localConf = new Configuration();
   private ConcurrentMap serviceCache = new 
ConcurrentHashMap();
   private ConcurrentMap protoCache = new 
ConcurrentHashMap();
   
@@ -80,7 +79,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (constructor == null) {
   Class pbServiceImplClazz = null;
   try {
-pbServiceImplClazz = localConf
+pbServiceImplClazz = conf
 .getClassByName(getPbServiceImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
@@ -113,7 +112,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (method == null) {
   Class protoClazz = null;
   try {
-protoClazz = localConf.getClassByName(getProtoClassName(protocol));
+protoClazz = conf.getClassByName(getProtoClassName(protocol));

[2/2] hadoop git commit: YARN-6771. Use classloader inside configuration class to make new classes. Contributed by Jongyoul Lee.

2017-09-20 Thread sjlee
YARN-6771. Use classloader inside configuration class to make new
classes. Contributed by Jongyoul Lee.

(cherry picked from commit 8b336632acad10e45d029596c5e3196e1857d891)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d6758f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d6758f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d6758f7

Branch: refs/heads/branch-2.8
Commit: 6d6758f751f4f230f90117e58dd0ae1006320a47
Parents: c54310a
Author: Sangjin Lee 
Authored: Wed Sep 20 22:22:43 2017 -0700
Committer: Sangjin Lee 
Committed: Wed Sep 20 22:50:55 2017 -0700

--
 .../impl/pb/RpcClientFactoryPBImpl.java |  3 +-
 .../impl/pb/RpcServerFactoryPBImpl.java |  5 +-
 .../impl/pb/TestRpcClientFactoryPBImpl.java | 49 
 .../impl/pb/TestRpcServerFactoryPBImpl.java | 48 +++
 4 files changed, 100 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d6758f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
index 062fa66..07c5e23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
@@ -45,7 +45,6 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
   private static final String PB_IMPL_CLASS_SUFFIX = "PBClientImpl";
   
   private static final RpcClientFactoryPBImpl self = new 
RpcClientFactoryPBImpl();
-  private Configuration localConf = new Configuration();
   private ConcurrentMap cache = new 
ConcurrentHashMap();
   
   public static RpcClientFactoryPBImpl get() {
@@ -62,7 +61,7 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
 if (constructor == null) {
   Class pbClazz = null;
   try {
-pbClazz = localConf.getClassByName(getPBImplClassName(protocol));
+pbClazz = conf.getClassByName(getPBImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
 + getPBImplClassName(protocol) + "]", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d6758f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
index 60e549a..ec9a5f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
@@ -51,7 +51,6 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
   
   private static final RpcServerFactoryPBImpl self = new 
RpcServerFactoryPBImpl();
 
-  private Configuration localConf = new Configuration();
   private ConcurrentMap serviceCache = new 
ConcurrentHashMap();
   private ConcurrentMap protoCache = new 
ConcurrentHashMap();
   
@@ -80,7 +79,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (constructor == null) {
   Class pbServiceImplClazz = null;
   try {
-pbServiceImplClazz = localConf
+pbServiceImplClazz = conf
 .getClassByName(getPbServiceImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
@@ -113,7 +112,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (method == null) {
   Class protoClazz = null;
   try {
-protoClazz = localConf.getClassByName(getProtoClassName(protocol));
+protoClazz = conf.getClassByName(getProtoClassName(protocol));
  } catch (ClassNotFoundException e) {

hadoop git commit: YARN-6771. Use classloader inside configuration class to make new classes. Contributed by Jongyoul Lee.

2017-09-20 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 53047f934 -> 8b336632a


YARN-6771. Use classloader inside configuration class to make new
classes. Contributed by Jongyoul Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b336632
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b336632
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b336632

Branch: refs/heads/trunk
Commit: 8b336632acad10e45d029596c5e3196e1857d891
Parents: 53047f9
Author: Sangjin Lee 
Authored: Wed Sep 20 22:22:43 2017 -0700
Committer: Sangjin Lee 
Committed: Wed Sep 20 22:22:43 2017 -0700

--
 .../impl/pb/RpcClientFactoryPBImpl.java |  3 +-
 .../impl/pb/RpcServerFactoryPBImpl.java |  5 +-
 .../impl/pb/TestRpcClientFactoryPBImpl.java | 49 
 .../impl/pb/TestRpcServerFactoryPBImpl.java | 48 +++
 4 files changed, 100 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b336632/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
index 062fa66..07c5e23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
@@ -45,7 +45,6 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
   private static final String PB_IMPL_CLASS_SUFFIX = "PBClientImpl";
   
   private static final RpcClientFactoryPBImpl self = new 
RpcClientFactoryPBImpl();
-  private Configuration localConf = new Configuration();
   private ConcurrentMap cache = new 
ConcurrentHashMap();
   
   public static RpcClientFactoryPBImpl get() {
@@ -62,7 +61,7 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
 if (constructor == null) {
   Class pbClazz = null;
   try {
-pbClazz = localConf.getClassByName(getPBImplClassName(protocol));
+pbClazz = conf.getClassByName(getPBImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
 + getPBImplClassName(protocol) + "]", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b336632/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
index 60e549a..ec9a5f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
@@ -51,7 +51,6 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
   
   private static final RpcServerFactoryPBImpl self = new 
RpcServerFactoryPBImpl();
 
-  private Configuration localConf = new Configuration();
   private ConcurrentMap serviceCache = new 
ConcurrentHashMap();
   private ConcurrentMap protoCache = new 
ConcurrentHashMap();
   
@@ -80,7 +79,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (constructor == null) {
   Class pbServiceImplClazz = null;
   try {
-pbServiceImplClazz = localConf
+pbServiceImplClazz = conf
 .getClassByName(getPbServiceImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
@@ -113,7 +112,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (method == null) {
   Class protoClazz = null;
   try {
-protoClazz = localConf.getClassByName(getProtoClassName(protocol));
+protoClazz = conf.getClassByName(getProtoClassName(protocol));
  } catch (ClassNotFoundException e) {

hadoop git commit: Revert "HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar via Haibo Chen)" HADOOP-14879 Build failure due to failing hadoop-client-check-invariants. This reverts commit 1ee25278c891e95ba2ab142e5b78aebd752ea163.

2017-09-20 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 835717210 -> 4767d1bb1


Revert "HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay 
Kumar via Haibo Chen)"
HADOOP-14879 Build failure due to failing hadoop-client-check-invariants
This reverts commit 1ee25278c891e95ba2ab142e5b78aebd752ea163.

(cherry picked from commit aa6e8d2dff533c3d0c86776567c860548723c21c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4767d1bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4767d1bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4767d1bb

Branch: refs/heads/branch-3.0
Commit: 4767d1bb1fca46e572d81db79bea83d5289879bc
Parents: 8357172
Author: Steve Loughran 
Authored: Tue Sep 19 11:53:11 2017 +0100
Committer: Andrew Wang 
Committed: Wed Sep 20 21:49:34 2017 -0700

--
 hadoop-client-modules/hadoop-client/pom.xml | 31 
 1 file changed, 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4767d1bb/hadoop-client-modules/hadoop-client/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client/pom.xml 
b/hadoop-client-modules/hadoop-client/pom.xml
index 8505d50..c465d07 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -179,37 +179,6 @@
 
 
   org.apache.hadoop
-  hadoop-yarn-client
-  compile
-  
-
-
-  org.apache.hadoop
-  hadoop-yarn-api
-
-
-  org.apache.hadoop
-  hadoop-annotations
-
-
-  com.google.guava
-  guava
-
-
-  commons-cli
-  commons-cli
-
-
-  log4j
-  log4j
-
-  
-
-
-
-  org.apache.hadoop
   hadoop-mapreduce-client-core
   compile
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Revert "MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergery Novák."

2017-09-20 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1792093ba -> 835717210


Revert "MAPREDUCE-6947. Moving logging APIs over to slf4j in 
hadoop-mapreduce-examples. Contributed by Gergery Novák."

This reverts commit 9c90400c9677c25f3e11ee4366da093bfa6c89fe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83571721
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83571721
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83571721

Branch: refs/heads/branch-3.0
Commit: 835717210a653a397f5fd735ed5d8c299ac0e5b2
Parents: 1792093
Author: Akira Ajisaka 
Authored: Thu Sep 21 11:17:35 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Sep 21 11:17:35 2017 +0900

--
 .../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java | 7 +++
 .../main/java/org/apache/hadoop/examples/DBCountPageView.java | 7 +++
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 7 ---
 .../src/main/java/org/apache/hadoop/examples/pi/DistSum.java  | 6 +++---
 .../java/org/apache/hadoop/examples/terasort/TeraGen.java | 6 +++---
 .../org/apache/hadoop/examples/terasort/TeraOutputFormat.java | 7 +++
 .../org/apache/hadoop/examples/terasort/TeraScheduler.java| 7 +++
 .../java/org/apache/hadoop/examples/terasort/TeraSort.java| 6 +++---
 .../org/apache/hadoop/examples/terasort/TestTeraSort.java | 6 +++---
 9 files changed, 28 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83571721/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index da4ec79..7e98d7d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -29,6 +29,8 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -49,8 +51,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 
@@ -83,8 +83,7 @@ public class BaileyBorweinPlouffe extends Configured 
implements Tool {
   private static final String DIGIT_SIZE_PROPERTY = NAME + ".digit.size";
   private static final String DIGIT_PARTS_PROPERTY = NAME + ".digit.parts";
 
-  private static final Logger LOG =
-  LoggerFactory.getLogger(BaileyBorweinPlouffe.class);
+  private static final Log LOG = LogFactory.getLog(BaileyBorweinPlouffe.class);
 
   /** Mapper class computing digits of Pi. */
   public static class BbpMapper extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/83571721/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
index 7b73820..8dec39d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
@@ -29,6 +29,8 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Random;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.LongWritable;
@@ -47,8 +49,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.hsqldb.server.Server;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * This is a demonstrative program, which uses DBInputFormat for 

hadoop git commit: Revert "MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergery Novák."

2017-09-20 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3bb23f4be -> 7d747df52


Revert "MAPREDUCE-6947. Moving logging APIs over to slf4j in 
hadoop-mapreduce-examples. Contributed by Gergery Novák."

This reverts commit e61baf94965e25b6a7ab2264523142d066b490b2.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d747df5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d747df5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d747df5

Branch: refs/heads/branch-2
Commit: 7d747df52d0a5ff77c0694db36c2da816bbe2779
Parents: 3bb23f4
Author: Akira Ajisaka 
Authored: Thu Sep 21 11:16:48 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Sep 21 11:16:48 2017 +0900

--
 .../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java | 7 +++
 .../main/java/org/apache/hadoop/examples/DBCountPageView.java | 7 +++
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 7 ---
 .../src/main/java/org/apache/hadoop/examples/pi/DistSum.java  | 6 +++---
 .../java/org/apache/hadoop/examples/terasort/TeraGen.java | 6 +++---
 .../org/apache/hadoop/examples/terasort/TeraScheduler.java| 7 +++
 .../java/org/apache/hadoop/examples/terasort/TeraSort.java| 6 +++---
 .../org/apache/hadoop/examples/terasort/TestTeraSort.java | 6 +++---
 8 files changed, 25 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d747df5/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index da4ec79..7e98d7d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -29,6 +29,8 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -49,8 +51,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 
@@ -83,8 +83,7 @@ public class BaileyBorweinPlouffe extends Configured 
implements Tool {
   private static final String DIGIT_SIZE_PROPERTY = NAME + ".digit.size";
   private static final String DIGIT_PARTS_PROPERTY = NAME + ".digit.parts";
 
-  private static final Logger LOG =
-  LoggerFactory.getLogger(BaileyBorweinPlouffe.class);
+  private static final Log LOG = LogFactory.getLog(BaileyBorweinPlouffe.class);
 
   /** Mapper class computing digits of Pi. */
   public static class BbpMapper extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d747df5/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
index 54a5ba4..1ec8739 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
@@ -30,6 +30,8 @@ import java.sql.Statement;
 import java.util.Iterator;
 import java.util.Random;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.LongWritable;
@@ -48,8 +50,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.hsqldb.server.Server;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * This is a demonstrative program, which uses DBInputFormat for reading
@@ -78,8 +78,7 @@ import org.slf4j.LoggerFactory;
  */
 public class 

hadoop git commit: Revert "MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergery Novák."

2017-09-20 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk a12f09ba3 -> 53047f934


Revert "MAPREDUCE-6947. Moving logging APIs over to slf4j in 
hadoop-mapreduce-examples. Contributed by Gergery Novák."

This reverts commit 2018538fdba1a95a6556187569e872fce7f9e1c3.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53047f93
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53047f93
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53047f93

Branch: refs/heads/trunk
Commit: 53047f934e3f81237ac9f0d75dddfc44862ef2d9
Parents: a12f09b
Author: Akira Ajisaka 
Authored: Thu Sep 21 11:16:05 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Sep 21 11:16:05 2017 +0900

--
 .../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java | 7 +++
 .../main/java/org/apache/hadoop/examples/DBCountPageView.java | 7 +++
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 7 ---
 .../src/main/java/org/apache/hadoop/examples/pi/DistSum.java  | 6 +++---
 .../java/org/apache/hadoop/examples/terasort/TeraGen.java | 6 +++---
 .../org/apache/hadoop/examples/terasort/TeraOutputFormat.java | 7 +++
 .../org/apache/hadoop/examples/terasort/TeraScheduler.java| 7 +++
 .../java/org/apache/hadoop/examples/terasort/TeraSort.java| 6 +++---
 .../org/apache/hadoop/examples/terasort/TestTeraSort.java | 6 +++---
 9 files changed, 28 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53047f93/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index da4ec79..7e98d7d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -29,6 +29,8 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -49,8 +51,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 
@@ -83,8 +83,7 @@ public class BaileyBorweinPlouffe extends Configured 
implements Tool {
   private static final String DIGIT_SIZE_PROPERTY = NAME + ".digit.size";
   private static final String DIGIT_PARTS_PROPERTY = NAME + ".digit.parts";
 
-  private static final Logger LOG =
-  LoggerFactory.getLogger(BaileyBorweinPlouffe.class);
+  private static final Log LOG = LogFactory.getLog(BaileyBorweinPlouffe.class);
 
   /** Mapper class computing digits of Pi. */
   public static class BbpMapper extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53047f93/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
index 7b73820..8dec39d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
@@ -29,6 +29,8 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Random;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.LongWritable;
@@ -47,8 +49,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.hsqldb.server.Server;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * This is a demonstrative program, which uses DBInputFormat for reading
@@ 

[48/50] [abbrv] hadoop git commit: YARN-5947: Create LeveldbConfigurationStore class using Leveldb as backing store. Contributed by Jonathan Hung

2017-09-20 Thread jhung
YARN-5947: Create LeveldbConfigurationStore class using Leveldb as backing 
store. Contributed by Jonathan Hung


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b06711cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b06711cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b06711cb

Branch: refs/heads/YARN-5734
Commit: b06711cb1f6cdbe5f60fbd1f0cbf7890e1ef779d
Parents: e462f10
Author: Xuan 
Authored: Mon Jul 31 16:48:40 2017 -0700
Committer: Jonathan Hung 
Committed: Wed Sep 20 17:40:54 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 +
 .../src/main/resources/yarn-default.xml |  29 ++
 .../scheduler/MutableConfigurationProvider.java |   6 +
 .../scheduler/capacity/CapacityScheduler.java   |   3 +
 .../conf/LeveldbConfigurationStore.java | 314 +++
 .../conf/MutableCSConfigurationProvider.java|  38 ++-
 .../capacity/conf/YarnConfigurationStore.java   |  14 +-
 .../conf/TestYarnConfigurationStore.java|   3 +-
 8 files changed, 414 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b06711cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index e1062d7..a33d85d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -677,8 +677,21 @@ public class YarnConfiguration extends Configuration {
   public static final String SCHEDULER_CONFIGURATION_STORE_CLASS =
   YARN_PREFIX + "scheduler.configuration.store.class";
   public static final String MEMORY_CONFIGURATION_STORE = "memory";
+  public static final String LEVELDB_CONFIGURATION_STORE = "leveldb";
   public static final String DEFAULT_CONFIGURATION_STORE =
   MEMORY_CONFIGURATION_STORE;
+  public static final String RM_SCHEDCONF_STORE_PATH = YARN_PREFIX
+  + "scheduler.configuration.leveldb-store.path";
+
+  public static final String RM_SCHEDCONF_LEVELDB_COMPACTION_INTERVAL_SECS =
+  YARN_PREFIX
+  + "scheduler.configuration.leveldb-store.compaction-interval-secs";
+  public static final long
+  DEFAULT_RM_SCHEDCONF_LEVELDB_COMPACTION_INTERVAL_SECS = 60 * 60 * 24L;
+
+  public static final String RM_SCHEDCONF_LEVELDB_MAX_LOGS =
+  YARN_PREFIX + "scheduler.configuration.leveldb-store.max-logs";
+  public static final int DEFAULT_RM_SCHEDCONF_LEVELDB_MAX_LOGS = 1000;
 
   public static final String RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS =
   YARN_PREFIX + "scheduler.configuration.mutation.acl-policy.class";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b06711cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 86aa15e..4529f20 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3371,4 +3371,33 @@
 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.DefaultConfigurationMutationACLPolicy
   
 
+  <property>
+    <description>
+      The storage path for LevelDB implementation of configuration store,
+      when yarn.scheduler.configuration.store.class is configured to be
+      "leveldb".
+    </description>
+    <name>yarn.scheduler.configuration.leveldb-store.path</name>
+    <value>${hadoop.tmp.dir}/yarn/system/confstore</value>
+  </property>
+
+  <property>
+    <description>
+      The compaction interval for LevelDB configuration store in secs,
+      when yarn.scheduler.configuration.store.class is configured to be
+      "leveldb". Default is one day.
+    </description>
+    <name>yarn.scheduler.configuration.leveldb-store.compaction-interval-secs</name>
+    <value>86400</value>
+  </property>
+
+  <property>
+    <description>
+      The max number of configuration change log entries kept in LevelDB config
+      store, when yarn.scheduler.configuration.store.class is configured to be
+      "leveldb". Default is 1000.
+    </description>
+    <name>yarn.scheduler.configuration.leveldb-store.max-logs</name>
+    <value>1000</value>
+  </property>
 


[28/50] [abbrv] hadoop git commit: HDFS-12449. TestReconstructStripedFile.testNNSendsErasureCodingTasks randomly cannot finish in 60s. (SammiChen via lei)

2017-09-20 Thread jhung
HDFS-12449. TestReconstructStripedFile.testNNSendsErasureCodingTasks randomly 
cannot finish in 60s. (SammiChen via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7bbeacb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7bbeacb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7bbeacb7

Branch: refs/heads/YARN-5734
Commit: 7bbeacb75e93261dbda0e8efcde510e5fcf83efb
Parents: fda1221
Author: Lei Xu 
Authored: Tue Sep 19 11:50:01 2017 -0700
Committer: Lei Xu 
Committed: Tue Sep 19 11:50:01 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bbeacb7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 72b1412..713a10b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -456,8 +456,8 @@ public class TestReconstructStripedFile {
 ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy();
 fs.getClient().setErasureCodingPolicy("/", policy.getName());
 
-final int fileLen = cellSize * ecPolicy.getNumDataUnits() * 2;
-for (int i = 0; i < 100; i++) {
+final int fileLen = cellSize * ecPolicy.getNumDataUnits();
+for (int i = 0; i < 50; i++) {
   writeFile(fs, "/ec-file-" + i, fileLen);
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[45/50] [abbrv] hadoop git commit: YARN-5952. Create REST API for changing YARN scheduler configurations. (Jonathan Hung via wangda)

2017-09-20 Thread jhung
YARN-5952. Create REST API for changing YARN scheduler configurations. 
(Jonathan Hung via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbcc60ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbcc60ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbcc60ce

Branch: refs/heads/YARN-5734
Commit: fbcc60ce7d75812fd5957e8fe5b17abf0421a613
Parents: 0de6349
Author: Wangda Tan 
Authored: Mon Apr 3 10:12:01 2017 -0700
Committer: Jonathan Hung 
Committed: Wed Sep 20 17:40:53 2017 -0700

--
 .../scheduler/MutableConfScheduler.java |  40 ++
 .../scheduler/MutableConfigurationProvider.java |   5 +-
 .../scheduler/capacity/CapacityScheduler.java   |  16 +-
 .../conf/InMemoryConfigurationStore.java|   6 +-
 .../conf/MutableCSConfigurationProvider.java|  24 +-
 .../resourcemanager/webapp/RMWebServices.java   | 172 ++-
 .../webapp/dao/QueueConfigInfo.java |  57 +++
 .../webapp/dao/QueueConfigsUpdateInfo.java  |  60 +++
 .../TestMutableCSConfigurationProvider.java |   6 +-
 .../TestRMWebServicesConfigurationMutation.java | 477 +++
 10 files changed, 851 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbcc60ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
new file mode 100644
index 000..35e36e1
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.security.UserGroupInformation;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Interface for a scheduler that supports changing configuration at runtime.
+ *
+ */
+public interface MutableConfScheduler extends ResourceScheduler {
+
+  /**
+   * Update the scheduler's configuration.
+   * @param user Caller of this update
+   * @param confUpdate key-value map of the configuration update
+   * @throws IOException if update is invalid
+   */
+  void updateConfiguration(UserGroupInformation user,
+      Map<String, String> confUpdate) throws IOException;
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbcc60ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index da30a2b..889c3bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ 

[25/50] [abbrv] hadoop git commit: Revert "MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe"

2017-09-20 Thread jhung
Revert "MAPREDUCE-6958. Shuffle audit logger should log size of shuffle 
transfer. Contributed by Jason Lowe"

This reverts commit b3d61304f2fa4a99526f7a60ccaac9f262083079.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea845ba5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea845ba5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea845ba5

Branch: refs/heads/YARN-5734
Commit: ea845ba58c585647c4be8d30d9b814f098e34a12
Parents: aa6e8d2
Author: Jason Lowe 
Authored: Tue Sep 19 08:45:05 2017 -0500
Committer: Jason Lowe 
Committed: Tue Sep 19 08:45:05 2017 -0500

--
 .../org/apache/hadoop/mapred/ShuffleHandler.java  | 18 +++---
 1 file changed, 7 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea845ba5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 06a3e42..863da7e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -992,6 +992,13 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
 
+  // this audit log is disabled by default,
+  // to turn it on please enable this audit log
+  // on log4j.properties by uncommenting the setting
+  if (AUDITLOG.isDebugEnabled()) {
+AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
+ " reducer " + reduceQ.get(0));
+  }
   int reduceId;
   String jobId;
   try {
@@ -1176,17 +1183,6 @@ public class ShuffleHandler extends AuxiliaryService {
 
   // Now set the response headers.
   setResponseHeaders(response, keepAliveParam, contentLength);
-
-  // this audit log is disabled by default,
-  // to turn it on please enable this audit log
-  // on log4j.properties by uncommenting the setting
-  if (AUDITLOG.isDebugEnabled()) {
-StringBuilder sb = new StringBuilder("shuffle for ").append(jobId);
-sb.append(" mappers: ").append(mapIds);
-sb.append(" reducer ").append(reduce);
-sb.append(" length ").append(contentLength);
-AUDITLOG.debug(sb.toString());
-  }
 }
 
 protected void setResponseHeaders(HttpResponse response,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[47/50] [abbrv] hadoop git commit: YARN-7024: Fix issues on recovery in LevelDB store. Contributed by Jonathan Hung

2017-09-20 Thread jhung
YARN-7024: Fix issues on recovery in LevelDB store. Contributed by Jonathan Hung


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c59418fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c59418fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c59418fb

Branch: refs/heads/YARN-5734
Commit: c59418fb19094e9c735e631e098637d3b5eba3b3
Parents: ba53795
Author: Xuan 
Authored: Wed Aug 23 11:11:41 2017 -0700
Committer: Jonathan Hung 
Committed: Wed Sep 20 17:40:54 2017 -0700

--
 .../scheduler/capacity/conf/InMemoryConfigurationStore.java  | 2 +-
 .../scheduler/capacity/conf/LeveldbConfigurationStore.java   | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c59418fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
index b97be1b..c63734d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
@@ -79,7 +79,7 @@ public class InMemoryConfigurationStore implements 
YarnConfigurationStore {
 
   @Override
   public synchronized List<LogMutation> getPendingMutations() {
-return pendingMutations;
+return new LinkedList<>(pendingMutations);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c59418fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java
index 1534685..1280fab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java
@@ -65,6 +65,7 @@ public class LeveldbConfigurationStore implements 
YarnConfigurationStore {
   private static final String LOG_COMMITTED_TXN = "committedTxn";
 
   private DB db;
+  // Txnid for the last transaction logged to the store.
   private long txnId = 0;
   private long minTxn = 0;
   private long maxLogs;
@@ -92,6 +93,7 @@ public class LeveldbConfigurationStore implements 
YarnConfigurationStore {
   break;
 }
 pendingMutations.add(deserLogMutation(entry.getValue()));
+txnId++;
   }
   // Get the earliest txnId stored in logs
   itr.seekToFirst();
@@ -278,7 +280,7 @@ public class LeveldbConfigurationStore implements 
YarnConfigurationStore {
 
   @Override
   public List<LogMutation> getPendingMutations() {
-return pendingMutations;
+return new LinkedList<>(pendingMutations);
   }
 
   @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[32/50] [abbrv] hadoop git commit: YARN-6499. Remove the doc about Schedulable#redistributeShare(). (Contributed by Chetna Chaudhari via Yufei Gu)

2017-09-20 Thread jhung
YARN-6499. Remove the doc about Schedulable#redistributeShare(). (Contributed 
by Chetna Chaudhari via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9019e1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9019e1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9019e1f

Branch: refs/heads/YARN-5734
Commit: a9019e1fb753f15c1927e3f9355996fd6544c14f
Parents: 647b752
Author: Yufei Gu 
Authored: Tue Sep 19 18:27:37 2017 -0700
Committer: Yufei Gu 
Committed: Tue Sep 19 18:28:31 2017 -0700

--
 .../yarn/server/resourcemanager/scheduler/fair/Schedulable.java  | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9019e1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
index 4d6af98..bd1ff7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
@@ -41,10 +41,6 @@ import org.apache.hadoop.yarn.api.records.Resource;
  * - updateDemand() is called periodically to compute the demand of the various
  *   jobs and queues, which may be expensive (e.g. jobs must iterate through 
all
  *   their tasks to count failed tasks, tasks that can be speculated, etc).
- * - redistributeShare() is called after demands are updated and a 
Schedulable's
- *   fair share has been set by its parent to let it distribute its share among
- *   the other Schedulables within it (e.g. for queues that want to perform 
fair
- *   sharing among their jobs).
  */
 @Private
 @Unstable


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/50] [abbrv] hadoop git commit: HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar via Haibo Chen)

2017-09-20 Thread jhung
HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar 
via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ee25278
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ee25278
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ee25278

Branch: refs/heads/YARN-5734
Commit: 1ee25278c891e95ba2ab142e5b78aebd752ea163
Parents: 7c73292
Author: Haibo Chen 
Authored: Mon Sep 18 14:25:35 2017 -0700
Committer: Haibo Chen 
Committed: Mon Sep 18 14:25:35 2017 -0700

--
 hadoop-client-modules/hadoop-client/pom.xml | 31 
 1 file changed, 31 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ee25278/hadoop-client-modules/hadoop-client/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client/pom.xml 
b/hadoop-client-modules/hadoop-client/pom.xml
index bed3f5c..6500ebf 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -179,6 +179,37 @@
 
 
   org.apache.hadoop
+      <artifactId>hadoop-yarn-client</artifactId>
+      <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-annotations</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-cli</groupId>
+          <artifactId>commons-cli</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
   hadoop-mapreduce-client-core
   compile
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[14/50] [abbrv] hadoop git commit: YARN-7199. Fix TestAMRMClientContainerRequest.testOpportunisticAndGuaranteedRequests. (Botong Huang via asuresh)

2017-09-20 Thread jhung
YARN-7199. Fix 
TestAMRMClientContainerRequest.testOpportunisticAndGuaranteedRequests. (Botong 
Huang via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29dd5515
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29dd5515
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29dd5515

Branch: refs/heads/YARN-5734
Commit: 29dd55153e37471d9c177f4bd173f1d02bc96410
Parents: 0adc047
Author: Arun Suresh 
Authored: Mon Sep 18 11:26:44 2017 -0700
Committer: Arun Suresh 
Committed: Mon Sep 18 11:26:44 2017 -0700

--
 .../java/org/apache/hadoop/yarn/client/api/AMRMClient.java  | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29dd5515/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
index 815915e..e86bd12 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
@@ -118,8 +118,8 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
 private String nodeLabelsExpression;
 private ExecutionTypeRequest executionTypeRequest =
 ExecutionTypeRequest.newInstance();
-private String resourceProfile;
-
+private String resourceProfile = ProfileCapability.DEFAULT_PROFILE;
+
 /**
  * Instantiates a {@link ContainerRequest} with the given constraints and
  * locality relaxation enabled.
@@ -540,6 +540,11 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
 return this;
   }
 
+  public ContainerRequestBuilder resourceProfile(String resourceProfile) {
+containerRequest.resourceProfile = resourceProfile;
+return this;
+  }
+
   public ContainerRequest build() {
 containerRequest.sanityCheck();
 return containerRequest;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[38/50] [abbrv] hadoop git commit: YARN-5946: Create YarnConfigurationStore interface and InMemoryConfigurationStore class. Contributed by Jonathan Hung

2017-09-20 Thread jhung
YARN-5946: Create YarnConfigurationStore interface and
InMemoryConfigurationStore class. Contributed by Jonathan Hung


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d9ba97e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d9ba97e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d9ba97e

Branch: refs/heads/YARN-5734
Commit: 8d9ba97e0e3a6ec1d722d011db6fba223c07a691
Parents: a6a9bae
Author: Xuan 
Authored: Fri Feb 24 15:58:12 2017 -0800
Committer: Jonathan Hung 
Committed: Wed Sep 20 17:39:06 2017 -0700

--
 .../conf/InMemoryConfigurationStore.java|  86 +++
 .../capacity/conf/YarnConfigurationStore.java   | 154 +++
 .../conf/TestYarnConfigurationStore.java|  70 +
 3 files changed, 310 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d9ba97e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
new file mode 100644
index 000..a208fb9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A default implementation of {@link YarnConfigurationStore}. Doesn't offer
+ * persistent configuration storage, just stores the configuration in memory.
+ */
+public class InMemoryConfigurationStore implements YarnConfigurationStore {
+
+  private Configuration schedConf;
+  private LinkedList pendingMutations;
+  private long pendingId;
+
+  @Override
+  public void initialize(Configuration conf, Configuration schedConf) {
+this.schedConf = schedConf;
+this.pendingMutations = new LinkedList<>();
+this.pendingId = 0;
+  }
+
+  @Override
+  public synchronized long logMutation(LogMutation logMutation) {
+logMutation.setId(++pendingId);
+pendingMutations.add(logMutation);
+return pendingId;
+  }
+
+  @Override
+  public synchronized boolean confirmMutation(long id, boolean isValid) {
+LogMutation mutation = pendingMutations.poll();
+// If confirmMutation is called out of order, discard mutations until id
+// is reached.
+while (mutation != null) {
+  if (mutation.getId() == id) {
+if (isValid) {
+  Map mutations = mutation.getUpdates();
+  for (Map.Entry kv : mutations.entrySet()) {
+schedConf.set(kv.getKey(), kv.getValue());
+  }
+}
+return true;
+  }
+  mutation = pendingMutations.poll();
+}
+return false;
+  }
+
+  @Override
+  public synchronized Configuration retrieve() {
+return schedConf;
+  }
+
+  @Override
+  public synchronized List getPendingMutations() {
+return pendingMutations;
+  }
+
+  @Override
+  public List getConfirmedConfHistory(long fromId) {
+// Unimplemented.
+return null;
+  }
+}


[22/50] [abbrv] hadoop git commit: HDFS-11799. Introduce a config to allow setting up write pipeline with fewer nodes than replication factor. Contributed by Brahma Reddy Battula

2017-09-20 Thread jhung
HDFS-11799. Introduce a config to allow setting up write pipeline with fewer 
nodes than replication factor. Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fda1221c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fda1221c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fda1221c

Branch: refs/heads/YARN-5734
Commit: fda1221c55101d97ac62e1ee4e3ddf9a915d5363
Parents: 31b5840
Author: Brahma Reddy Battula 
Authored: Tue Sep 19 11:25:45 2017 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Sep 19 11:25:45 2017 +0530

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  13 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java|  31 +-
 .../hdfs/client/HdfsClientConfigKeys.java   |   2 +
 .../src/main/resources/hdfs-default.xml |  17 ++
 .../TestReplaceDatanodeFailureReplication.java  | 291 +++
 .../hadoop/tools/TestHdfsConfigFields.java  |   4 +-
 6 files changed, 354 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 772049d..7e8e95b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -223,6 +223,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   final String clientName;
   final SocketFactory socketFactory;
   final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
+  final short dtpReplaceDatanodeOnFailureReplication;
   private final FileSystem.Statistics stats;
   private final URI namenodeUri;
   private final Random r = new Random();
@@ -305,7 +306,17 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
 this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
 this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
-
+this.dtpReplaceDatanodeOnFailureReplication = (short) conf
+.getInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+MIN_REPLICATION,
+HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+MIN_REPLICATION_DEFAULT);
+if (LOG.isDebugEnabled()) {
+  LOG.debug(
+  "Sets " + HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+  MIN_REPLICATION + " to "
+  + dtpReplaceDatanodeOnFailureReplication);
+}
 this.ugi = UserGroupInformation.getCurrentUser();
 
 this.namenodeUri = nameNodeUri;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 4eafca1..99fa5f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -1384,7 +1384,36 @@ class DataStreamer extends Daemon {
   setPipeline(lb);
 
   //find the new datanode
-  final int d = findNewDatanode(original);
+  final int d;
+  try {
+d = findNewDatanode(original);
+  } catch (IOException ioe) {
+// check the minimal number of nodes available to decide whether to
+// continue the write.
+
+//if live block location datanodes is greater than or equal to
+// HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+// MIN_REPLICATION threshold value, continue writing to the
+// remaining nodes. Otherwise throw exception.
+//
+// If HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+// MIN_REPLICATION is set to 0 or less than zero, an exception will be
+// thrown if a replacement could not be found.
+
+if (dfsClient.dtpReplaceDatanodeOnFailureReplication > 0 && 
nodes.length
+>= dfsClient.dtpReplaceDatanodeOnFailureReplication) {
+  DFSClient.LOG.warn(
+  "Failed to find a new datanode 

[03/50] [abbrv] hadoop git commit: HDFS-10701. TestDFSStripedOutputStreamWithFailure#testBlockTokenExpired occasionally fails. Contributed by SammiChen.

2017-09-20 Thread jhung
HDFS-10701. TestDFSStripedOutputStreamWithFailure#testBlockTokenExpired 
occasionally fails. Contributed by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef8cd5dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef8cd5dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef8cd5dc

Branch: refs/heads/YARN-5734
Commit: ef8cd5dc565f901b4954befe784675e130e84c3c
Parents: 1a84c24
Author: Andrew Wang 
Authored: Fri Sep 15 16:20:36 2017 -0700
Committer: Andrew Wang 
Committed: Fri Sep 15 16:20:36 2017 -0700

--
 .../hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef8cd5dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index ea889e3..57da439 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -260,8 +260,6 @@ public class TestDFSStripedOutputStreamWithFailure {
 
  @Test(timeout=240000)
   public void testBlockTokenExpired() throws Exception {
-// TODO: this is very flaky, re-enable it later. See HDFS-12417.
-assumeTrue("Test has been temporarily disabled. See HDFS-12417.", false);
 final int length = dataBlocks * (blockSize - cellSize);
 final HdfsConfiguration conf = newHdfsConfiguration();
 
@@ -494,8 +492,8 @@ public class TestDFSStripedOutputStreamWithFailure {
   final BlockManager bm = nn.getNamesystem().getBlockManager();
   final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
 
-  // set a short token lifetime (1 second)
-  SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
+  // set a short token lifetime (6 second)
+  SecurityTestUtil.setBlockTokenLifetime(sm, 6000L);
 }
 
 final AtomicInteger pos = new AtomicInteger();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[27/50] [abbrv] hadoop git commit: MAPREDUCE-6960. Shuffle Handler prints disk error stack traces for every read failure.

2017-09-20 Thread jhung
MAPREDUCE-6960. Shuffle Handler prints disk error stack traces for every read 
failure.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/595d4784
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/595d4784
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/595d4784

Branch: refs/heads/YARN-5734
Commit: 595d478408104bdfe1f08efd79930e18862fafbb
Parents: 3a20deb
Author: Eric Payne 
Authored: Tue Sep 19 10:35:15 2017 -0500
Committer: Eric Payne 
Committed: Tue Sep 19 10:35:15 2017 -0500

--
 .../main/java/org/apache/hadoop/mapred/ShuffleHandler.java| 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/595d4784/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index b7f2c6d..0eeae19 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -1088,7 +1089,11 @@ public class ShuffleHandler extends AuxiliaryService {
   }
   nextMap.addListener(new ReduceMapFileCount(reduceContext));
 } catch (IOException e) {
-  LOG.error("Shuffle error :", e);
+  if (e instanceof DiskChecker.DiskErrorException) {
+LOG.error("Shuffle error :" + e);
+  } else {
+LOG.error("Shuffle error :", e);
+  }
   String errorMessage = getErrorMessage(e);
   sendError(reduceContext.getCtx(), errorMessage,
   INTERNAL_SERVER_ERROR);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[12/50] [abbrv] hadoop git commit: Revert "YARN-7162. Remove XML excludes file format (rkanter)" - wrong commit message

2017-09-20 Thread jhung
Revert "YARN-7162. Remove XML excludes file format (rkanter)" - wrong commit 
message

This reverts commit 3a8d57a0a2e047b34be82f602a2b6cf5593d2125.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f496683
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f496683
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f496683

Branch: refs/heads/YARN-5734
Commit: 5f496683fb00ba26a6bf5a506ae87d4bc4088727
Parents: a2dcba1
Author: Robert Kanter 
Authored: Mon Sep 18 10:32:08 2017 -0700
Committer: Robert Kanter 
Committed: Mon Sep 18 10:32:08 2017 -0700

--
 .../hadoop-mapreduce-client-core/pom.xml|  4 --
 .../hadoop/mapreduce/JobResourceUploader.java   | 17 
 .../apache/hadoop/mapreduce/MRJobConfig.java|  5 ---
 .../src/main/resources/mapred-default.xml   |  9 
 .../mapreduce/TestJobResourceUploader.java  | 46 
 5 files changed, 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f496683/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index ce5fdc8..c34f7bd 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -44,10 +44,6 @@
 
 
   org.apache.hadoop
-  hadoop-hdfs-client
-
-
-  org.apache.hadoop
   hadoop-hdfs
   test
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f496683/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index d9bf988..f1cad57 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -36,8 +36,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.mapreduce.filecache.ClientDistributedCacheManager;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 
@@ -96,11 +94,6 @@ class JobResourceUploader {
 new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
 mkdirs(jtFs, submitJobDir, mapredSysPerms);
 
-if (!conf.getBoolean(MRJobConfig.MR_AM_STAGING_DIR_ERASURECODING_ENABLED,
-MRJobConfig.DEFAULT_MR_AM_STAGING_ERASURECODING_ENABLED)) {
-  disableErasureCodingForPath(jtFs, submitJobDir);
-}
-
 Collection files = conf.getStringCollection("tmpfiles");
 Collection libjars = conf.getStringCollection("tmpjars");
 Collection archives = conf.getStringCollection("tmparchives");
@@ -582,14 +575,4 @@ class JobResourceUploader {
 }
 return finalPath;
   }
-
-  private void disableErasureCodingForPath(FileSystem fs, Path path)
-  throws IOException {
-if (jtFs instanceof DistributedFileSystem) {
-  LOG.info("Disabling Erasure Coding for path: " + path);
-  DistributedFileSystem dfs = (DistributedFileSystem) jtFs;
-  dfs.setErasureCodingPolicy(path,
-  SystemErasureCodingPolicies.getReplicationPolicy().getName());
-}
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f496683/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 86abb42..2023ba3 100644
--- 

[50/50] [abbrv] hadoop git commit: YARN-6840. Implement zookeeper based store for scheduler configuration updates. (Jonathan Hung via wangda)

2017-09-20 Thread jhung
YARN-6840. Implement zookeeper based store for scheduler configuration updates. 
(Jonathan Hung via wangda)

Change-Id: I9debea674fe8c7e4109d4ca136965a1ea4c48bcc


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/034e6f4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/034e6f4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/034e6f4f

Branch: refs/heads/YARN-5734
Commit: 034e6f4f80046ace4987da021dee8f8d2533a1f3
Parents: c59418f
Author: Wangda Tan 
Authored: Mon Sep 18 09:53:42 2017 -0700
Committer: Jonathan Hung 
Committed: Wed Sep 20 17:40:54 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  14 +-
 .../src/main/resources/yarn-default.xml |  15 +-
 .../server/resourcemanager/AdminService.java|  18 +-
 .../server/resourcemanager/ResourceManager.java |  24 +-
 .../RMStateVersionIncompatibleException.java|   2 +-
 .../recovery/ZKRMStateStore.java|   5 +-
 .../scheduler/MutableConfScheduler.java |  22 +-
 .../scheduler/MutableConfigurationProvider.java |  36 ++-
 .../scheduler/capacity/CapacityScheduler.java   |  22 +-
 .../conf/InMemoryConfigurationStore.java|  71 +++--
 .../conf/LeveldbConfigurationStore.java | 168 +-
 .../conf/MutableCSConfigurationProvider.java| 148 +
 .../capacity/conf/YarnConfigurationStore.java   | 132 
 .../capacity/conf/ZKConfigurationStore.java | 235 ++
 .../resourcemanager/webapp/RMWebServices.java   |  26 +-
 .../conf/ConfigurationStoreBaseTest.java|  90 ++
 .../conf/TestInMemoryConfigurationStore.java|  30 ++
 .../TestMutableCSConfigurationProvider.java |  18 +-
 .../conf/TestYarnConfigurationStore.java|  71 -
 .../capacity/conf/TestZKConfigurationStore.java | 312 +++
 20 files changed, 1037 insertions(+), 422 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/034e6f4f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a33d85d..6e6089f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -678,6 +678,7 @@ public class YarnConfiguration extends Configuration {
   YARN_PREFIX + "scheduler.configuration.store.class";
   public static final String MEMORY_CONFIGURATION_STORE = "memory";
   public static final String LEVELDB_CONFIGURATION_STORE = "leveldb";
+  public static final String ZK_CONFIGURATION_STORE = "zk";
   public static final String DEFAULT_CONFIGURATION_STORE =
   MEMORY_CONFIGURATION_STORE;
   public static final String RM_SCHEDCONF_STORE_PATH = YARN_PREFIX
@@ -689,9 +690,16 @@ public class YarnConfiguration extends Configuration {
   public static final long
   DEFAULT_RM_SCHEDCONF_LEVELDB_COMPACTION_INTERVAL_SECS = 60 * 60 * 24L;
 
-  public static final String RM_SCHEDCONF_LEVELDB_MAX_LOGS =
-  YARN_PREFIX + "scheduler.configuration.leveldb-store.max-logs";
-  public static final int DEFAULT_RM_SCHEDCONF_LEVELDB_MAX_LOGS = 1000;
+  public static final String RM_SCHEDCONF_MAX_LOGS =
+  YARN_PREFIX + "scheduler.configuration.store.max-logs";
+  public static final long DEFAULT_RM_SCHEDCONF_LEVELDB_MAX_LOGS = 1000;
+  public static final long DEFAULT_RM_SCHEDCONF_ZK_MAX_LOGS = 1000;
+
+  /** Parent znode path under which ZKConfigurationStore will create znodes. */
+  public static final String RM_SCHEDCONF_STORE_ZK_PARENT_PATH = YARN_PREFIX
+  + "scheduler.configuration.zk-store.parent-path";
+  public static final String DEFAULT_RM_SCHEDCONF_STORE_ZK_PARENT_PATH =
+  "/confstore";
 
   public static final String RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS =
   YARN_PREFIX + "scheduler.configuration.mutation.acl-policy.class";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/034e6f4f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 4529f20..0ccf6f4 100644
--- 

[15/50] [abbrv] hadoop git commit: YARN-6570. No logs were found for running application, running container. Contributed by Junping Du

2017-09-20 Thread jhung
YARN-6570. No logs were found for running application, running
container. Contributed by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c732924
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c732924
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c732924

Branch: refs/heads/YARN-5734
Commit: 7c732924a889cd280e972882619a1827877fbafa
Parents: 29dd551
Author: Xuan 
Authored: Mon Sep 18 14:04:05 2017 -0700
Committer: Xuan 
Committed: Mon Sep 18 14:04:05 2017 -0700

--
 .../nodemanager/containermanager/container/ContainerImpl.java | 1 +
 .../org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java  | 3 ++-
 .../nodemanager/containermanager/container/TestContainer.java | 3 +++
 3 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c732924/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index df107a7..836e70e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -616,6 +616,7 @@ public class ContainerImpl implements Container {
   public org.apache.hadoop.yarn.api.records.ContainerState getCurrentState() {
 switch (stateMachine.getCurrentState()) {
 case NEW:
+  return org.apache.hadoop.yarn.api.records.ContainerState.NEW;
 case LOCALIZING:
 case LOCALIZATION_FAILED:
 case SCHEDULED:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c732924/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
index 8e4522b..9e59449 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
@@ -159,7 +159,8 @@ public class TestEventFlow {
 containerManager.startContainers(allRequests);
 
 BaseContainerManagerTest.waitForContainerState(containerManager, cID,
-Arrays.asList(ContainerState.RUNNING, ContainerState.SCHEDULED), 20);
+Arrays.asList(ContainerState.RUNNING, ContainerState.SCHEDULED,
+ContainerState.NEW), 20);
 
 List containerIds = new ArrayList();
 containerIds.add(cID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c732924/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
index 64e6cf0..b44b500 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
+++ 

[21/50] [abbrv] hadoop git commit: HDFS-12480. TestNameNodeMetrics#testTransactionAndCheckpointMetrics Fails in trunk. Contributed by Hanisha Koneru

2017-09-20 Thread jhung
HDFS-12480. TestNameNodeMetrics#testTransactionAndCheckpointMetrics Fails in 
trunk. Contributed by Hanisha Koneru


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31b58406
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31b58406
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31b58406

Branch: refs/heads/YARN-5734
Commit: 31b58406ac369716ef1665b7d60a3409117bdf9d
Parents: 595d478
Author: Brahma Reddy Battula 
Authored: Tue Sep 19 10:37:07 2017 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Sep 19 10:37:07 2017 +0530

--
 .../namenode/metrics/TestNameNodeMetrics.java | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31b58406/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 077a5f8..db9adbe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -851,22 +851,22 @@ public class TestNameNodeMetrics {
 getMetrics(NS_METRICS));
 
 assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
-assertGauge("LastWrittenTransactionId", 3L, getMetrics(NS_METRICS));
-assertGauge("TransactionsSinceLastCheckpoint", 3L, getMetrics(NS_METRICS));
-assertGauge("TransactionsSinceLastLogRoll", 3L, getMetrics(NS_METRICS));
+assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
+assertGauge("TransactionsSinceLastCheckpoint", 4L, getMetrics(NS_METRICS));
+assertGauge("TransactionsSinceLastLogRoll", 4L, getMetrics(NS_METRICS));
 
 fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
 
 assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
-assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
-assertGauge("TransactionsSinceLastCheckpoint", 4L, getMetrics(NS_METRICS));
-assertGauge("TransactionsSinceLastLogRoll", 4L, getMetrics(NS_METRICS));
+assertGauge("LastWrittenTransactionId", 5L, getMetrics(NS_METRICS));
+assertGauge("TransactionsSinceLastCheckpoint", 5L, getMetrics(NS_METRICS));
+assertGauge("TransactionsSinceLastLogRoll", 5L, getMetrics(NS_METRICS));
 
 cluster.getNameNodeRpc().rollEditLog();
 
 assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
-assertGauge("LastWrittenTransactionId", 6L, getMetrics(NS_METRICS));
-assertGauge("TransactionsSinceLastCheckpoint", 6L, getMetrics(NS_METRICS));
+assertGauge("LastWrittenTransactionId", 7L, getMetrics(NS_METRICS));
+assertGauge("TransactionsSinceLastCheckpoint", 7L, getMetrics(NS_METRICS));
 assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
 
 cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
@@ -876,7 +876,7 @@ public class TestNameNodeMetrics {
 long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
 getMetrics(NS_METRICS));
 assertTrue(lastCkptTime < newLastCkptTime);
-assertGauge("LastWrittenTransactionId", 8L, getMetrics(NS_METRICS));
+assertGauge("LastWrittenTransactionId", 9L, getMetrics(NS_METRICS));
 assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
 assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[04/50] [abbrv] hadoop git commit: YARN-6977. Node information is not provided for non am containers in RM logs. (Suma Shivaprasad via wangda)

2017-09-20 Thread jhung
YARN-6977. Node information is not provided for non am containers in RM logs. 
(Suma Shivaprasad via wangda)

Change-Id: I0c44d09a560446dee2ba68c2b9ae69fce0ec1d3e
(cherry picked from commit 8a42e922fad613f3cf1cc6cb0f3fa72546a9cc56)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/958e8c0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/958e8c0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/958e8c0e

Branch: refs/heads/YARN-5734
Commit: 958e8c0e257216c82f68fee726e5280a919da94a
Parents: ef8cd5d
Author: Wangda Tan 
Authored: Fri Sep 15 21:24:11 2017 -0700
Committer: Wangda Tan 
Committed: Fri Sep 15 21:29:31 2017 -0700

--
 .../resourcemanager/scheduler/SchedulerNode.java   |  8 
 .../scheduler/common/fica/FiCaSchedulerNode.java   | 13 +
 .../scheduler/fair/FSSchedulerNode.java| 10 ++
 3 files changed, 23 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/958e8c0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
index 272537c..90fa3e4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
@@ -170,14 +170,6 @@ public abstract class SchedulerNode {
 
 launchedContainers.put(container.getId(),
 new ContainerInfo(rmContainer, launchedOnNode));
-
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Assigned container " + container.getId() + " of capacity "
-  + container.getResource() + " on host " + rmNode.getNodeAddress()
-  + ", which has " + numContainers + " containers, "
-  + getAllocatedResource() + " used and " + 
getUnallocatedResource()
-  + " available after allocation");
-}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/958e8c0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
index c26a11b..729 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
@@ -160,4 +160,17 @@ public class FiCaSchedulerNode extends SchedulerNode {
   public synchronized Map getKillableContainers() {
 return Collections.unmodifiableMap(killableContainers);
   }
+
+  protected synchronized void allocateContainer(RMContainer rmContainer,
+  boolean launchedOnNode) {
+super.allocateContainer(rmContainer, launchedOnNode);
+
+final Container container = rmContainer.getContainer();
+LOG.info("Assigned container " + container.getId() + " of capacity "
+  + container.getResource() + " on host " + 
getRMNode().getNodeAddress()
+  + ", which has " + getNumContainers() + " containers, "
+  + getAllocatedResource() + " used and " + getUnallocatedResource()
+  + " available after allocation");
+  }
+
 }


[39/50] [abbrv] hadoop git commit: YARN-5951. Changes to allow CapacityScheduler to use configuration store

2017-09-20 Thread jhung
YARN-5951. Changes to allow CapacityScheduler to use configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a6a9bae3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a6a9bae3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a6a9bae3

Branch: refs/heads/YARN-5734
Commit: a6a9bae3057c39a6acf06514d23a950a45e7f08f
Parents: a12f09b
Author: Jonathan Hung 
Authored: Mon Jan 30 19:03:48 2017 -0800
Committer: Jonathan Hung 
Committed: Wed Sep 20 17:39:06 2017 -0700

--
 .../scheduler/capacity/CapacityScheduler.java   | 37 +--
 .../CapacitySchedulerConfiguration.java |  9 +++
 .../capacity/conf/CSConfigurationProvider.java  | 46 ++
 .../conf/FileBasedCSConfigurationProvider.java  | 67 
 .../scheduler/capacity/conf/package-info.java   | 29 +
 .../capacity/TestCapacityScheduler.java |  4 +-
 6 files changed, 170 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6a9bae3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 7f50272..e74cbe1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -103,6 +102,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityDiagnosticConstant;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.FileBasedCSConfigurationProvider;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
@@ -167,6 +168,8 @@ public class CapacityScheduler extends
 
   private int maxAssignPerHeartbeat;
 
+  private CSConfigurationProvider csConfProvider;
+
   @Override
   public void setConf(Configuration conf) {
   yarnConf = conf;
@@ -289,7 +292,18 @@ public class CapacityScheduler extends
   IOException {
 try {
   writeLock.lock();
-  this.conf = loadCapacitySchedulerConfiguration(configuration);
+  String confProviderStr = configuration.get(
+  CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
+  CapacitySchedulerConfiguration.DEFAULT_CS_CONF_PROVIDER);
+  if (confProviderStr.equals(
+  CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER)) {
+this.csConfProvider = new FileBasedCSConfigurationProvider(rmContext);
+  } else {
+throw new IOException("Invalid CS configuration provider: " +
+confProviderStr);
+  }
+  this.csConfProvider.init(configuration);
+  this.conf = this.csConfProvider.loadConfiguration(configuration);
   validateConf(this.conf);
   this.minimumAllocation = super.getMinimumAllocation();
   initMaximumResourceCapability(super.getMaximumAllocation());
@@ -399,7 +413,7 @@ public class CapacityScheduler extends
   writeLock.lock();
   Configuration configuration = new Configuration(newConf);
   CapacitySchedulerConfiguration oldConf = this.conf;
-  this.conf = loadCapacitySchedulerConfiguration(configuration);
+  this.conf = 

[44/50] [abbrv] hadoop git commit: YARN-5953 addendum: Move QueueConfigInfo and SchedConfUpdateInfo to package org.apache.hadoop.yarn.webapp.dao

2017-09-20 Thread jhung
YARN-5953 addendum: Move QueueConfigInfo and SchedConfUpdateInfo to package 
org.apache.hadoop.yarn.webapp.dao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e462f10e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e462f10e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e462f10e

Branch: refs/heads/YARN-5734
Commit: e462f10e65ada949bac159a8a7317b346e2a36e5
Parents: 6684c9a
Author: Xuan 
Authored: Mon Jul 31 11:49:05 2017 -0700
Committer: Jonathan Hung 
Committed: Wed Sep 20 17:40:53 2017 -0700

--
 .../hadoop/yarn/webapp/dao/QueueConfigInfo.java | 57 +
 .../yarn/webapp/dao/SchedConfUpdateInfo.java| 85 
 .../webapp/dao/QueueConfigInfo.java | 57 -
 .../webapp/dao/SchedConfUpdateInfo.java | 85 
 4 files changed, 142 insertions(+), 142 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e462f10e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/QueueConfigInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/QueueConfigInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/QueueConfigInfo.java
new file mode 100644
index 000..d1d91c2
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/QueueConfigInfo.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.webapp.dao;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * Information for adding or updating a queue to scheduler configuration
+ * for this queue.
+ */
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class QueueConfigInfo {
+
+  @XmlElement(name = "queueName")
+  private String queue;
+
+  private HashMap params = new HashMap<>();
+
+  public QueueConfigInfo() { }
+
+  public QueueConfigInfo(String queue, Map params) {
+this.queue = queue;
+this.params = new HashMap<>(params);
+  }
+
+  public String getQueue() {
+return this.queue;
+  }
+
+  public HashMap getParams() {
+return this.params;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e462f10e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/SchedConfUpdateInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/SchedConfUpdateInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/SchedConfUpdateInfo.java
new file mode 100644
index 000..bb84096
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/SchedConfUpdateInfo.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

[24/50] [abbrv] hadoop git commit: Revert "HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar via Haibo Chen)" HADOOP-14879 Build failure due to failing hadoop-client-check-invariants

2017-09-20 Thread jhung
Revert "HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay 
Kumar via Haibo Chen)"
HADOOP-14879 Build failure due to failing hadoop-client-check-invariants
This reverts commit 1ee25278c891e95ba2ab142e5b78aebd752ea163.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa6e8d2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa6e8d2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa6e8d2d

Branch: refs/heads/YARN-5734
Commit: aa6e8d2dff533c3d0c86776567c860548723c21c
Parents: dba7a7d
Author: Steve Loughran 
Authored: Tue Sep 19 11:53:11 2017 +0100
Committer: Steve Loughran 
Committed: Tue Sep 19 11:53:11 2017 +0100

--
 hadoop-client-modules/hadoop-client/pom.xml | 31 
 1 file changed, 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa6e8d2d/hadoop-client-modules/hadoop-client/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client/pom.xml 
b/hadoop-client-modules/hadoop-client/pom.xml
index 6500ebf..bed3f5c 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -179,37 +179,6 @@
 
 
   org.apache.hadoop
-  hadoop-yarn-client
-  compile
-  
-
-
-  org.apache.hadoop
-  hadoop-yarn-api
-
-
-  org.apache.hadoop
-  hadoop-annotations
-
-
-  com.google.guava
-  guava
-
-
-  commons-cli
-  commons-cli
-
-
-  log4j
-  log4j
-
-  
-
-
-
-  org.apache.hadoop
   hadoop-mapreduce-client-core
   compile
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[19/50] [abbrv] hadoop git commit: YARN-7203. Add container ExecutionType into ContainerReport. (Botong Huang via asuresh)

2017-09-20 Thread jhung
YARN-7203. Add container ExecutionType into ContainerReport. (Botong Huang via 
asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56ef5279
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56ef5279
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56ef5279

Branch: refs/heads/YARN-5734
Commit: 56ef5279c1db93d03b2f1e04badbfe804f548918
Parents: 3cf3540
Author: Arun Suresh 
Authored: Mon Sep 18 15:49:31 2017 -0700
Committer: Arun Suresh 
Committed: Mon Sep 18 15:49:31 2017 -0700

--
 .../yarn/api/records/ContainerReport.java   | 26 
 .../src/main/proto/yarn_protos.proto|  1 +
 .../yarn/client/api/impl/TestYarnClient.java|  1 +
 .../records/impl/pb/ContainerReportPBImpl.java  | 20 +++
 .../rmcontainer/RMContainerImpl.java|  2 +-
 5 files changed, 49 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
index 11d7bca..31d2812 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
@@ -52,6 +52,18 @@ public abstract class ContainerReport {
   long creationTime, long finishTime, String diagnosticInfo, String logUrl,
   int containerExitStatus, ContainerState containerState,
   String nodeHttpAddress) {
+return newInstance(containerId, allocatedResource, assignedNode, priority,
+creationTime, finishTime, diagnosticInfo, logUrl, containerExitStatus,
+containerState, nodeHttpAddress, ExecutionType.GUARANTEED);
+  }
+
+  @Private
+  @Unstable
+  public static ContainerReport newInstance(ContainerId containerId,
+  Resource allocatedResource, NodeId assignedNode, Priority priority,
+  long creationTime, long finishTime, String diagnosticInfo, String logUrl,
+  int containerExitStatus, ContainerState containerState,
+  String nodeHttpAddress, ExecutionType executionType) {
 ContainerReport report = Records.newRecord(ContainerReport.class);
 report.setContainerId(containerId);
 report.setAllocatedResource(allocatedResource);
@@ -64,6 +76,7 @@ public abstract class ContainerReport {
 report.setContainerExitStatus(containerExitStatus);
 report.setContainerState(containerState);
 report.setNodeHttpAddress(nodeHttpAddress);
+report.setExecutionType(executionType);
 return report;
   }
 
@@ -209,4 +222,17 @@ public abstract class ContainerReport {
   @Private
   @Unstable
   public abstract void setNodeHttpAddress(String nodeHttpAddress);
+
+  /**
+   * Get the execution type of the container.
+   *
+   * @return the execution type of the container
+   */
+  @Public
+  @Unstable
+  public abstract ExecutionType getExecutionType();
+
+  @Private
+  @Unstable
+  public abstract void setExecutionType(ExecutionType executionType);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 066441c..fb340d1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -137,6 +137,7 @@ message ContainerReportProto {
   optional int32 container_exit_status = 9;
   optional ContainerStateProto container_state = 10;
   optional string node_http_address = 11;
+  optional ExecutionTypeProto executionType = 12 [default = GUARANTEED];
 }
 
 enum YarnApplicationStateProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 

[46/50] [abbrv] hadoop git commit: YARN-6322: Disable queue refresh when configuration mutation is enabled. Contributed by Jonathan Hung

2017-09-20 Thread jhung
YARN-6322: Disable queue refresh when configuration mutation is enabled. 
Contributed by Jonathan Hung


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba537954
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba537954
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba537954

Branch: refs/heads/YARN-5734
Commit: ba537954b38af3c0c26490cdbc801721b8875c88
Parents: b06711c
Author: Xuan 
Authored: Tue Aug 1 08:48:04 2017 -0700
Committer: Jonathan Hung 
Committed: Wed Sep 20 17:40:54 2017 -0700

--
 .../server/resourcemanager/AdminService.java| 12 +-
 .../scheduler/MutableConfScheduler.java | 12 --
 .../scheduler/MutableConfigurationProvider.java |  4 +++-
 .../scheduler/capacity/CapacityScheduler.java   |  9 ++--
 .../conf/MutableCSConfigurationProvider.java| 11 +-
 .../resourcemanager/TestRMAdminService.java | 23 
 .../TestMutableCSConfigurationProvider.java | 14 +++-
 7 files changed, 67 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba537954/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 3457ae3..fd9e849 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -29,6 +29,7 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -92,6 +93,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSyst
 import 
org.apache.hadoop.yarn.server.resourcemanager.resource.DynamicResourceConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -384,6 +387,12 @@ public class AdminService extends CompositeService 
implements
 RefreshQueuesResponse response =
 recordFactory.newRecordInstance(RefreshQueuesResponse.class);
 try {
+  ResourceScheduler scheduler = rm.getRMContext().getScheduler();
+  if (scheduler instanceof MutableConfScheduler
+  && ((MutableConfScheduler) scheduler).isConfigurationMutable()) {
+throw new IOException("Scheduler configuration is mutable. " +
+operation + " is not allowed in this scenario.");
+  }
   refreshQueues();
   RMAuditLogger.logSuccess(user.getShortUserName(), operation,
   "AdminService");
@@ -393,7 +402,8 @@ public class AdminService extends CompositeService 
implements
 }
   }
 
-  private void refreshQueues() throws IOException, YarnException {
+  @Private
+  public void refreshQueues() throws IOException, YarnException {
 rm.getRMContext().getScheduler().reinitialize(getConfig(),
 this.rm.getRMContext());
 // refresh the reservation system

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba537954/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
 

[34/50] [abbrv] hadoop git commit: HADOOP-7308. Remove unused TaskLogAppender configurations from log4j.properties. Contributed by Todd Lipcon and J.Andreina.

2017-09-20 Thread jhung
HADOOP-7308. Remove unused TaskLogAppender configurations from 
log4j.properties. Contributed by Todd Lipcon and J.Andreina.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e58b247
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e58b247
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e58b247

Branch: refs/heads/YARN-5734
Commit: 7e58b2478ce10f54b9b9a647f22a69dd528a81e6
Parents: a9019e1
Author: Akira Ajisaka 
Authored: Wed Sep 20 21:07:45 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Sep 20 21:07:49 2017 +0900

--
 .../hadoop-common/src/main/conf/log4j.properties| 12 
 1 file changed, 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e58b247/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index bc1fa6c..5f4b22b 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -78,19 +78,7 @@ log4j.appender.console.layout.ConversionPattern=%d{ISO8601} 
%p %c{2}: %m%n
 #
 # TaskLog Appender
 #
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
 log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
 
 log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/50] [abbrv] hadoop git commit: HDFS-12472. Add JUNIT timeout to TestBlockStatsMXBean. Contributed by Bharat Viswanadham.

2017-09-20 Thread jhung
HDFS-12472. Add JUNIT timeout to TestBlockStatsMXBean. Contributed by Bharat 
Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d7cc22a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d7cc22a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d7cc22a

Branch: refs/heads/YARN-5734
Commit: 8d7cc22ac286302960c7939bc53574cbfeab1846
Parents: 7618fa9
Author: Arpit Agarwal 
Authored: Sat Sep 16 10:09:27 2017 -0700
Committer: Arpit Agarwal 
Committed: Sat Sep 16 10:09:27 2017 -0700

--
 .../hdfs/server/blockmanagement/TestBlockStatsMXBean.java   | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d7cc22a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
index bcf38d6..64364cb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -41,8 +41,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.eclipse.jetty.util.ajax.JSON;
+import org.junit.rules.Timeout;
 
 /**
  * Class for testing {@link BlockStatsMXBean} implementation
@@ -51,6 +53,9 @@ public class TestBlockStatsMXBean {
 
   private MiniDFSCluster cluster;
 
+  @Rule
+  public Timeout globalTimeout = new Timeout(30);
+
   @Before
   public void setup() throws IOException {
 HdfsConfiguration conf = new HdfsConfiguration();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[30/50] [abbrv] hadoop git commit: HDFS-12445. Correct spellings of choosen to chosen. Contributed by hu xiaodong.

2017-09-20 Thread jhung
HDFS-12445. Correct spellings of choosen to chosen. Contributed by hu xiaodong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51edaacd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51edaacd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51edaacd

Branch: refs/heads/YARN-5734
Commit: 51edaacd09d86419f99ca96545a1393db1f43f73
Parents: 59830ca
Author: Andrew Wang 
Authored: Tue Sep 19 13:48:23 2017 -0700
Committer: Andrew Wang 
Committed: Tue Sep 19 13:48:23 2017 -0700

--
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java  | 4 ++--
 .../org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java | 2 +-
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java| 2 +-
 .../org/apache/hadoop/examples/dancing/DistributedPentomino.java | 4 ++--
 .../main/java/org/apache/hadoop/examples/dancing/Pentomino.java  | 2 +-
 5 files changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f33ec63..0545bb2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3551,8 +3551,8 @@ public class BlockManager implements BlockStatsMXBean {
 List replicasToDelete = replicator
 .chooseReplicasToDelete(nonExcess, nonExcess, replication, excessTypes,
 addedNode, delNodeHint);
-for (DatanodeStorageInfo choosenReplica : replicasToDelete) {
-  processChosenExcessRedundancy(nonExcess, choosenReplica, storedBlock);
+for (DatanodeStorageInfo chosenReplica : replicasToDelete) {
+  processChosenExcessRedundancy(nonExcess, chosenReplica, storedBlock);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index b6c1318..1860565 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -178,7 +178,7 @@ public class TestDeadDatanode {
 clientNode, new HashSet<>(), 256 * 1024 * 1024L, null, (byte) 7,
 BlockType.CONTIGUOUS, null, null);
 for (DatanodeStorageInfo datanodeStorageInfo : results) {
-  assertFalse("Dead node should not be choosen", datanodeStorageInfo
+  assertFalse("Dead node should not be chosen", datanodeStorageInfo
   .getDatanodeDescriptor().equals(clientNode));
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
index 537b4d4..eef4461 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
@@ -368,7 +368,7 @@ public class DancingLinks {
 
   /**
* Make one move from a prefix
-   * @param goalRow the row that should be choosen
+   * @param goalRow the row that should be chosen
* @return the row that was found
*/
   private Node advance(int goalRow) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
--
diff --git 

[20/50] [abbrv] hadoop git commit: MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergery Novák.

2017-09-20 Thread jhung
MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. 
Contributed by Gergery Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2018538f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2018538f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2018538f

Branch: refs/heads/YARN-5734
Commit: 2018538fdba1a95a6556187569e872fce7f9e1c3
Parents: 56ef527
Author: Akira Ajisaka 
Authored: Tue Sep 19 11:05:54 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Sep 19 11:05:54 2017 +0900

--
 .../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java | 7 ---
 .../main/java/org/apache/hadoop/examples/DBCountPageView.java | 7 ---
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 7 +++
 .../src/main/java/org/apache/hadoop/examples/pi/DistSum.java  | 6 +++---
 .../java/org/apache/hadoop/examples/terasort/TeraGen.java | 6 +++---
 .../org/apache/hadoop/examples/terasort/TeraOutputFormat.java | 7 ---
 .../org/apache/hadoop/examples/terasort/TeraScheduler.java| 7 ---
 .../java/org/apache/hadoop/examples/terasort/TeraSort.java| 6 +++---
 .../org/apache/hadoop/examples/terasort/TestTeraSort.java | 6 +++---
 9 files changed, 31 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index 7e98d7d..da4ec79 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -29,8 +29,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -51,6 +49,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 
@@ -83,7 +83,8 @@ public class BaileyBorweinPlouffe extends Configured 
implements Tool {
   private static final String DIGIT_SIZE_PROPERTY = NAME + ".digit.size";
   private static final String DIGIT_PARTS_PROPERTY = NAME + ".digit.parts";
 
-  private static final Log LOG = LogFactory.getLog(BaileyBorweinPlouffe.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(BaileyBorweinPlouffe.class);
 
   /** Mapper class computing digits of Pi. */
   public static class BbpMapper extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
index 8dec39d..7b73820 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
@@ -29,8 +29,6 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.LongWritable;
@@ -49,6 +47,8 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.hsqldb.server.Server;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This is a demonstrative program, which uses DBInputFormat for reading
@@ -77,7 +77,8 @@ import org.hsqldb.server.Server;
  */
 public class DBCountPageView extends Configured implements Tool {
 
-  private static final Log 

[18/50] [abbrv] hadoop git commit: HADOOP-14835. mvn site build throws SAX errors. Contributed by Andrew Wang and Sean Mackrory.

2017-09-20 Thread jhung
HADOOP-14835. mvn site build throws SAX errors. Contributed by Andrew Wang and 
Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cf3540f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cf3540f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cf3540f

Branch: refs/heads/YARN-5734
Commit: 3cf3540f19b5fd1a174690db9f1b7be2977d96ba
Parents: b3d6130
Author: Andrew Wang 
Authored: Mon Sep 18 15:13:42 2017 -0700
Committer: Andrew Wang 
Committed: Mon Sep 18 15:13:42 2017 -0700

--
 BUILDING.txt   |  2 ++
 dev-support/bin/create-release |  1 +
 .../hadoop-mapreduce-client/pom.xml| 17 -
 hadoop-project-dist/pom.xml| 17 -
 hadoop-project/pom.xml |  2 ++
 hadoop-yarn-project/hadoop-yarn/pom.xml| 17 -
 6 files changed, 53 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 14deec8..47aaab4 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -308,6 +308,8 @@ Create a local staging version of the website (in 
/tmp/hadoop-site)
 
   $ mvn clean site -Preleasedocs; mvn site:stage 
-DstagingDirectory=/tmp/hadoop-site
 
+Note that the site needs to be built in a second pass after other artifacts.
+
 
--
 Installing Hadoop
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index b22e90b..b98c058 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -564,6 +564,7 @@ function makearelease
 "${MVN}" "${MVN_ARGS[@]}" install \
   site site:stage \
   -DskipTests \
+  -DskipShade \
   -Pdist,src \
   "${DOCFLAGS}"
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index aa7c7b1..274a821 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -196,6 +196,13 @@
 -unstable
 512m
   
+  
+
+  xerces
+  xercesImpl
+  ${xerces.jdiff.version}
+
+  
   
 
   
@@ -238,6 +245,14 @@
   
${project.build.directory}
   hadoop-annotations.jar
 
+
+  xerces
+  xercesImpl
+  ${xerces.version.jdiff}
+  false
+  
${project.build.directory}
+  xerces.jar
+
   
 
   
@@ -275,7 +290,7 @@

sourceFiles="${dev-support.relative.dir}/jdiff/Null.java"
maxmemory="${jdiff.javadoc.maxmemory}">
 
+
path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar:${project.build.directory}/xerces.jar">
   
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index addc2a5..8815dd4 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -152,6 +152,13 @@
 
 512m
   
+  
+
+  xerces
+  xercesImpl
+  ${xerces.jdiff.version}
+
+  
   
 
   
@@ -194,6 +201,14 @@
   
${project.build.directory}
   hadoop-annotations.jar
 
+
+  xerces
+  xercesImpl
+  ${xerces.jdiff.version}
+  false
+  
${project.build.directory}
+  xerces.jar
+
   
 
   
@@ -259,7 +274,7 @@
  

[31/50] [abbrv] hadoop git commit: HDFS-12437. Fix test setup in TestLeaseRecoveryStriped.

2017-09-20 Thread jhung
HDFS-12437. Fix test setup in TestLeaseRecoveryStriped.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12d9d7bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12d9d7bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12d9d7bc

Branch: refs/heads/YARN-5734
Commit: 12d9d7bc509bca82b8f40301e3dc5ca764be45eb
Parents: 51edaac
Author: Andrew Wang 
Authored: Tue Sep 19 16:42:20 2017 -0700
Committer: Andrew Wang 
Committed: Tue Sep 19 16:42:20 2017 -0700

--
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   | 156 ++-
 1 file changed, 113 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12d9d7bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index 2846dbf..36ac8b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -19,8 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.commons.lang.builder.ToStringBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -28,6 +27,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -40,34 +40,41 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeoutException;
 
 public class TestLeaseRecoveryStriped {
-  public static final Log LOG = LogFactory
-  .getLog(TestLeaseRecoveryStriped.class);
+  public static final Logger LOG = LoggerFactory
+  .getLogger(TestLeaseRecoveryStriped.class);
 
   private final ErasureCodingPolicy ecPolicy =
   StripedFileTestUtil.getDefaultECPolicy();
   private final int dataBlocks = ecPolicy.getNumDataUnits();
   private final int parityBlocks = ecPolicy.getNumParityUnits();
   private final int cellSize = ecPolicy.getCellSize();
-  private final int stripSize = dataBlocks * cellSize;
-  private final int stripesPerBlock = 15;
+  private final int stripeSize = dataBlocks * cellSize;
+  private final int stripesPerBlock = 4;
   private final int blockSize = cellSize * stripesPerBlock;
   private final int blockGroupSize = blockSize * dataBlocks;
   private static final int bytesPerChecksum = 512;
 
   static {
 GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
+GenericTestUtils.setLogLevel(DFSStripedOutputStream.LOG, Level.DEBUG);
+GenericTestUtils.setLogLevel(BlockRecoveryWorker.LOG, Level.DEBUG);
+GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.DEBUG);
   }
 
   static private final String fakeUsername = "fakeUser1";
@@ -83,7 +90,7 @@ public class TestLeaseRecoveryStriped {
   public void setup() throws IOException {
 conf = new HdfsConfiguration();
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
-conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 6000L);
+conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 6L);
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
 false);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -104,78 +111,118 @@ public class TestLeaseRecoveryStriped {
 }
   }
 
-  private int[][][] getBlockLengthsSuite() {
+  private static class BlockLengths {
+private final int[] blockLengths;
+private final long safeLength;
+
+BlockLengths(ErasureCodingPolicy 

[41/50] [abbrv] hadoop git commit: YARN-6575. Support global configuration mutation in MutableConfProvider. (Jonathan Hung via Xuan Gong)

2017-09-20 Thread jhung
YARN-6575. Support global configuration mutation in MutableConfProvider. 
(Jonathan Hung via Xuan Gong)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ac2ccf9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ac2ccf9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ac2ccf9

Branch: refs/heads/YARN-5734
Commit: 1ac2ccf997d90f7a9ed7b87e980d23536cb871f8
Parents: 78baf50
Author: Xuan 
Authored: Mon Jun 5 16:30:38 2017 -0700
Committer: Jonathan Hung 
Committed: Wed Sep 20 17:40:53 2017 -0700

--
 .../ConfigurationMutationACLPolicy.java |   4 +-
 .../DefaultConfigurationMutationACLPolicy.java  |   4 +-
 .../scheduler/MutableConfScheduler.java |   4 +-
 .../scheduler/MutableConfigurationProvider.java |   4 +-
 .../scheduler/capacity/CapacityScheduler.java   |   4 +-
 .../conf/MutableCSConfigurationProvider.java|  10 +-
 ...ueueAdminConfigurationMutationACLPolicy.java |  22 +++-
 .../resourcemanager/webapp/RMWebServices.java   |   4 +-
 .../webapp/dao/QueueConfigsUpdateInfo.java  |  60 ---
 .../webapp/dao/SchedConfUpdateInfo.java |  69 +
 .../TestConfigurationMutationACLPolicies.java   |  28 -
 .../TestMutableCSConfigurationProvider.java |  10 +-
 .../TestRMWebServicesConfigurationMutation.java | 101 +--
 13 files changed, 205 insertions(+), 119 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ac2ccf9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
index 724487b..3a388fe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
@@ -21,7 +21,7 @@ package 
org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 
 /**
  * Interface for determining whether configuration mutations are allowed.
@@ -41,7 +41,7 @@ public interface ConfigurationMutationACLPolicy {
* @param confUpdate configurations to be updated
* @return whether provided mutation is allowed or not
*/
-  boolean isMutationAllowed(UserGroupInformation user, QueueConfigsUpdateInfo
+  boolean isMutationAllowed(UserGroupInformation user, SchedConfUpdateInfo
   confUpdate);
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ac2ccf9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
index 680c3b8..6648668 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import 

[40/50] [abbrv] hadoop git commit: YARN-5948. Implement MutableConfigurationManager for handling storage into configuration store

2017-09-20 Thread jhung
YARN-5948. Implement MutableConfigurationManager for handling storage into 
configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0de6349a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0de6349a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0de6349a

Branch: refs/heads/YARN-5734
Commit: 0de6349a2435c367507c9a631dc4e4c536cf4c66
Parents: 8d9ba97
Author: Jonathan Hung 
Authored: Wed Mar 1 16:03:01 2017 -0800
Committer: Jonathan Hung 
Committed: Wed Sep 20 17:40:02 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  6 ++
 .../src/main/resources/yarn-default.xml | 12 +++
 .../scheduler/MutableConfigurationProvider.java | 35 
 .../scheduler/capacity/CapacityScheduler.java   | 14 ++-
 .../CapacitySchedulerConfiguration.java |  3 +
 .../capacity/conf/CSConfigurationProvider.java  |  3 +-
 .../conf/MutableCSConfigurationProvider.java| 94 
 .../conf/YarnConfigurationStoreFactory.java | 46 ++
 .../TestMutableCSConfigurationProvider.java | 83 +
 9 files changed, 291 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0de6349a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 114453f..2f77316 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -674,6 +674,12 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
   "org.apache.hadoop.yarn.LocalConfigurationProvider";
 
+  public static final String SCHEDULER_CONFIGURATION_STORE_CLASS =
+  YARN_PREFIX + "scheduler.configuration.store.class";
+  public static final String MEMORY_CONFIGURATION_STORE = "memory";
+  public static final String DEFAULT_CONFIGURATION_STORE =
+  MEMORY_CONFIGURATION_STORE;
+
   public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
   + "authorization-provider";
   private static final List RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0de6349a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 0440458..f05e005 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3348,4 +3348,16 @@
   
   
 
+  
+
+  The type of configuration store to use for storing scheduler
+  configurations, if using a mutable configuration provider.
+  Keywords such as "memory" map to certain configuration store
+  implementations. If keyword is not found, try to load this
+  value as a class.
+
+yarn.scheduler.configuration.store.class
+memory
+  
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0de6349a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
new file mode 100644
index 000..da30a2b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * 

[29/50] [abbrv] hadoop git commit: HDFS-12444. Reduce runtime of TestWriteReadStripedFile. Contributed by Huafeng Wang and Andrew Wang.

2017-09-20 Thread jhung
HDFS-12444. Reduce runtime of TestWriteReadStripedFile. Contributed by Huafeng 
Wang and Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59830ca7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59830ca7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59830ca7

Branch: refs/heads/YARN-5734
Commit: 59830ca772dfb5dcc8b3e5281ca482dea5a5fa3e
Parents: 7bbeacb
Author: Andrew Wang 
Authored: Tue Sep 19 13:44:42 2017 -0700
Committer: Andrew Wang 
Committed: Tue Sep 19 13:44:42 2017 -0700

--
 .../apache/hadoop/hdfs/StripedFileTestUtil.java | 13 +++
 .../hadoop/hdfs/TestWriteReadStripedFile.java   | 24 
 .../hdfs/TestWriteStripedFileWithFailure.java   |  3 ++-
 3 files changed, 25 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59830ca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 1489e48..c771d21 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -79,10 +79,15 @@ public class StripedFileTestUtil {
 assertEquals("File length should be the same", fileLength, 
status.getLen());
   }
 
-  static void verifyPread(FileSystem fs, Path srcPath,  int fileLength,
-  byte[] expected, byte[] buf) throws IOException {
-final ErasureCodingPolicy ecPolicy =
-((DistributedFileSystem)fs).getErasureCodingPolicy(srcPath);
+  static void verifyPread(DistributedFileSystem fs, Path srcPath,
+  int fileLength, byte[] expected, byte[] buf) throws IOException {
+final ErasureCodingPolicy ecPolicy = fs.getErasureCodingPolicy(srcPath);
+verifyPread(fs, srcPath, fileLength, expected, buf, ecPolicy);
+  }
+
+  static void verifyPread(FileSystem fs, Path srcPath, int fileLength,
+  byte[] expected, byte[] buf, ErasureCodingPolicy ecPolicy)
+  throws IOException {
 try (FSDataInputStream in = fs.open(srcPath)) {
   int[] startOffsets = {0, 1, ecPolicy.getCellSize() - 102,
   ecPolicy.getCellSize(), ecPolicy.getCellSize() + 102,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59830ca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
index f27c978..805bcea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
@@ -47,12 +48,13 @@ import java.util.Random;
 public class TestWriteReadStripedFile {
   public static final Log LOG = 
LogFactory.getLog(TestWriteReadStripedFile.class);
   private final ErasureCodingPolicy ecPolicy =
-  StripedFileTestUtil.getDefaultECPolicy();
+  SystemErasureCodingPolicies.getByID(
+  SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
   private final int cellSize = ecPolicy.getCellSize();
   private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
   private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
   private final int numDNs = dataBlocks + parityBlocks;
-  private final int stripesPerBlock = 4;
+  private final int stripesPerBlock = 2;
   private final int blockSize = stripesPerBlock * cellSize;
   private final int blockGroupSize = blockSize * dataBlocks;
 
@@ -78,11 +80,10 @@ public class TestWriteReadStripedFile {
 false);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
 fs = cluster.getFileSystem();
-fs.enableErasureCodingPolicy(
-StripedFileTestUtil.getDefaultECPolicy().getName());
+

[06/50] [abbrv] hadoop git commit: HADOOP-13714. Tighten up our compatibility guidelines for Hadoop 3

2017-09-20 Thread jhung
HADOOP-13714. Tighten up our compatibility guidelines for Hadoop 3


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7618fa91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7618fa91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7618fa91

Branch: refs/heads/YARN-5734
Commit: 7618fa9194b40454405f11a25bec4e2d79506912
Parents: 38c14ef
Author: Daniel Templeton 
Authored: Sat Sep 16 09:20:33 2017 +0200
Committer: Daniel Templeton 
Committed: Sat Sep 16 09:20:33 2017 +0200

--
 .../src/site/markdown/Compatibility.md  | 645 +++
 .../site/markdown/InterfaceClassification.md| 227 ---
 2 files changed, 675 insertions(+), 197 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7618fa91/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 05b18b5..4fa8c02 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -20,109 +20,276 @@ Apache Hadoop Compatibility
 Purpose
 ---
 
-This document captures the compatibility goals of the Apache Hadoop project. 
The different types of compatibility between Hadoop releases that affects 
Hadoop developers, downstream projects, and end-users are enumerated. For each 
type of compatibility we:
+This document captures the compatibility goals of the Apache Hadoop project.
+The different types of compatibility between Hadoop releases that affect
+Hadoop developers, downstream projects, and end-users are enumerated. For each
+type of compatibility this document will:
 
 * describe the impact on downstream projects or end-users
 * where applicable, call out the policy adopted by the Hadoop developers when 
incompatible changes are permitted.
 
+All Hadoop interfaces are classified according to the intended audience and
+stability in order to maintain compatibility with previous releases. See the
+[Hadoop Interface Taxonomy](./InterfaceClassification.html) for details
+about the classifications.
+
+### Target Audience
+
+This document is intended for consumption by the Hadoop developer community.
+This document describes the lens through which changes to the Hadoop project
+should be viewed. In order for end users and third party developers to have
+confidence about cross-release compatibility, the developer community must
+ensure that development efforts adhere to these policies. It is the
+responsibility of the project committers to validate that all changes either
+maintain compatibility or are explicitly marked as incompatible.
+
+Within a component Hadoop developers are free to use Private and Limited 
Private
+APIs, but when using components from a different module Hadoop developers
+should follow the same guidelines as third-party developers: do not
+use Private or Limited Private (unless explicitly allowed) interfaces and
+prefer instead Stable interfaces to Evolving or Unstable interfaces where
+possible. Where not possible, the preferred solution is to expand the audience
+of the API rather than introducing or perpetuating an exception to these
+compatibility guidelines. When working within a Maven module Hadoop developers
+should observe where possible the same level of restraint with regard to
+using components located in other Maven modules.
+
+Above all, Hadoop developers must be mindful of the impact of their changes.
+Stable interfaces must not change between major releases. Evolving interfaces
+must not change between minor releases. New classes and components must be
+labeled appropriately for audience and stability. See the
+[Hadoop Interface Taxonomy](./InterfaceClassification.html) for details about
+when the various labels are appropriate. As a general rule, all new interfaces
+and APIs should have the most limited labels (e.g. Private Unstable) that will
+not inhibit the intent of the interface or API.
+
+### Notational Conventions
+
+The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
+"SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" are to be interpreted as
+described in [RFC 2119](http://tools.ietf.org/html/rfc2119).
+
+Deprecation
+---
+
+The Java API provides a @Deprecated annotation to mark an API element as
+flagged for removal. The standard meaning of the annotation is that the
+API element should not be used and may be removed in a later version.
+
+In all cases removing an element from an API is an incompatible
+change. In the case of 

[09/50] [abbrv] hadoop git commit: HDFS-12460. Make addErasureCodingPolicy an idempotent operation. Contributed by Sammi Chen

2017-09-20 Thread jhung
HDFS-12460. Make addErasureCodingPolicy an idempotent operation. Contributed by 
Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f9af246
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f9af246
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f9af246

Branch: refs/heads/YARN-5734
Commit: 0f9af246e89e4ad3c4d7ff2c1d7ec9b397494a03
Parents: e81596d
Author: Kai Zheng 
Authored: Mon Sep 18 18:07:12 2017 +0800
Committer: Kai Zheng 
Committed: Mon Sep 18 18:07:12 2017 +0800

--
 .../hdfs/server/namenode/ErasureCodingPolicyManager.java  | 7 ---
 .../org/apache/hadoop/hdfs/TestErasureCodingPolicies.java | 2 +-
 .../hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java   | 4 ++--
 .../hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java  | 4 ++--
 4 files changed, 9 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f9af246/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 3a46c30..90699b4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -242,14 +242,15 @@ public final class ErasureCodingPolicyManager {
 policy.getSchema(), policy.getCellSize());
 for (ErasureCodingPolicy p : getPolicies()) {
   if (p.getName().equals(assignedNewName)) {
-throw new HadoopIllegalArgumentException("The policy name " +
-assignedNewName + " already exists");
+LOG.info("The policy name " + assignedNewName + " already exists");
+return p;
   }
   if (p.getSchema().equals(policy.getSchema()) &&
   p.getCellSize() == policy.getCellSize()) {
-throw new HadoopIllegalArgumentException("A policy with same schema "
+LOG.info("A policy with same schema "
 + policy.getSchema().toString() + " and cell size "
 + p.getCellSize() + " already exists");
+return p;
   }
 }
 policy.setName(assignedNewName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f9af246/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 19277c4..4f2040b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -718,7 +718,7 @@ public class TestErasureCodingPolicies {
 policyArray  = new ErasureCodingPolicy[]{policy0};
 responses = fs.addErasureCodingPolicies(policyArray);
 assertEquals(1, responses.length);
-assertFalse(responses[0].isSucceed());
+assertTrue(responses[0].isSucceed());
 
 // Test add policy successfully
 newPolicy =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f9af246/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
index d217813..42ff698 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
@@ -436,7 +436,7 @@ public class TestNamenodeRetryCache {
 
 LightWeightCache cacheSet = 
 (LightWeightCache) 
namesystem.getRetryCache().getCacheSet();
-assertEquals("Retry cache size is wrong", 26, cacheSet.size());
+assertEquals("Retry cache size is wrong", 34, cacheSet.size());
 
 Map oldEntries = 
 new 

[36/50] [abbrv] hadoop git commit: HDFS-11035. Better documentation for maintenance mode and upgrade domain.

2017-09-20 Thread jhung
HDFS-11035. Better documentation for maintenance mode and upgrade domain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce943eb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce943eb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce943eb1

Branch: refs/heads/YARN-5734
Commit: ce943eb17a4218d8ac1f5293c6726122371d8442
Parents: 230b85d
Author: Ming Ma 
Authored: Wed Sep 20 09:36:33 2017 -0700
Committer: Ming Ma 
Committed: Wed Sep 20 09:36:33 2017 -0700

--
 .../src/site/markdown/HdfsDataNodeAdminGuide.md | 165 ++
 .../src/site/markdown/HdfsUpgradeDomain.md  | 167 +++
 hadoop-project/src/site/site.xml|   4 +-
 3 files changed, 335 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce943eb1/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
new file mode 100644
index 000..d6f288e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
@@ -0,0 +1,165 @@
+
+
+HDFS DataNode Admin Guide
+=
+
+
+
+Overview
+
+
+The Hadoop Distributed File System (HDFS) namenode maintains states of all 
datanodes.
+There are two types of states. The first type describes the liveness of a 
datanode indicating if
+the node is live, dead or stale. The second type describes the admin state 
indicating if the node
+is in service, decommissioned or under maintenance.
+
+When an administrator decommissions a datanode, the datanode will first be 
transitioned into
+`DECOMMISSION_INPROGRESS` state. After all blocks belonging to that datanode 
have been fully replicated elsewhere
+based on each block's replication factor, the datanode will be transitioned to 
`DECOMMISSIONED` state. After that,
+the administrator can shutdown the node to perform long-term repair and 
maintenance that could take days or weeks.
+After the machine has been repaired, the machine can be recommissioned back to 
the cluster.
+
+Sometimes administrators only need to take datanodes down for minutes/hours to 
perform short-term repair/maintenance.
+In such scenario, the HDFS block replication overhead incurred by decommission 
might not be necessary and a light-weight process is desirable.
+And that is what maintenance state is used for. When an administrator puts a 
datanode in maintenance state, the datanode will first be transitioned
+to `ENTERING_MAINTENANCE` state. As long as all blocks belonging to that 
datanode are minimally replicated elsewhere, the datanode
+will immediately be transitioned to `IN_MAINTENANCE` state. After the 
maintenance has completed, the administrator can take the datanode
+out of the maintenance state. In addition, maintenance state supports timeout 
that allows administrators to config the maximum duration in
+which a datanode is allowed to stay in maintenance state. After the timeout, 
the datanode will be transitioned out of maintenance state
+automatically by HDFS without human intervention.
+
+In summary, datanode admin operations include the followings:
+
+* Decommission
+* Recommission
+* Putting nodes in maintenance state
+* Taking nodes out of maintenance state
+
+And datanode admin states include the followings:
+
+* `NORMAL` The node is in service.
+* `DECOMMISSIONED` The node has been decommissioned.
+* `DECOMMISSION_INPROGRESS` The node is being transitioned to DECOMMISSIONED 
state.
+* `IN_MAINTENANCE` The node is in maintenance state.
+* `ENTERING_MAINTENANCE` The node is being transitioned to maintenance state.
+
+
+Host-level settings
+---
+
+To perform any of datanode admin operations, there are two steps.
+
+* Update host-level configuration files to indicate the desired admin states 
of targeted datanodes. There are two supported formats for configuration files.
+* Hostname-only configuration. Each line includes the hostname/ip address 
for a datanode. That is the default format.
+* JSON-based configuration. The configuration is in JSON format. Each 
element maps to one datanode and each datanode can have multiple properties. 
This format is required to put datanodes to maintenance states.
+
+* Run the following command to have namenode reload the host-level 
configuration files.
+`hdfs dfsadmin [-refreshNodes]`
+
+### Hostname-only configuration
+This is the default configuration used by the namenode. It only supports node 
decommission and recommission; it doesn't support admin operations related to 
maintenance state. Use 

[10/50] [abbrv] hadoop git commit: YARN-7192. Add a pluggable StateMachine Listener that is notified of NM Container State changes. Contributed by Arun Suresh

2017-09-20 Thread jhung
YARN-7192. Add a pluggable StateMachine Listener that is notified of NM 
Container State changes. Contributed by Arun Suresh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4f9c7c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4f9c7c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4f9c7c9

Branch: refs/heads/YARN-5734
Commit: a4f9c7c9247801dd37beec6fc195622af1b884ad
Parents: 0f9af24
Author: Jason Lowe 
Authored: Mon Sep 18 10:16:09 2017 -0500
Committer: Jason Lowe 
Committed: Mon Sep 18 10:16:09 2017 -0500

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  6 +-
 .../state/MultiStateTransitionListener.java | 61 ++
 .../hadoop/yarn/state/StateMachineFactory.java  | 40 
 .../yarn/state/StateTransitionListener.java | 50 ++
 .../src/main/resources/yarn-default.xml |  6 ++
 .../ContainerStateTransitionListener.java   | 48 ++
 .../hadoop/yarn/server/nodemanager/Context.java |  2 +
 .../yarn/server/nodemanager/NodeManager.java| 48 +-
 .../container/ContainerImpl.java|  3 +-
 .../server/nodemanager/TestNodeManager.java | 68 
 .../amrmproxy/BaseAMRMProxyTest.java|  8 +++
 .../container/TestContainer.java| 53 +++
 12 files changed, 389 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 48910b3..114453f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -968,9 +968,13 @@ public class YarnConfiguration extends Configuration {
 NM_PREFIX + "bind-host";
 
   /** who will execute(launch) the containers.*/
-  public static final String NM_CONTAINER_EXECUTOR = 
+  public static final String NM_CONTAINER_EXECUTOR =
 NM_PREFIX + "container-executor.class";
 
+  /** List of container state transition listeners.*/
+  public static final String NM_CONTAINER_STATE_TRANSITION_LISTENERS =
+  NM_PREFIX + "container-state-transition-listener.classes";
+
   /**  
* Adjustment to make to the container os scheduling priority.
* The valid values for this could vary depending on the platform.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultiStateTransitionListener.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultiStateTransitionListener.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultiStateTransitionListener.java
new file mode 100644
index 000..1a28fc5
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultiStateTransitionListener.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.state;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A {@link StateTransitionListener} that dispatches the pre and post
+ * state transitions to multiple registered listeners.
+ * NOTE: The registered listeners are called in a for loop. Clients should
+ *   know that a listener configured earlier might prevent a later listener
+ *   

[26/50] [abbrv] hadoop git commit: MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe

2017-09-20 Thread jhung
MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. 
Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a20debd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a20debd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a20debd

Branch: refs/heads/YARN-5734
Commit: 3a20debddeac69596ceb5b36f8413529ea8570e6
Parents: ea845ba
Author: Jason Lowe 
Authored: Tue Sep 19 09:13:17 2017 -0500
Committer: Jason Lowe 
Committed: Tue Sep 19 09:13:17 2017 -0500

--
 .../org/apache/hadoop/mapred/ShuffleHandler.java  | 18 +++---
 1 file changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a20debd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 863da7e..b7f2c6d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -992,13 +992,6 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
 
-  // this audit log is disabled by default,
-  // to turn it on please enable this audit log
-  // on log4j.properties by uncommenting the setting
-  if (AUDITLOG.isDebugEnabled()) {
-AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
- " reducer " + reduceQ.get(0));
-  }
   int reduceId;
   String jobId;
   try {
@@ -1183,6 +1176,17 @@ public class ShuffleHandler extends AuxiliaryService {
 
   // Now set the response headers.
   setResponseHeaders(response, keepAliveParam, contentLength);
+
+  // this audit log is disabled by default,
+  // to turn it on please enable this audit log
+  // on log4j.properties by uncommenting the setting
+  if (AUDITLOG.isDebugEnabled()) {
+StringBuilder sb = new StringBuilder("shuffle for ");
+sb.append(jobId).append(" reducer ").append(reduce);
+sb.append(" length ").append(contentLength);
+sb.append(" mappers: ").append(mapIds);
+AUDITLOG.debug(sb.toString());
+  }
 }
 
 protected void setResponseHeaders(HttpResponse response,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[08/50] [abbrv] hadoop git commit: YARN-7172. ResourceCalculator.fitsIn() should not take a cluster resource parameter. (Sen Zhao via wangda)

2017-09-20 Thread jhung
YARN-7172. ResourceCalculator.fitsIn() should not take a cluster resource 
parameter. (Sen Zhao via wangda)

Change-Id: Icc3670c9381ce7591ca69ec12da5aa52d3612d34


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e81596d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e81596d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e81596d0

Branch: refs/heads/YARN-5734
Commit: e81596d06d226f1cfa44b2390ce3095ed4dee621
Parents: 8d7cc22
Author: Wangda Tan 
Authored: Sun Sep 17 21:20:43 2017 -0700
Committer: Wangda Tan 
Committed: Sun Sep 17 21:20:43 2017 -0700

--
 .../resource/DefaultResourceCalculator.java |  3 +-
 .../resource/DominantResourceCalculator.java|  2 +-
 .../yarn/util/resource/ResourceCalculator.java  |  3 +-
 .../hadoop/yarn/util/resource/Resources.java|  4 +--
 .../util/resource/TestResourceCalculator.java   | 24 +++---
 .../server/resourcemanager/RMServerUtils.java   |  3 +-
 .../CapacitySchedulerPreemptionUtils.java   |  4 +--
 ...QueuePriorityContainerCandidateSelector.java |  5 ++-
 .../ReservedContainerCandidatesSelector.java| 34 +---
 .../scheduler/capacity/AbstractCSQueue.java |  2 +-
 .../allocator/RegularContainerAllocator.java|  8 ++---
 .../scheduler/common/fica/FiCaSchedulerApp.java | 21 +---
 .../scheduler/capacity/TestReservations.java| 20 +---
 13 files changed, 55 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index bdf60bd..7f155e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -123,8 +123,7 @@ public class DefaultResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
-  public boolean fitsIn(Resource cluster,
-  Resource smaller, Resource bigger) {
+  public boolean fitsIn(Resource smaller, Resource bigger) {
 return smaller.getMemorySize() <= bigger.getMemorySize();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index d64f03e..ca828a5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -538,7 +538,7 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
-  public boolean fitsIn(Resource cluster, Resource smaller, Resource bigger) {
+  public boolean fitsIn(Resource smaller, Resource bigger) {
 int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
 for (int i = 0; i < maxLength; i++) {
   ResourceInformation sResourceInformation = smaller

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index 398dac5..d59560f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
@@ -225,8 +225,7 @@ 

[33/50] [abbrv] hadoop git commit: YARN-7186. Fix finicky TestContainerManager tests. Contributed by Arun Suresh.

2017-09-20 Thread jhung
YARN-7186. Fix finicky TestContainerManager tests. Contributed by Arun Suresh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/647b7527
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/647b7527
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/647b7527

Branch: refs/heads/YARN-5734
Commit: 647b7527a9cdf4717e7dcbbb660e5812b67a17f1
Parents: 12d9d7b
Author: Junping Du 
Authored: Tue Sep 19 18:31:15 2017 -0700
Committer: Junping Du 
Committed: Tue Sep 19 18:31:15 2017 -0700

--
 .../containermanager/TestContainerManager.java  | 128 ---
 .../TestContainerSchedulerQueuing.java  |  70 ++
 2 files changed, 70 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/647b7527/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index 6eea77b..38df208 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -70,7 +70,6 @@ import 
org.apache.hadoop.yarn.api.records.ContainerRetryContext;
 import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
@@ -105,7 +104,6 @@ import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
@@ -142,14 +140,6 @@ public class TestContainerManager extends 
BaseContainerManagerTest {
 exec.setConf(conf);
 return spy(exec);
   }
-
-  @Override
-  @Before
-  public void setup() throws IOException {
-conf.setInt(
-YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, 0);
-super.setup();
-  }
   
   @Override
   protected ContainerManagerImpl
@@ -1945,122 +1935,4 @@ public class TestContainerManager extends 
BaseContainerManagerTest {
 Assert.assertTrue(response.getFailedRequests().get(cId).getMessage()
 .contains("Null resource visibility for local resource"));
   }
-
-  @Test
-  public void testContainerUpdateExecTypeOpportunisticToGuaranteed()
-  throws IOException, YarnException, InterruptedException {
-delayContainers = true;
-containerManager.start();
-// Construct the Container-id
-ContainerId cId = createContainerId(0);
-ContainerLaunchContext containerLaunchContext =
-recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
-StartContainerRequest scRequest =
-StartContainerRequest.newInstance(
-containerLaunchContext,
-createContainerToken(cId, DUMMY_RM_IDENTIFIER,
-context.getNodeId(), user, BuilderUtils.newResource(512, 1),
-context.getContainerTokenSecretManager(), null,
-ExecutionType.OPPORTUNISTIC));
-List list = new ArrayList<>();
-list.add(scRequest);
-StartContainersRequest allRequests =
-StartContainersRequest.newInstance(list);
-containerManager.startContainers(allRequests);
-// Make sure the container reaches RUNNING state
-BaseContainerManagerTest.waitForNMContainerState(containerManager, cId,
-org.apache.hadoop.yarn.server.nodemanager.
-containermanager.container.ContainerState.RUNNING);
-// Construct container resource increase request,
-List updateTokens = new ArrayList<>();
-Token containerToken =
-createContainerToken(cId, 1, DUMMY_RM_IDENTIFIER, 

[49/50] [abbrv] hadoop git commit: YARN-6840. Implement zookeeper based store for scheduler configuration updates. (Jonathan Hung via wangda)

2017-09-20 Thread jhung
http://git-wip-us.apache.org/repos/asf/hadoop/blob/034e6f4f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
new file mode 100644
index 000..3cfa8da
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
@@ -0,0 +1,312 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.RetryNTimes;
+import org.apache.curator.test.TestingServer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.yarn.conf.HAUtil;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+/**
+ * Tests {@link ZKConfigurationStore}.
+ */
+public class TestZKConfigurationStore extends ConfigurationStoreBaseTest {
+
+  public static final Log LOG =
+  LogFactory.getLog(TestZKConfigurationStore.class);
+
+  private static final int ZK_TIMEOUT_MS = 1;
+  private TestingServer curatorTestingServer;
+  private CuratorFramework curatorFramework;
+  private ResourceManager rm;
+
+  /**
+   * Starts an in-process ZooKeeper server (Curator {@code TestingServer})
+   * for use by the test, and returns the running instance.
+   * Caller is responsible for stopping it.
+   */
+  public static TestingServer setupCuratorServer() throws Exception {
+TestingServer curatorTestingServer = new TestingServer();
+curatorTestingServer.start();
+return curatorTestingServer;
+  }
+
+  /**
+   * Builds and starts a Curator client connected to the given testing
+   * server, retrying failed operations up to 100 times with a 100 ms
+   * pause between attempts ({@code RetryNTimes(100, 100)}).
+   *
+   * @param curatorTestingServer the in-process ZK server to connect to
+   * @return a started {@code CuratorFramework}; caller must close it
+   */
+  public static CuratorFramework setupCuratorFramework(
+  TestingServer curatorTestingServer) throws Exception {
+CuratorFramework curatorFramework = CuratorFrameworkFactory.builder()
+.connectString(curatorTestingServer.getConnectString())
+.retryPolicy(new RetryNTimes(100, 100))
+.build();
+curatorFramework.start();
+return curatorFramework;
+  }
+
+  /**
+   * Per-test setup: starts an embedded ZK server and a Curator client,
+   * points the RM configuration's ZK address at the test server, then
+   * starts a MockRM and captures its RMContext for the test body.
+   */
+  @Before
+  public void setUp() throws Exception {
+super.setUp();
+curatorTestingServer = setupCuratorServer();
+curatorFramework = setupCuratorFramework(curatorTestingServer);
+
+// Route the RM's ZK-backed configuration store at the embedded server.
+conf.set(CommonConfigurationKeys.ZK_ADDRESS,
+curatorTestingServer.getConnectString());
+rm = new MockRM(conf);
+rm.start();
+rmContext = rm.getRMContext();
+  }
+
+  @After
+  public void cleanup() throws IOException {
+rm.stop();
+

[35/50] [abbrv] hadoop git commit: HDFS-12473. Change hosts JSON file format.

2017-09-20 Thread jhung
HDFS-12473. Change hosts JSON file format.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/230b85d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/230b85d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/230b85d5

Branch: refs/heads/YARN-5734
Commit: 230b85d5865b7e08fb7aaeab45295b5b966011ef
Parents: 7e58b24
Author: Ming Ma 
Authored: Wed Sep 20 09:03:59 2017 -0700
Committer: Ming Ma 
Committed: Wed Sep 20 09:03:59 2017 -0700

--
 .../hdfs/util/CombinedHostsFileReader.java  | 67 ++--
 .../hdfs/util/CombinedHostsFileWriter.java  | 23 ---
 .../CombinedHostFileManager.java|  3 +-
 .../hdfs/util/TestCombinedHostsFileReader.java  | 44 -
 .../src/test/resources/dfs.hosts.json   | 16 +++--
 .../src/test/resources/legacy.dfs.hosts.json|  7 ++
 6 files changed, 102 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/230b85d5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
index 8da5655..aa8e4c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
@@ -19,58 +19,85 @@
 package org.apache.hadoop.hdfs.util;
 
 import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.databind.JsonMappingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
+
 import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.io.Reader;
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
- * Reader support for JSON based datanode configuration, an alternative
+ * Reader support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
- * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * [
+ *   {"hostName": "host1"},
+ *   {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ *   {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileReader {
-  private static final ObjectReader READER =
-  new ObjectMapper().readerFor(DatanodeAdminProperties.class);
-  private static final JsonFactory JSON_FACTORY = new JsonFactory();
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(CombinedHostsFileReader.class);
 
   private CombinedHostsFileReader() {
   }
 
   /**
* Deserialize a set of DatanodeAdminProperties from a json file.
-   * @param hostsFile the input json file to read from.
+   * @param hostsFile the input json file to read from
* @return the set of DatanodeAdminProperties
* @throws IOException
*/
-  public static Set
+  public static DatanodeAdminProperties[]
   readFile(final String hostsFile) throws IOException {
-HashSet allDNs = new HashSet<>();
+DatanodeAdminProperties[] allDNs = new DatanodeAdminProperties[0];
+ObjectMapper objectMapper = new ObjectMapper();
+boolean tryOldFormat = false;
 try (Reader input =
- new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
-  Iterator iterator =
-  READER.readValues(JSON_FACTORY.createParser(input));
-  while (iterator.hasNext()) {
-DatanodeAdminProperties properties = iterator.next();
-allDNs.add(properties);
+new InputStreamReader(new 

[13/50] [abbrv] hadoop git commit: MAPREDUCE-6954. Disable erasure coding for files that are uploaded to the MR staging area (pbacsko via rkanter)

2017-09-20 Thread jhung
MAPREDUCE-6954. Disable erasure coding for files that are uploaded to the MR 
staging area (pbacsko via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0adc0471
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0adc0471
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0adc0471

Branch: refs/heads/YARN-5734
Commit: 0adc0471d0c06f66a31060f270dcb50a7b4ffafa
Parents: 5f49668
Author: Robert Kanter 
Authored: Mon Sep 18 10:40:06 2017 -0700
Committer: Robert Kanter 
Committed: Mon Sep 18 10:40:06 2017 -0700

--
 .../hadoop-mapreduce-client-core/pom.xml|  4 ++
 .../hadoop/mapreduce/JobResourceUploader.java   | 17 
 .../apache/hadoop/mapreduce/MRJobConfig.java|  5 +++
 .../src/main/resources/mapred-default.xml   |  9 
 .../mapreduce/TestJobResourceUploader.java  | 46 
 5 files changed, 81 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0adc0471/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index c34f7bd..ce5fdc8 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -44,6 +44,10 @@
 
 
   org.apache.hadoop
+  hadoop-hdfs-client
+
+
+  org.apache.hadoop
   hadoop-hdfs
   test
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0adc0471/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index f1cad57..d9bf988 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -36,6 +36,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.mapreduce.filecache.ClientDistributedCacheManager;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 
@@ -94,6 +96,11 @@ class JobResourceUploader {
 new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
 mkdirs(jtFs, submitJobDir, mapredSysPerms);
 
+if (!conf.getBoolean(MRJobConfig.MR_AM_STAGING_DIR_ERASURECODING_ENABLED,
+MRJobConfig.DEFAULT_MR_AM_STAGING_ERASURECODING_ENABLED)) {
+  disableErasureCodingForPath(jtFs, submitJobDir);
+}
+
 Collection files = conf.getStringCollection("tmpfiles");
 Collection libjars = conf.getStringCollection("tmpjars");
 Collection archives = conf.getStringCollection("tmparchives");
@@ -575,4 +582,14 @@ class JobResourceUploader {
 }
 return finalPath;
   }
+
+  /**
+   * Forces the replication (non-EC) storage policy on {@code path} when the
+   * target file system is HDFS, so files uploaded to the MR staging area are
+   * replicated instead of erasure-coded. A no-op for non-HDFS file systems.
+   *
+   * @param fs the file system that owns {@code path}
+   * @param path the directory on which to set the replication policy
+   * @throws IOException if setting the erasure coding policy fails
+   */
+  private void disableErasureCodingForPath(FileSystem fs, Path path)
+  throws IOException {
+// Fix: use the fs parameter rather than the jtFs field, which the
+// original body consulted, leaving the parameter dead. The existing
+// call site passes jtFs, so behavior there is unchanged.
+if (fs instanceof DistributedFileSystem) {
+  LOG.info("Disabling Erasure Coding for path: " + path);
+  DistributedFileSystem dfs = (DistributedFileSystem) fs;
+  dfs.setErasureCodingPolicy(path,
+  SystemErasureCodingPolicies.getReplicationPolicy().getName());
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0adc0471/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 2023ba3..86abb42 100644
--- 

[37/50] [abbrv] hadoop git commit: HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. Contributed by SammiChen.

2017-09-20 Thread jhung
HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. 
Contributed by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a12f09ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a12f09ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a12f09ba

Branch: refs/heads/YARN-5734
Commit: a12f09ba3c4a3aa4c4558090c5e1b7bcaebe3b94
Parents: ce943eb
Author: Andrew Wang 
Authored: Wed Sep 20 11:51:17 2017 -0700
Committer: Andrew Wang 
Committed: Wed Sep 20 11:51:17 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 10 +--
 .../hadoop/hdfs/DistributedFileSystem.java  |  4 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|  4 +-
 .../hdfs/protocol/AddECPolicyResponse.java  | 68 
 .../AddErasureCodingPolicyResponse.java | 68 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  2 +-
 .../ClientNamenodeProtocolTranslatorPB.java | 11 ++--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 22 ---
 .../src/main/proto/erasurecoding.proto  |  2 +-
 .../src/main/proto/hdfs.proto   |  2 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java | 13 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 15 +++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  9 +--
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  7 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  4 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  7 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java| 25 +++
 .../server/namenode/TestFSEditLogLoader.java|  4 +-
 .../hdfs/server/namenode/TestFSImage.java   |  5 +-
 19 files changed, 147 insertions(+), 135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7e8e95b..8d51a9c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -102,7 +102,7 @@ import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -2807,13 +2807,14 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public AddECPolicyResponse[] addErasureCodingPolicies(
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
   ErasureCodingPolicy[] policies) throws IOException {
 checkOpen();
 try (TraceScope ignored = tracer.newScope("addErasureCodingPolicies")) {
   return namenode.addErasureCodingPolicies(policies);
 } catch (RemoteException re) {
-  throw re.unwrapRemoteException(AccessControlException.class);
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class);
 }
   }
 
@@ -2823,7 +2824,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 try (TraceScope ignored = tracer.newScope("removeErasureCodingPolicy")) {
   namenode.removeErasureCodingPolicy(ecPolicyName);
 } catch (RemoteException re) {
-  throw re.unwrapRemoteException(AccessControlException.class);
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index f6331cf..c9f4490 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ 

[11/50] [abbrv] hadoop git commit: HDFS-12470. DiskBalancer: Some tests create plan files under system directory. Contributed by Hanisha Koneru.

2017-09-20 Thread jhung
HDFS-12470. DiskBalancer: Some tests create plan files under system directory. 
Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2dcba18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2dcba18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2dcba18

Branch: refs/heads/YARN-5734
Commit: a2dcba18531c6fa4b76325f5132773f12ddfc6d5
Parents: a4f9c7c
Author: Arpit Agarwal 
Authored: Mon Sep 18 09:53:24 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Sep 18 09:53:24 2017 -0700

--
 .../server/diskbalancer/command/TestDiskBalancerCommand.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2dcba18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index b0b0b0c..1cebae0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -476,9 +476,12 @@ public class TestDiskBalancerCommand {
   public void testPlanJsonNode() throws Exception {
 final String planArg = String.format("-%s %s", PLAN,
 "a87654a9-54c7-4693-8dd9-c9c7021dc340");
+final Path testPath = new Path(
+PathUtils.getTestPath(getClass()),
+GenericTestUtils.getMethodName());
 final String cmdLine = String
 .format(
-"hdfs diskbalancer %s", planArg);
+"hdfs diskbalancer -out %s %s", testPath, planArg);
 runCommand(cmdLine);
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[42/50] [abbrv] hadoop git commit: YARN-5953:Create CLI for changing YARN configurations. (Jonathan Hung via xgong)

2017-09-20 Thread jhung
YARN-5953:Create CLI for changing YARN configurations. (Jonathan Hung via xgong)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6684c9af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6684c9af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6684c9af

Branch: refs/heads/YARN-5734
Commit: 6684c9af744bec5d8a4948dbb7f8bfd6e96ca304
Parents: 1ac2ccf
Author: Xuan 
Authored: Fri Jul 7 14:16:46 2017 -0700
Committer: Jonathan Hung 
Committed: Wed Sep 20 17:40:53 2017 -0700

--
 hadoop-yarn-project/hadoop-yarn/bin/yarn|   3 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd|   5 +
 .../hadoop/yarn/client/cli/SchedConfCLI.java| 238 +++
 .../yarn/client/cli/TestSchedConfCLI.java   | 160 +
 .../hadoop/yarn/webapp/dao/package-info.java|  27 +++
 .../yarn/webapp/util/YarnWebServiceUtils.java   |  14 ++
 .../ConfigurationMutationACLPolicy.java |   2 +-
 .../DefaultConfigurationMutationACLPolicy.java  |   2 +-
 .../scheduler/MutableConfScheduler.java |   2 +-
 .../scheduler/MutableConfigurationProvider.java |   2 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +-
 .../conf/MutableCSConfigurationProvider.java|   4 +-
 ...ueueAdminConfigurationMutationACLPolicy.java |   4 +-
 .../resourcemanager/webapp/RMWebServices.java   |   1 +
 .../webapp/dao/QueueConfigInfo.java |   4 +-
 .../webapp/dao/SchedConfUpdateInfo.java |  18 +-
 .../TestConfigurationMutationACLPolicies.java   |   4 +-
 .../TestMutableCSConfigurationProvider.java |   4 +-
 .../TestRMWebServicesConfigurationMutation.java |  65 +++--
 19 files changed, 507 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6684c9af/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index dcde0dc..331fcfe 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -142,6 +142,9 @@ function yarncmd_case
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
   HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.router.Router'
 ;;
+schedconf)
+HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.SchedConfCLI'
+;;
 scmadmin)
   HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.SCMAdmin'
 ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6684c9af/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index 690badf..7ec9848 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -295,6 +295,11 @@ goto :eof
   set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
   goto :eof
 
+:schedconf
+  set CLASS=org.apache.hadoop.yarn.client.cli.SchedConfCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6684c9af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
new file mode 100644
index 000..e17062e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
@@ -0,0 +1,238 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package 

[43/50] [abbrv] hadoop git commit: YARN-5949. Add pluggable configuration ACL policy interface and implementation. (Jonathan Hung via wangda)

2017-09-20 Thread jhung
YARN-5949. Add pluggable configuration ACL policy interface and implementation. 
(Jonathan Hung via wangda)

Change-Id: Ib98e82ff753bede21fcab2e6ca9ec1e7a5a2008f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78baf500
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78baf500
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78baf500

Branch: refs/heads/YARN-5734
Commit: 78baf500b2f4bc7de732fe64820e776d000cec55
Parents: fbcc60c
Author: Wangda Tan 
Authored: Mon May 22 13:38:31 2017 -0700
Committer: Jonathan Hung 
Committed: Wed Sep 20 17:40:53 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   3 +
 .../src/main/resources/yarn-default.xml |  11 ++
 .../ConfigurationMutationACLPolicy.java |  47 ++
 .../ConfigurationMutationACLPolicyFactory.java  |  49 ++
 .../DefaultConfigurationMutationACLPolicy.java  |  45 ++
 .../scheduler/MutableConfScheduler.java |  19 ++-
 .../scheduler/MutableConfigurationProvider.java |   8 +-
 .../scheduler/capacity/CapacityScheduler.java   |   6 +-
 .../conf/MutableCSConfigurationProvider.java| 151 +-
 ...ueueAdminConfigurationMutationACLPolicy.java |  96 
 .../resourcemanager/webapp/RMWebServices.java   | 131 +---
 .../TestConfigurationMutationACLPolicies.java   | 154 +++
 .../TestMutableCSConfigurationProvider.java |  40 +++--
 13 files changed, 610 insertions(+), 150 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78baf500/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 2f77316..e1062d7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -680,6 +680,9 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_CONFIGURATION_STORE =
   MEMORY_CONFIGURATION_STORE;
 
+  public static final String RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS =
+  YARN_PREFIX + "scheduler.configuration.mutation.acl-policy.class";
+
   public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
   + "authorization-provider";
   private static final List RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78baf500/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index f05e005..86aa15e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3360,4 +3360,15 @@
 memory
   
 
+  
+
+  The class to use for configuration mutation ACL policy if using a mutable
+  configuration provider. Controls whether a mutation request is allowed.
+  The DefaultConfigurationMutationACLPolicy checks if the requestor is a
+  YARN admin.
+
+yarn.scheduler.configuration.mutation.acl-policy.class
+
org.apache.hadoop.yarn.server.resourcemanager.scheduler.DefaultConfigurationMutationACLPolicy
+  
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78baf500/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
new file mode 100644
index 000..724487b
--- /dev/null
+++ 

[02/50] [abbrv] hadoop git commit: YARN-7174. Add retry logic in LogsCLI when fetch running application logs. Contributed by Xuan Gong.

2017-09-20 Thread jhung
YARN-7174. Add retry logic in LogsCLI when fetch running application logs. 
Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a84c24b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a84c24b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a84c24b

Branch: refs/heads/YARN-5734
Commit: 1a84c24b0cf6674fa755403971fa57d8e412b320
Parents: 90894c7
Author: Junping Du 
Authored: Fri Sep 15 15:33:24 2017 -0700
Committer: Junping Du 
Committed: Fri Sep 15 15:33:24 2017 -0700

--
 .../apache/hadoop/yarn/client/cli/LogsCLI.java  | 175 +++-
 .../hadoop/yarn/client/cli/TestLogsCLI.java | 205 +--
 2 files changed, 309 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a84c24b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index 1a3db26..9a8ba4a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -22,6 +22,9 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.PrintStream;
+import java.net.ConnectException;
+import java.net.SocketException;
+import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -75,9 +78,11 @@ import org.codehaus.jettison.json.JSONObject;
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientRequest;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.filter.ClientFilter;
 
 @Public
 @Evolving
@@ -98,14 +103,27 @@ public class LogsCLI extends Configured implements Tool {
   = "show_container_log_info";
   private static final String OUT_OPTION = "out";
   private static final String SIZE_OPTION = "size";
+  private static final String CLIENT_MAX_RETRY_OPTION = "client_max_retries";
+  private static final String CLIENT_RETRY_INTERVAL_OPTION
+  = "client_retry_interval_ms";
   public static final String HELP_CMD = "help";
+
   private PrintStream outStream = System.out;
   private YarnClient yarnClient = null;
+  private Client webServiceClient = null;
+
+  private static final int DEFAULT_MAX_RETRIES = 30;
+  private static final long DEFAULT_RETRY_INTERVAL = 1000;
+
+  @Private
+  @VisibleForTesting
+  ClientConnectionRetry connectionRetry;
 
   @Override
   public int run(String[] args) throws Exception {
 try {
   yarnClient = createYarnClient();
+  webServiceClient = Client.create();
   return runCommand(args);
 } finally {
   if (yarnClient != null) {
@@ -140,6 +158,8 @@ public class LogsCLI extends Configured implements Tool {
 List amContainersList = new ArrayList();
 String localDir = null;
 long bytes = Long.MAX_VALUE;
+int maxRetries = DEFAULT_MAX_RETRIES;
+long retryInterval = DEFAULT_RETRY_INTERVAL;
 try {
   CommandLine commandLine = parser.parse(opts, args, false);
   appIdStr = commandLine.getOptionValue(APPLICATION_ID_OPTION);
@@ -171,6 +191,14 @@ public class LogsCLI extends Configured implements Tool {
   if (commandLine.hasOption(SIZE_OPTION)) {
 bytes = Long.parseLong(commandLine.getOptionValue(SIZE_OPTION));
   }
+  if (commandLine.hasOption(CLIENT_MAX_RETRY_OPTION)) {
+maxRetries = Integer.parseInt(commandLine.getOptionValue(
+CLIENT_MAX_RETRY_OPTION));
+  }
+  if (commandLine.hasOption(CLIENT_RETRY_INTERVAL_OPTION)) {
+retryInterval = Long.parseLong(commandLine.getOptionValue(
+CLIENT_RETRY_INTERVAL_OPTION));
+  }
 } catch (ParseException e) {
   System.err.println("options parsing failed: " + e.getMessage());
   printHelpMessage(printOpts);
@@ -232,6 +260,11 @@ public class LogsCLI extends Configured implements Tool {
   }
 }
 
+// Set up Retry WebService Client
+connectionRetry = new ClientConnectionRetry(maxRetries, retryInterval);
+ClientJerseyRetryFilter retryFilter = new 

[23/50] [abbrv] hadoop git commit: HDFS-12479. Some misuses of lock in DFSStripedOutputStream. Contributed by Huafeng Wang

2017-09-20 Thread jhung
HDFS-12479. Some misuses of lock in DFSStripedOutputStream. Contributed by 
Huafeng Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dba7a7dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dba7a7dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dba7a7dd

Branch: refs/heads/YARN-5734
Commit: dba7a7dd9d70adfab36a78eb55059c54e553a5cb
Parents: 2018538
Author: Kai Zheng 
Authored: Tue Sep 19 17:45:41 2017 +0800
Committer: Kai Zheng 
Committed: Tue Sep 19 17:45:41 2017 +0800

--
 .../java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java | 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dba7a7dd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 44db3a6..66eec7a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -63,6 +63,7 @@ import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
@@ -85,11 +86,10 @@ public class DFSStripedOutputStream extends DFSOutputStream
 private final List queues;
 
 MultipleBlockingQueue(int numQueue, int queueSize) {
-  List list = new ArrayList<>(numQueue);
+  queues = new ArrayList<>(numQueue);
   for (int i = 0; i < numQueue; i++) {
-list.add(new LinkedBlockingQueue(queueSize));
+queues.add(new LinkedBlockingQueue(queueSize));
   }
-  queues = Collections.synchronizedList(list);
 }
 
 void offer(int i, T object) {
@@ -156,8 +156,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
   followingBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
   endBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
   newBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
-  updateStreamerMap = Collections.synchronizedMap(
-  new HashMap(numAllBlocks));
+  updateStreamerMap = new ConcurrentHashMap<>(numAllBlocks);
   streamerUpdateResult = new MultipleBlockingQueue<>(numAllBlocks, 1);
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] [abbrv] hadoop git commit: YARN-7149. Cross-queue preemption sometimes starves an underserved queue. (Eric Payne via wangda)

2017-09-20 Thread jhung
YARN-7149. Cross-queue preemption sometimes starves an underserved queue. (Eric 
Payne via wangda)

Change-Id: Ib269991dbebce160378e8372ee6d24849c4a5ed6
(cherry picked from commit 3dfa937a1fadfc62947755872515f549b3b15e6a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38c14ef8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38c14ef8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38c14ef8

Branch: refs/heads/YARN-5734
Commit: 38c14ef8d8a094a7101917eb77d90f5e62324f61
Parents: 958e8c0
Author: Wangda Tan 
Authored: Fri Sep 15 21:25:21 2017 -0700
Committer: Wangda Tan 
Committed: Fri Sep 15 21:29:39 2017 -0700

--
 .../scheduler/capacity/UsersManager.java|  4 +-
 .../capacity/TestContainerAllocation.java   | 50 
 .../scheduler/capacity/TestLeafQueue.java   |  8 ++--
 3 files changed, 57 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c14ef8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
index 5f7d185..33f30b0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
@@ -731,7 +731,9 @@ public class UsersManager implements AbstractUsersManager {
  * should be higher than queue-hard-limit * ulMin
  */
 float usersSummedByWeight = activeUsersTimesWeights;
-Resource resourceUsed = totalResUsageForActiveUsers.getUsed(nodePartition);
+Resource resourceUsed = Resources.add(
+totalResUsageForActiveUsers.getUsed(nodePartition),
+required);
 
 // For non-activeUser calculation, consider all users count.
 if (!activeUser) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c14ef8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index dd6b25b..906febf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -887,4 +888,53 @@ public class TestContainerAllocation {
 
 rm1.close();
   }
+
+
+
+  @Test(timeout = 6)
+  public void testUserLimitAllocationMultipleContainers() throws Exception {
+CapacitySchedulerConfiguration newConf =
+(CapacitySchedulerConfiguration) TestUtils
+.getConfigurationWithMultipleQueues(conf);
+newConf.setUserLimit("root.c", 50);
+MockRM rm1 = new MockRM(newConf);
+
+rm1.getRMContext().setNodeLabelManager(mgr);
+rm1.start();
+MockNM nm1 = rm1.registerNode("h1:1234", 1000 * GB);
+
+// launch app from 1st user to queue C, AM container should be launched in 
nm1
+

[17/50] [abbrv] hadoop git commit: MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe

2017-09-20 Thread jhung
MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. 
Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3d61304
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3d61304
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3d61304

Branch: refs/heads/YARN-5734
Commit: b3d61304f2fa4a99526f7a60ccaac9f262083079
Parents: 1ee2527
Author: Jason Lowe 
Authored: Mon Sep 18 17:04:43 2017 -0500
Committer: Jason Lowe 
Committed: Mon Sep 18 17:04:43 2017 -0500

--
 .../org/apache/hadoop/mapred/ShuffleHandler.java  | 18 +++---
 1 file changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3d61304/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 863da7e..06a3e42 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -992,13 +992,6 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
 
-  // this audit log is disabled by default,
-  // to turn it on please enable this audit log
-  // on log4j.properties by uncommenting the setting
-  if (AUDITLOG.isDebugEnabled()) {
-AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
- " reducer " + reduceQ.get(0));
-  }
   int reduceId;
   String jobId;
   try {
@@ -1183,6 +1176,17 @@ public class ShuffleHandler extends AuxiliaryService {
 
   // Now set the response headers.
   setResponseHeaders(response, keepAliveParam, contentLength);
+
+  // this audit log is disabled by default,
+  // to turn it on please enable this audit log
+  // on log4j.properties by uncommenting the setting
+  if (AUDITLOG.isDebugEnabled()) {
+StringBuilder sb = new StringBuilder("shuffle for ").append(jobId);
+sb.append(" mappers: ").append(mapIds);
+sb.append(" reducer ").append(reduce);
+sb.append(" length ").append(contentLength);
+AUDITLOG.debug(sb.toString());
+  }
 }
 
 protected void setResponseHeaders(HttpResponse response,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[01/50] [abbrv] hadoop git commit: HDFS-12323. NameNode terminates after full GC thinking QJM unresponsive if full GC is much longer than timeout. Contributed by Erik Krogen. [Forced Update!]

2017-09-20 Thread jhung
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 9726e1fc2 -> 034e6f4f8 (forced update)


HDFS-12323. NameNode terminates after full GC thinking QJM unresponsive if full 
GC is much longer than timeout. Contributed by Erik Krogen.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90894c72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90894c72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90894c72

Branch: refs/heads/YARN-5734
Commit: 90894c7262df0243e795b675f3ac9f7b322ccd11
Parents: b9b607d
Author: Erik Krogen 
Authored: Thu Sep 14 15:53:33 2017 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Sep 15 13:56:27 2017 -0700

--
 .../hadoop/hdfs/qjournal/client/QuorumCall.java | 65 
 .../hdfs/qjournal/client/TestQuorumCall.java| 31 +-
 2 files changed, 82 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90894c72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
index dc32318..dee74e6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
@@ -24,7 +24,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StopWatch;
-import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.Timer;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -35,6 +35,7 @@ import com.google.common.util.concurrent.ListenableFuture;
 import com.google.protobuf.Message;
 import com.google.protobuf.TextFormat;
 
+
 /**
  * Represents a set of calls for which a quorum of results is needed.
  * @param  a key used to identify each of the outgoing calls
@@ -60,11 +61,12 @@ class QuorumCall {
* fraction of the configured timeout for any call.
*/
   private static final float WAIT_PROGRESS_WARN_THRESHOLD = 0.7f;
-  private final StopWatch quorumStopWatch = new StopWatch();
+  private final StopWatch quorumStopWatch;
+  private final Timer timer;
   
   static  QuorumCall create(
-  Map calls) {
-final QuorumCall qr = new QuorumCall();
+  Map calls, Timer timer) {
+final QuorumCall qr = new QuorumCall(timer);
 for (final Entry e : 
calls.entrySet()) {
   Preconditions.checkArgument(e.getValue() != null,
   "null future for key: " + e.getKey());
@@ -82,18 +84,53 @@ class QuorumCall {
 }
 return qr;
   }
-  
+
+  static  QuorumCall create(
+  Map calls) {
+return create(calls, new Timer());
+  }
+
+  /**
+   * Not intended for outside use.
+   */
   private QuorumCall() {
+this(new Timer());
+  }
+
+  private QuorumCall(Timer timer) {
 // Only instantiated from factory method above
+this.timer = timer;
+this.quorumStopWatch = new StopWatch(timer);
   }
 
+  /**
+   * Used in conjunction with {@link #getQuorumTimeoutIncreaseMillis(long, 
int)}
+   * to check for pauses.
+   */
   private void restartQuorumStopWatch() {
 quorumStopWatch.reset().start();
   }
 
-  private boolean shouldIncreaseQuorumTimeout(long offset, int millis) {
+  /**
+   * Check for a pause (e.g. GC) since the last time
+   * {@link #restartQuorumStopWatch()} was called. If detected, return the
+   * length of the pause; else, -1.
+   * @param offset Offset the elapsed time by this amount; use if some amount
+   *   of pause was expected
+   * @param millis Total length of timeout in milliseconds
+   * @return Length of pause, if detected, else -1
+   */
+  private long getQuorumTimeoutIncreaseMillis(long offset, int millis) {
 long elapsed = quorumStopWatch.now(TimeUnit.MILLISECONDS);
-return elapsed + offset > (millis * WAIT_PROGRESS_INFO_THRESHOLD);
+long pauseTime = elapsed + offset;
+if (pauseTime > (millis * WAIT_PROGRESS_INFO_THRESHOLD)) {
+  QuorumJournalManager.LOG.info("Pause detected while waiting for " +
+  "QuorumCall response; increasing timeout threshold by pause time " +
+  "of " + pauseTime + " ms.");
+  return 

hadoop git commit: HDFS-12515. Ozone: mvn package compilation fails on HDFS-7240. Contributed by Anu Engineer.

2017-09-20 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 2a94ce912 -> 244e7a5f6


HDFS-12515. Ozone: mvn package compilation fails on HDFS-7240. Contributed by 
Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/244e7a5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/244e7a5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/244e7a5f

Branch: refs/heads/HDFS-7240
Commit: 244e7a5f65c3611d2091d91a8899ac5785fcff3c
Parents: 2a94ce9
Author: Anu Engineer 
Authored: Wed Sep 20 13:45:11 2017 -0700
Committer: Anu Engineer 
Committed: Wed Sep 20 13:45:11 2017 -0700

--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml | 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/244e7a5f/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
index 4a50ed3..b29de2e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
@@ -34,6 +34,13 @@
   
 
   
+  
+  
+  org.slf4j
+  slf4j-api
+  
+  
+
 
   commons-cli
   commons-cli


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. Contributed by SammiChen.

2017-09-20 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 0006ee681 -> 1792093ba


HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. 
Contributed by SammiChen.

(cherry picked from commit a12f09ba3c4a3aa4c4558090c5e1b7bcaebe3b94)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1792093b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1792093b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1792093b

Branch: refs/heads/branch-3.0
Commit: 1792093bae273bf2e07b9ddb3628265aee9c747e
Parents: 0006ee6
Author: Andrew Wang 
Authored: Wed Sep 20 11:51:17 2017 -0700
Committer: Andrew Wang 
Committed: Wed Sep 20 11:51:21 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 10 +--
 .../hadoop/hdfs/DistributedFileSystem.java  |  4 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|  4 +-
 .../hdfs/protocol/AddECPolicyResponse.java  | 68 
 .../AddErasureCodingPolicyResponse.java | 68 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  2 +-
 .../ClientNamenodeProtocolTranslatorPB.java | 11 ++--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 22 ---
 .../src/main/proto/erasurecoding.proto  |  2 +-
 .../src/main/proto/hdfs.proto   |  2 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java | 13 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 15 +++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  9 +--
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  7 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  4 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  7 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java| 25 +++
 .../server/namenode/TestFSEditLogLoader.java|  4 +-
 .../hdfs/server/namenode/TestFSImage.java   |  5 +-
 19 files changed, 147 insertions(+), 135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1792093b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7e8e95b..8d51a9c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -102,7 +102,7 @@ import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -2807,13 +2807,14 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public AddECPolicyResponse[] addErasureCodingPolicies(
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
   ErasureCodingPolicy[] policies) throws IOException {
 checkOpen();
 try (TraceScope ignored = tracer.newScope("addErasureCodingPolicies")) {
   return namenode.addErasureCodingPolicies(policies);
 } catch (RemoteException re) {
-  throw re.unwrapRemoteException(AccessControlException.class);
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class);
 }
   }
 
@@ -2823,7 +2824,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 try (TraceScope ignored = tracer.newScope("removeErasureCodingPolicy")) {
   namenode.removeErasureCodingPolicy(ecPolicyName);
 } catch (RemoteException re) {
-  throw re.unwrapRemoteException(AccessControlException.class);
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1792093b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index f6331cf..c9f4490 100644
--- 

hadoop git commit: HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. Contributed by SammiChen.

2017-09-20 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk ce943eb17 -> a12f09ba3


HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. 
Contributed by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a12f09ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a12f09ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a12f09ba

Branch: refs/heads/trunk
Commit: a12f09ba3c4a3aa4c4558090c5e1b7bcaebe3b94
Parents: ce943eb
Author: Andrew Wang 
Authored: Wed Sep 20 11:51:17 2017 -0700
Committer: Andrew Wang 
Committed: Wed Sep 20 11:51:17 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 10 +--
 .../hadoop/hdfs/DistributedFileSystem.java  |  4 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|  4 +-
 .../hdfs/protocol/AddECPolicyResponse.java  | 68 
 .../AddErasureCodingPolicyResponse.java | 68 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  2 +-
 .../ClientNamenodeProtocolTranslatorPB.java | 11 ++--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 22 ---
 .../src/main/proto/erasurecoding.proto  |  2 +-
 .../src/main/proto/hdfs.proto   |  2 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java | 13 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 15 +++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  9 +--
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  7 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  4 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  7 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java| 25 +++
 .../server/namenode/TestFSEditLogLoader.java|  4 +-
 .../hdfs/server/namenode/TestFSImage.java   |  5 +-
 19 files changed, 147 insertions(+), 135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7e8e95b..8d51a9c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -102,7 +102,7 @@ import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -2807,13 +2807,14 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public AddECPolicyResponse[] addErasureCodingPolicies(
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
   ErasureCodingPolicy[] policies) throws IOException {
 checkOpen();
 try (TraceScope ignored = tracer.newScope("addErasureCodingPolicies")) {
   return namenode.addErasureCodingPolicies(policies);
 } catch (RemoteException re) {
-  throw re.unwrapRemoteException(AccessControlException.class);
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class);
 }
   }
 
@@ -2823,7 +2824,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 try (TraceScope ignored = tracer.newScope("removeErasureCodingPolicy")) {
   namenode.removeErasureCodingPolicy(ecPolicyName);
 } catch (RemoteException re) {
-  throw re.unwrapRemoteException(AccessControlException.class);
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index f6331cf..c9f4490 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 

hadoop git commit: HDFS-11035. Better documentation for maintenace mode and upgrade domain.

2017-09-20 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7dd662eaf -> 3bb23f4be


HDFS-11035. Better documentation for maintenace mode and upgrade domain.

(cherry picked from commit ce943eb17a4218d8ac1f5293c6726122371d8442)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3bb23f4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3bb23f4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3bb23f4b

Branch: refs/heads/branch-2
Commit: 3bb23f4be9bf91c8fefd77ad6ef0aa3dd7ae9820
Parents: 7dd662e
Author: Ming Ma 
Authored: Wed Sep 20 09:36:33 2017 -0700
Committer: Ming Ma 
Committed: Wed Sep 20 09:42:22 2017 -0700

--
 .../src/site/markdown/HdfsDataNodeAdminGuide.md | 165 ++
 .../src/site/markdown/HdfsUpgradeDomain.md  | 167 +++
 hadoop-project/src/site/site.xml|   2 +
 3 files changed, 334 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bb23f4b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
new file mode 100644
index 000..d6f288e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
@@ -0,0 +1,165 @@
+
+
+HDFS DataNode Admin Guide
+=
+
+
+
+Overview
+
+
+The Hadoop Distributed File System (HDFS) namenode maintains states of all 
datanodes.
+There are two types of states. The first type describes the liveness of a 
datanode indicating if
+the node is live, dead or stale. The second type describes the admin state 
indicating if the node
+is in service, decommissioned or under maintenance.
+
+When an administrator decommissions a datanode, the datanode will first be 
transitioned into
+`DECOMMISSION_INPROGRESS` state. After all blocks belonging to that datanode 
have been fully replicated elsewhere
+based on each block's replication factor, the datanode will be transitioned to 
`DECOMMISSIONED` state. After that,
+the administrator can shutdown the node to perform long-term repair and 
maintenance that could take days or weeks.
+After the machine has been repaired, the machine can be recommissioned back to 
the cluster.
+
+Sometimes administrators only need to take datanodes down for minutes/hours to 
perform short-term repair/maintenance.
+In such scenario, the HDFS block replication overhead incurred by decommission 
might not be necessary and a light-weight process is desirable.
+And that is what maintenance state is used for. When an administrator puts a 
datanode in maintenance state, the datanode will first be transitioned
+to `ENTERING_MAINTENANCE` state. As long as all blocks belonging to that 
datanode are minimally replicated elsewhere, the datanode
+will immediately be transitioned to `IN_MAINTENANCE` state. After the 
maintenance has completed, the administrator can take the datanode
+out of the maintenance state. In addition, maintenance state supports timeout 
that allows administrators to config the maximum duration in
+which a datanode is allowed to stay in maintenance state. After the timeout, 
the datanode will be transitioned out of maintenance state
+automatically by HDFS without human intervention.
+
+In summary, datanode admin operations include the following:
+
+* Decommission
+* Recommission
+* Putting nodes in maintenance state
+* Taking nodes out of maintenance state
+
+And datanode admin states include the following:
+
+* `NORMAL` The node is in service.
+* `DECOMMISSIONED` The node has been decommissioned.
+* `DECOMMISSION_INPROGRESS` The node is being transitioned to DECOMMISSIONED 
state.
+* `IN_MAINTENANCE` The node is in maintenance state.
+* `ENTERING_MAINTENANCE` The node is being transitioned to maintenance state.
+
+
+Host-level settings
+---
+
+To perform any of datanode admin operations, there are two steps.
+
+* Update host-level configuration files to indicate the desired admin states 
of targeted datanodes. There are two supported formats for configuration files.
+* Hostname-only configuration. Each line includes the hostname/ip address 
for a datanode. That is the default format.
+* JSON-based configuration. The configuration is in JSON format. Each 
element maps to one datanode and each datanode can have multiple properties. 
This format is required to put datanodes to maintenance states.
+
+* Run the following command to have namenode reload the host-level 
configuration files.
+`hdfs dfsadmin [-refreshNodes]`
+
+### Hostname-only configuration
+This is the default configuration used by 

hadoop git commit: HDFS-11035. Better documentation for maintenace mode and upgrade domain.

2017-09-20 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 816933722 -> 0006ee681


HDFS-11035. Better documentation for maintenace mode and upgrade domain.

(cherry picked from commit ce943eb17a4218d8ac1f5293c6726122371d8442)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0006ee68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0006ee68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0006ee68

Branch: refs/heads/branch-3.0
Commit: 0006ee681a047d8dc7501df1d5dd141cdb0f279e
Parents: 8169337
Author: Ming Ma 
Authored: Wed Sep 20 09:36:33 2017 -0700
Committer: Ming Ma 
Committed: Wed Sep 20 09:38:15 2017 -0700

--
 .../src/site/markdown/HdfsDataNodeAdminGuide.md | 165 ++
 .../src/site/markdown/HdfsUpgradeDomain.md  | 167 +++
 hadoop-project/src/site/site.xml|   4 +-
 3 files changed, 335 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0006ee68/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
new file mode 100644
index 000..d6f288e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
@@ -0,0 +1,165 @@
+
+
+HDFS DataNode Admin Guide
+=
+
+
+
+Overview
+
+
+The Hadoop Distributed File System (HDFS) namenode maintains states of all 
datanodes.
+There are two types of states. The first type describes the liveness of a 
datanode indicating if
+the node is live, dead or stale. The second type describes the admin state 
indicating if the node
+is in service, decommissioned or under maintenance.
+
+When an administrator decommissions a datanode, the datanode will first be 
transitioned into
+`DECOMMISSION_INPROGRESS` state. After all blocks belonging to that datanode 
have been fully replicated elsewhere
+based on each block's replication factor, the datanode will be transitioned to 
`DECOMMISSIONED` state. After that,
+the administrator can shutdown the node to perform long-term repair and 
maintenance that could take days or weeks.
+After the machine has been repaired, the machine can be recommissioned back to 
the cluster.
+
+Sometimes administrators only need to take datanodes down for minutes/hours to 
perform short-term repair/maintenance.
+In such scenario, the HDFS block replication overhead incurred by decommission 
might not be necessary and a light-weight process is desirable.
+And that is what maintenance state is used for. When an administrator puts a 
datanode in maintenance state, the datanode will first be transitioned
+to `ENTERING_MAINTENANCE` state. As long as all blocks belonging to that 
datanode are minimally replicated elsewhere, the datanode
+will immediately be transitioned to `IN_MAINTENANCE` state. After the 
maintenance has completed, the administrator can take the datanode
+out of the maintenance state. In addition, maintenance state supports timeout 
that allows administrators to config the maximum duration in
+which a datanode is allowed to stay in maintenance state. After the timeout, 
the datanode will be transitioned out of maintenance state
+automatically by HDFS without human intervention.
+
+In summary, datanode admin operations include the following:
+
+* Decommission
+* Recommission
+* Putting nodes in maintenance state
+* Taking nodes out of maintenance state
+
+And datanode admin states include the following:
+
+* `NORMAL` The node is in service.
+* `DECOMMISSIONED` The node has been decommissioned.
+* `DECOMMISSION_INPROGRESS` The node is being transitioned to DECOMMISSIONED 
state.
+* `IN_MAINTENANCE` The node is in maintenance state.
+* `ENTERING_MAINTENANCE` The node is being transitioned to maintenance state.
+
+
+Host-level settings
+---
+
+To perform any of datanode admin operations, there are two steps.
+
+* Update host-level configuration files to indicate the desired admin states 
of targeted datanodes. There are two supported formats for configuration files.
+* Hostname-only configuration. Each line includes the hostname/ip address 
for a datanode. That is the default format.
+* JSON-based configuration. The configuration is in JSON format. Each 
element maps to one datanode and each datanode can have multiple properties. 
This format is required to put datanodes to maintenance states.
+
+* Run the following command to have namenode reload the host-level 
configuration files.
+`hdfs dfsadmin [-refreshNodes]`
+
+### Hostname-only configuration
+This is the default 

hadoop git commit: HDFS-11035. Better documentation for maintenace mode and upgrade domain.

2017-09-20 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/trunk 230b85d58 -> ce943eb17


HDFS-11035. Better documentation for maintenace mode and upgrade domain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce943eb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce943eb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce943eb1

Branch: refs/heads/trunk
Commit: ce943eb17a4218d8ac1f5293c6726122371d8442
Parents: 230b85d
Author: Ming Ma 
Authored: Wed Sep 20 09:36:33 2017 -0700
Committer: Ming Ma 
Committed: Wed Sep 20 09:36:33 2017 -0700

--
 .../src/site/markdown/HdfsDataNodeAdminGuide.md | 165 ++
 .../src/site/markdown/HdfsUpgradeDomain.md  | 167 +++
 hadoop-project/src/site/site.xml|   4 +-
 3 files changed, 335 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce943eb1/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
new file mode 100644
index 000..d6f288e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
@@ -0,0 +1,165 @@
+
+
+HDFS DataNode Admin Guide
+=
+
+
+
+Overview
+
+
+The Hadoop Distributed File System (HDFS) namenode maintains states of all 
datanodes.
+There are two types of states. The first type describes the liveness of a 
datanode indicating if
+the node is live, dead or stale. The second type describes the admin state 
indicating if the node
+is in service, decommissioned or under maintenance.
+
+When an administrator decommissions a datanode, the datanode will first be 
transitioned into
+`DECOMMISSION_INPROGRESS` state. After all blocks belonging to that datanode 
have been fully replicated elsewhere
+based on each block's replication factor, the datanode will be transitioned to 
`DECOMMISSIONED` state. After that,
+the administrator can shutdown the node to perform long-term repair and 
maintenance that could take days or weeks.
+After the machine has been repaired, the machine can be recommissioned back to 
the cluster.
+
+Sometimes administrators only need to take datanodes down for minutes/hours to 
perform short-term repair/maintenance.
+In such scenario, the HDFS block replication overhead incurred by decommission 
might not be necessary and a light-weight process is desirable.
+And that is what maintenance state is used for. When an administrator puts a 
datanode in maintenance state, the datanode will first be transitioned
+to `ENTERING_MAINTENANCE` state. As long as all blocks belonging to that 
datanode are minimally replicated elsewhere, the datanode
+will immediately be transitioned to `IN_MAINTENANCE` state. After the 
maintenance has completed, the administrator can take the datanode
+out of the maintenance state. In addition, maintenance state supports timeout 
that allows administrators to config the maximum duration in
+which a datanode is allowed to stay in maintenance state. After the timeout, 
the datanode will be transitioned out of maintenance state
+automatically by HDFS without human intervention.
+
+In summary, datanode admin operations include the following:
+
+* Decommission
+* Recommission
+* Putting nodes in maintenance state
+* Taking nodes out of maintenance state
+
+And datanode admin states include the following:
+
+* `NORMAL` The node is in service.
+* `DECOMMISSIONED` The node has been decommissioned.
+* `DECOMMISSION_INPROGRESS` The node is being transitioned to DECOMMISSIONED 
state.
+* `IN_MAINTENANCE` The node is in maintenance state.
+* `ENTERING_MAINTENANCE` The node is being transitioned to maintenance state.
+
+
+Host-level settings
+---
+
+To perform any of datanode admin operations, there are two steps.
+
+* Update host-level configuration files to indicate the desired admin states 
of targeted datanodes. There are two supported formats for configuration files.
+* Hostname-only configuration. Each line includes the hostname/ip address 
for a datanode. That is the default format.
+* JSON-based configuration. The configuration is in JSON format. Each 
element maps to one datanode and each datanode can have multiple properties. 
This format is required to put datanodes to maintenance states.
+
+* Run the following command to have namenode reload the host-level 
configuration files.
+`hdfs dfsadmin [-refreshNodes]`
+
+### Hostname-only configuration
+This is the default configuration used by the namenode. It only supports node 
decommission and 

hadoop git commit: HDFS-12473. Change hosts JSON file format.

2017-09-20 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 a81167e2e -> c54310a63


HDFS-12473. Change hosts JSON file format.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c54310a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c54310a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c54310a6

Branch: refs/heads/branch-2.8
Commit: c54310a6383f075eeb6c8b61efcd045cb610c5cd
Parents: a81167e
Author: Ming Ma 
Authored: Wed Sep 20 09:21:32 2017 -0700
Committer: Ming Ma 
Committed: Wed Sep 20 09:21:32 2017 -0700

--
 .../hdfs/util/CombinedHostsFileReader.java  | 75 ++--
 .../hdfs/util/CombinedHostsFileWriter.java  | 26 +++
 .../CombinedHostFileManager.java|  3 +-
 .../hdfs/util/TestCombinedHostsFileReader.java  | 47 +++-
 .../src/test/resources/dfs.hosts.json   | 12 ++--
 .../src/test/resources/legacy.dfs.hosts.json|  5 ++
 6 files changed, 107 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c54310a6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
index 33acb91..f88aaef 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
@@ -18,58 +18,87 @@
 
 package org.apache.hadoop.hdfs.util;
 
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.EOFException;
 import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.io.Reader;
-
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.map.ObjectMapper;
-
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
- * Reader support for JSON based datanode configuration, an alternative
+ * Reader support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
- * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * [
+ *   {"hostName": "host1"},
+ *   {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ *   {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileReader {
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(CombinedHostsFileReader.class);
+
   private CombinedHostsFileReader() {
   }
 
   /**
* Deserialize a set of DatanodeAdminProperties from a json file.
-   * @param hostsFile the input json file to read from.
+   * @param hostsFile the input json file to read from
* @return the set of DatanodeAdminProperties
* @throws IOException
*/
-  public static Set
+  public static DatanodeAdminProperties[]
   readFile(final String hostsFile) throws IOException {
-HashSet allDNs = new HashSet<>();
-ObjectMapper mapper = new ObjectMapper();
+DatanodeAdminProperties[] allDNs = new DatanodeAdminProperties[0];
+ObjectMapper objectMapper = new ObjectMapper();
+boolean tryOldFormat = false;
 try (Reader input =
- new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
-  Iterator iterator =
-  mapper.readValues(new JsonFactory().createJsonParser(input),
-  DatanodeAdminProperties.class);
-  while (iterator.hasNext()) {
-DatanodeAdminProperties properties = iterator.next();
-

hadoop git commit: HDFS-12473. Change hosts JSON file format.

2017-09-20 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.2 e6597fe30 -> 7580a10e3


HDFS-12473. Change hosts JSON file format.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7580a10e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7580a10e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7580a10e

Branch: refs/heads/branch-2.8.2
Commit: 7580a10e3ebf6d1c58530af623cb27136b8a3de2
Parents: e6597fe
Author: Ming Ma 
Authored: Wed Sep 20 09:09:57 2017 -0700
Committer: Ming Ma 
Committed: Wed Sep 20 09:09:57 2017 -0700

--
 .../hdfs/util/CombinedHostsFileReader.java  | 75 ++--
 .../hdfs/util/CombinedHostsFileWriter.java  | 26 +++
 .../CombinedHostFileManager.java|  3 +-
 .../hdfs/util/TestCombinedHostsFileReader.java  | 47 +++-
 .../src/test/resources/dfs.hosts.json   | 12 ++--
 .../src/test/resources/legacy.dfs.hosts.json|  5 ++
 6 files changed, 107 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7580a10e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
index 33acb91..f88aaef 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
@@ -18,58 +18,87 @@
 
 package org.apache.hadoop.hdfs.util;
 
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.EOFException;
 import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.io.Reader;
-
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.map.ObjectMapper;
-
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
- * Reader support for JSON based datanode configuration, an alternative
+ * Reader support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
- * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * [
+ *   {"hostName": "host1"},
+ *   {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ *   {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileReader {
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(CombinedHostsFileReader.class);
+
   private CombinedHostsFileReader() {
   }
 
   /**
* Deserialize a set of DatanodeAdminProperties from a json file.
-   * @param hostsFile the input json file to read from.
+   * @param hostsFile the input json file to read from
* @return the set of DatanodeAdminProperties
* @throws IOException
*/
-  public static Set
+  public static DatanodeAdminProperties[]
   readFile(final String hostsFile) throws IOException {
-HashSet allDNs = new HashSet<>();
-ObjectMapper mapper = new ObjectMapper();
+DatanodeAdminProperties[] allDNs = new DatanodeAdminProperties[0];
+ObjectMapper objectMapper = new ObjectMapper();
+boolean tryOldFormat = false;
 try (Reader input =
- new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
-  Iterator iterator =
-  mapper.readValues(new JsonFactory().createJsonParser(input),
-  DatanodeAdminProperties.class);
-  while (iterator.hasNext()) {
-DatanodeAdminProperties properties = iterator.next();
-

hadoop git commit: HDFS-12473. Change hosts JSON file format.

2017-09-20 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6581f2dea -> 7dd662eaf


HDFS-12473. Change hosts JSON file format.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7dd662ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7dd662ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7dd662ea

Branch: refs/heads/branch-2
Commit: 7dd662eafd5448b9c858e61877632f5cecc0e13e
Parents: 6581f2d
Author: Ming Ma 
Authored: Wed Sep 20 09:08:41 2017 -0700
Committer: Ming Ma 
Committed: Wed Sep 20 09:08:41 2017 -0700

--
 .../hdfs/util/CombinedHostsFileReader.java  | 74 ++--
 .../hdfs/util/CombinedHostsFileWriter.java  | 23 +++---
 .../CombinedHostFileManager.java|  3 +-
 .../hdfs/util/TestCombinedHostsFileReader.java  | 44 +++-
 .../src/test/resources/dfs.hosts.json   | 16 +++--
 .../src/test/resources/legacy.dfs.hosts.json|  7 ++
 6 files changed, 106 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dd662ea/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
index 9b23ad0..f88aaef 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
@@ -18,59 +18,87 @@
 
 package org.apache.hadoop.hdfs.util;
 
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.EOFException;
 import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.io.Reader;
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.ObjectReader;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
- * Reader support for JSON based datanode configuration, an alternative
+ * Reader support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
- * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * [
+ *   {"hostName": "host1"},
+ *   {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ *   {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileReader {
-  private static final ObjectReader READER =
-  new ObjectMapper().reader(DatanodeAdminProperties.class);
-  private static final JsonFactory JSON_FACTORY = new JsonFactory();
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(CombinedHostsFileReader.class);
 
   private CombinedHostsFileReader() {
   }
 
   /**
* Deserialize a set of DatanodeAdminProperties from a json file.
-   * @param hostsFile the input json file to read from.
+   * @param hostsFile the input json file to read from
* @return the set of DatanodeAdminProperties
* @throws IOException
*/
-  public static Set
+  public static DatanodeAdminProperties[]
   readFile(final String hostsFile) throws IOException {
-HashSet allDNs = new HashSet<>();
+DatanodeAdminProperties[] allDNs = new DatanodeAdminProperties[0];
+ObjectMapper objectMapper = new ObjectMapper();
+boolean tryOldFormat = false;
 try (Reader input =
- new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
-  Iterator iterator =
-  

hadoop git commit: HDFS-12473. Change hosts JSON file format.

2017-09-20 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 5c158f2f5 -> 816933722


HDFS-12473. Change hosts JSON file format.

(cherry picked from commit 230b85d5865b7e08fb7aaeab45295b5b966011ef)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81693372
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81693372
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81693372

Branch: refs/heads/branch-3.0
Commit: 816933722af4d96a7b848a461f4228c2099c44c8
Parents: 5c158f2
Author: Ming Ma 
Authored: Wed Sep 20 09:03:59 2017 -0700
Committer: Ming Ma 
Committed: Wed Sep 20 09:05:56 2017 -0700

--
 .../hdfs/util/CombinedHostsFileReader.java  | 67 ++--
 .../hdfs/util/CombinedHostsFileWriter.java  | 23 ---
 .../CombinedHostFileManager.java|  3 +-
 .../hdfs/util/TestCombinedHostsFileReader.java  | 44 -
 .../src/test/resources/dfs.hosts.json   | 16 +++--
 .../src/test/resources/legacy.dfs.hosts.json|  7 ++
 6 files changed, 102 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81693372/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
index 8da5655..aa8e4c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
@@ -19,58 +19,85 @@
 package org.apache.hadoop.hdfs.util;
 
 import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.databind.JsonMappingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
+
 import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.io.Reader;
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
- * Reader support for JSON based datanode configuration, an alternative
+ * Reader support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
- * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * [
+ *   {"hostName": "host1"},
+ *   {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ *   {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileReader {
-  private static final ObjectReader READER =
-  new ObjectMapper().readerFor(DatanodeAdminProperties.class);
-  private static final JsonFactory JSON_FACTORY = new JsonFactory();
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(CombinedHostsFileReader.class);
 
   private CombinedHostsFileReader() {
   }
 
   /**
* Deserialize a set of DatanodeAdminProperties from a json file.
-   * @param hostsFile the input json file to read from.
+   * @param hostsFile the input json file to read from
* @return the set of DatanodeAdminProperties
* @throws IOException
*/
-  public static Set
+  public static DatanodeAdminProperties[]
   readFile(final String hostsFile) throws IOException {
-HashSet allDNs = new HashSet<>();
+DatanodeAdminProperties[] allDNs = new DatanodeAdminProperties[0];
+ObjectMapper objectMapper = new ObjectMapper();
+boolean tryOldFormat = false;
 try (Reader input =
- new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
-  Iterator iterator =
-  READER.readValues(JSON_FACTORY.createParser(input));
-  while 

hadoop git commit: HDFS-12473. Change hosts JSON file format.

2017-09-20 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7e58b2478 -> 230b85d58


HDFS-12473. Change hosts JSON file format.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/230b85d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/230b85d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/230b85d5

Branch: refs/heads/trunk
Commit: 230b85d5865b7e08fb7aaeab45295b5b966011ef
Parents: 7e58b24
Author: Ming Ma 
Authored: Wed Sep 20 09:03:59 2017 -0700
Committer: Ming Ma 
Committed: Wed Sep 20 09:03:59 2017 -0700

--
 .../hdfs/util/CombinedHostsFileReader.java  | 67 ++--
 .../hdfs/util/CombinedHostsFileWriter.java  | 23 ---
 .../CombinedHostFileManager.java|  3 +-
 .../hdfs/util/TestCombinedHostsFileReader.java  | 44 -
 .../src/test/resources/dfs.hosts.json   | 16 +++--
 .../src/test/resources/legacy.dfs.hosts.json|  7 ++
 6 files changed, 102 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/230b85d5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
index 8da5655..aa8e4c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
@@ -19,58 +19,85 @@
 package org.apache.hadoop.hdfs.util;
 
 import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.databind.JsonMappingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
+
 import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.io.Reader;
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
- * Reader support for JSON based datanode configuration, an alternative
+ * Reader support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
- * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * [
+ *   {"hostName": "host1"},
+ *   {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ *   {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileReader {
-  private static final ObjectReader READER =
-  new ObjectMapper().readerFor(DatanodeAdminProperties.class);
-  private static final JsonFactory JSON_FACTORY = new JsonFactory();
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(CombinedHostsFileReader.class);
 
   private CombinedHostsFileReader() {
   }
 
   /**
* Deserialize a set of DatanodeAdminProperties from a json file.
-   * @param hostsFile the input json file to read from.
+   * @param hostsFile the input json file to read from
* @return the set of DatanodeAdminProperties
* @throws IOException
*/
-  public static Set
+  public static DatanodeAdminProperties[]
   readFile(final String hostsFile) throws IOException {
-HashSet allDNs = new HashSet<>();
+DatanodeAdminProperties[] allDNs = new DatanodeAdminProperties[0];
+ObjectMapper objectMapper = new ObjectMapper();
+boolean tryOldFormat = false;
 try (Reader input =
- new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
-  Iterator iterator =
-  READER.readValues(JSON_FACTORY.createParser(input));
-  while (iterator.hasNext()) {
-DatanodeAdminProperties properties = iterator.next();
- 

svn commit: r1809030 - /hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

2017-09-20 Thread surendralilhore
Author: surendralilhore
Date: Wed Sep 20 13:33:34 2017
New Revision: 1809030

URL: http://svn.apache.org/viewvc?rev=1809030&view=rev
Log:
Added surendra in committer list

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1809030&r1=1809029&r2=1809030&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Wed 
Sep 20 13:33:34 2017
@@ -1428,6 +1428,14 @@

 

+ surendralilhore
+ Surendra Singh Lilhore
+ Huawei
+ HDFS
+ +5.5
+   
+
+   
  suresh
  http://people.apache.org/~suresh;>Suresh 
Srinivas
  Hortonworks



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-7308. Remove unused TaskLogAppender configurations from log4j.properties. Contributed by Todd Lipcon and J.Andreina.

2017-09-20 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bdd8433d4 -> 6581f2dea


HADOOP-7308. Remove unused TaskLogAppender configurations from 
log4j.properties. Contributed by Todd Lipcon and J.Andreina.

(cherry picked from commit 7e58b2478ce10f54b9b9a647f22a69dd528a81e6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6581f2de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6581f2de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6581f2de

Branch: refs/heads/branch-2
Commit: 6581f2dea37f5162e51ca4cc9a1bf6698ff3ad2f
Parents: bdd8433
Author: Akira Ajisaka 
Authored: Wed Sep 20 21:07:45 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Sep 20 21:08:54 2017 +0900

--
 .../hadoop-common/src/main/conf/log4j.properties| 12 
 1 file changed, 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6581f2de/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 89b7d41..610fbf3 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -78,19 +78,7 @@ log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd 
HH:mm:ss} %p %c{2}:
 #
 # TaskLog Appender
 #
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
 log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
 
 log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-7308. Remove unused TaskLogAppender configurations from log4j.properties. Contributed by Todd Lipcon and J.Andreina.

2017-09-20 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 974605c02 -> 5c158f2f5


HADOOP-7308. Remove unused TaskLogAppender configurations from 
log4j.properties. Contributed by Todd Lipcon and J.Andreina.

(cherry picked from commit 7e58b2478ce10f54b9b9a647f22a69dd528a81e6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c158f2f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c158f2f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c158f2f

Branch: refs/heads/branch-3.0
Commit: 5c158f2f51cc6657614580c4e19e319c9ccbbfab
Parents: 974605c
Author: Akira Ajisaka 
Authored: Wed Sep 20 21:07:45 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Sep 20 21:08:35 2017 +0900

--
 .../hadoop-common/src/main/conf/log4j.properties| 12 
 1 file changed, 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c158f2f/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index bc1fa6c..5f4b22b 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -78,19 +78,7 @@ log4j.appender.console.layout.ConversionPattern=%d{ISO8601} 
%p %c{2}: %m%n
 #
 # TaskLog Appender
 #
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
 log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
 
 log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-7308. Remove unused TaskLogAppender configurations from log4j.properties. Contributed by Todd Lipcon and J.Andreina.

2017-09-20 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk a9019e1fb -> 7e58b2478


HADOOP-7308. Remove unused TaskLogAppender configurations from 
log4j.properties. Contributed by Todd Lipcon and J.Andreina.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e58b247
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e58b247
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e58b247

Branch: refs/heads/trunk
Commit: 7e58b2478ce10f54b9b9a647f22a69dd528a81e6
Parents: a9019e1
Author: Akira Ajisaka 
Authored: Wed Sep 20 21:07:45 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Sep 20 21:07:49 2017 +0900

--
 .../hadoop-common/src/main/conf/log4j.properties| 12 
 1 file changed, 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e58b247/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index bc1fa6c..5f4b22b 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -78,19 +78,7 @@ log4j.appender.console.layout.ConversionPattern=%d{ISO8601} 
%p %c{2}: %m%n
 #
 # TaskLog Appender
 #
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
 log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
 
 log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12466. Ozone: KSM: Make ozone.ksm.address as mandatory property for client. Contributed by Nandakumar.

2017-09-20 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 d77c8107f -> 2a94ce912


HDFS-12466. Ozone: KSM: Make ozone.ksm.address as mandatory property for 
client. Contributed by Nandakumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a94ce91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a94ce91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a94ce91

Branch: refs/heads/HDFS-7240
Commit: 2a94ce9124c7c96a6baa527c543575b74958afd9
Parents: d77c810
Author: Weiwei Yang 
Authored: Wed Sep 20 14:55:33 2017 +0800
Committer: Weiwei Yang 
Committed: Wed Sep 20 14:55:33 2017 +0800

--
 .../hadoop/ozone/client/OzoneClientFactory.java |  1 -
 .../hadoop/ozone/client/OzoneClientUtils.java   | 26 
 .../hadoop/ozone/client/rpc/RpcClient.java  | 25 ++-
 .../src/main/resources/ozone-default.xml|  2 +-
 4 files changed, 40 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a94ce91/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index 580cd11..cda95a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -158,7 +158,6 @@ public final class OzoneClientFactory {
   return ctor.newInstance(getConfiguration());
 } catch (Exception e) {
   final String message = "Couldn't create protocol " + protocolClass;
-  LOG.warn(message, e);
   if (e.getCause() instanceof IOException) {
 throw (IOException) e.getCause();
   } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a94ce91/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
index cc3632d..e192d87 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
@@ -322,6 +322,32 @@ public final class OzoneClientUtils {
   }
 
   /**
+   * Retrieve the socket address that should be used by clients to connect
+   * to KSM.
+   * @param conf
+   * @return Target InetSocketAddress for the KSM service endpoint.
+   */
+  public static InetSocketAddress getKsmAddressForClients(
+  Configuration conf) {
+final Optional host = getHostNameFromConfigKeys(conf,
+OZONE_KSM_ADDRESS_KEY);
+
+if (!host.isPresent()) {
+  throw new IllegalArgumentException(
+  OZONE_KSM_ADDRESS_KEY + " must be defined. See" +
+  " https://wiki.apache.org/hadoop/Ozone#Configuration for" +
+  " details on configuring Ozone.");
+}
+
+// If no port number is specified then we'll just try the defaultBindPort.
+final Optional port = getPortNumberFromConfigKeys(conf,
+OZONE_KSM_ADDRESS_KEY);
+
+return NetUtils.createSocketAddr(
+host.get() + ":" + port.or(OZONE_KSM_PORT_DEFAULT));
+  }
+
+  /**
* Retrieve the socket address that is used by CBlock Service.
* @param conf
* @return Target InetSocketAddress for the CBlock Service endpoint.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a94ce91/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 6464c5d..e79d170 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -106,6 +106,19 @@ public class RpcClient implements ClientProtocol {
 this.groupRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS,