hadoop git commit: HADOOP-14944. Add JvmMetrics to KMS.

2017-10-19 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 12c81c67d -> a6370dde9


HADOOP-14944. Add JvmMetrics to KMS.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a6370dde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a6370dde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a6370dde

Branch: refs/heads/branch-2
Commit: a6370dde90acc944ff466f44143f9e45ad5e7890
Parents: 12c81c6
Author: Xiao Chen 
Authored: Thu Oct 19 22:36:39 2017 -0700
Committer: Xiao Chen 
Committed: Thu Oct 19 22:37:35 2017 -0700

--
 .../hadoop/metrics2/source/JvmMetrics.java  | 16 +
 .../crypto/key/kms/server/KMSConfiguration.java |  9 +
 .../hadoop/crypto/key/kms/server/KMSWebApp.java | 23 
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 38 +++-
 4 files changed, 85 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6370dde/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
index c6369cd..e3f8754 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
@@ -58,6 +58,11 @@ public class JvmMetrics implements MetricsSource {
   }
   return impl;
 }
+
+synchronized void shutdown() {
+  DefaultMetricsSystem.instance().unregisterSource(JvmMetrics.name());
+  impl = null;
+}
   }
 
   @VisibleForTesting
@@ -81,6 +86,7 @@ public class JvmMetrics implements MetricsSource {
  final ConcurrentHashMap<String, MetricsInfo[]> gcInfoCache =
      new ConcurrentHashMap<String, MetricsInfo[]>();
 
+  @VisibleForTesting
   JvmMetrics(String processName, String sessionId) {
 this.processName = processName;
 this.sessionId = sessionId;
@@ -104,6 +110,16 @@ public class JvmMetrics implements MetricsSource {
 return Singleton.INSTANCE.init(processName, sessionId);
   }
 
+  /**
+   * Shutdown the JvmMetrics singleton. This is not necessary if the JVM itself
+   * is shutdown, but may be necessary for scenarios where JvmMetrics instance
+   * needs to be re-created while the JVM is still around. One such scenario
+   * is unit-testing.
+   */
+  public static void shutdownSingleton() {
+Singleton.INSTANCE.shutdown();
+  }
+
   @Override
   public void getMetrics(MetricsCollector collector, boolean all) {
 MetricsRecordBuilder rb = collector.addRecord(JvmMetrics)
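
A minimal sketch of the lifecycle this enables (hedged: the hunk above does not
show the enclosing method's name, but its body matches
JvmMetrics.initSingleton(processName, sessionId)):

    // Hypothetical test-style usage of the new shutdown path: tear the
    // singleton down so a later init creates a fresh "jvm" source.
    import org.apache.hadoop.metrics2.source.JvmMetrics;

    public class JvmMetricsLifecycleSketch {
      public static void main(String[] args) {
        JvmMetrics first = JvmMetrics.initSingleton("KMS", "session-1");
        JvmMetrics.shutdownSingleton();       // unregisters source, nulls impl
        JvmMetrics second = JvmMetrics.initSingleton("KMS", "session-2");
        System.out.println(first != second);  // true: a new instance was made
      }
    }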

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6370dde/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index d825b2b..cb89561 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -63,6 +63,15 @@ public class KMSConfiguration {
   public static final String KMS_AUDIT_AGGREGATION_WINDOW = CONFIG_PREFIX +
   "audit.aggregation.window.ms";
 
+  // Process name shown in metrics
+  public static final String METRICS_PROCESS_NAME_KEY =
+  CONFIG_PREFIX + "metrics.process.name";
+  public static final String METRICS_PROCESS_NAME_DEFAULT = "KMS";
+
+  // Session id for metrics
+  public static final String METRICS_SESSION_ID_KEY =
+  CONFIG_PREFIX + "metrics.session.id";
+
   // KMS Audit logger classes to use
   public static final String KMS_AUDIT_LOGGER_KEY = CONFIG_PREFIX +
   "audit.logger";

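A hedged sketch of how a KMS server component might consume these new keys (the
KMSWebApp hunk that actually wires them up is truncated below, so the call site
here is an assumption):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.metrics2.source.JvmMetrics;

    public class KmsJvmMetricsSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // METRICS_PROCESS_NAME_KEY resolves to "hadoop.kms.metrics.process.name"
        // under the CONFIG_PREFIX used elsewhere in KMSConfiguration.
        String processName =
            conf.get("hadoop.kms.metrics.process.name", "KMS");
        String sessionId = conf.get("hadoop.kms.metrics.session.id");
        JvmMetrics.initSingleton(processName, sessionId);
      }
    }
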
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6370dde/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index 5772036..80cb627 100644
--- 

[3/3] hadoop git commit: YARN-7170. Improve bower dependencies for YARN UI v2. (Sunil G via wangda)

2017-10-19 Thread vrushali
YARN-7170. Improve bower dependencies for YARN UI v2. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc2326f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc2326f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc2326f5

Branch: refs/heads/YARN-3368_branch2
Commit: bc2326f5f081e854be8d8a1519cfef10aff78323
Parents: 28c5498
Author: Wangda Tan 
Authored: Thu Oct 19 21:58:31 2017 -0700
Committer: vrushali 
Committed: Thu Oct 19 22:20:36 2017 -0700

--
 hadoop-project/pom.xml  | 4 
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml  | 2 +-
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc | 5 +
 3 files changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc2326f5/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 575a9c4..2c8f4a9 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -132,6 +132,10 @@
 900
 1.11.199
 2.3.4
+<frontend-maven-plugin.version>1.5</frontend-maven-plugin.version>
+
+${project.version}
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc2326f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 032a9a2..c604af7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -133,7 +133,7 @@
   
 <groupId>com.github.eirslett</groupId>
 <artifactId>frontend-maven-plugin</artifactId>
-<version>1.2</version>
+<version>${frontend-maven-plugin.version}</version>
 
   ${webappDir}
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc2326f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
index 5b0b07d..959e169 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -1,7 +1,4 @@
 {
   "directory": "bower_components",
-  "analytics": false,
-  "resolvers": [
-"bower-shrinkwrap-resolver-ext"
-  ]
+  "analytics": false
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/3] hadoop git commit: YARN-7338. Support same origin policy for cross site scripting prevention. (Sunil G via wangda)

2017-10-19 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3368_branch2 3a5f1a6c2 -> bc2326f5f


YARN-7338. Support same origin policy for cross site scripting prevention. 
(Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/017ac560
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/017ac560
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/017ac560

Branch: refs/heads/YARN-3368_branch2
Commit: 017ac560f0261e19925ee24999f7828c433dda11
Parents: 3a5f1a6
Author: Wangda Tan 
Authored: Thu Oct 19 14:44:42 2017 -0700
Committer: vrushali 
Committed: Thu Oct 19 21:48:11 2017 -0700

--
 .../org/apache/hadoop/yarn/webapp/WebApps.java  | 24 +++-
 1 file changed, 23 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/017ac560/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
index 0dc6354..3782c05 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
@@ -401,7 +401,8 @@ public class WebApps {
   WebApp webApp = build(webapp);
   HttpServer2 httpServer = webApp.httpServer();
   if (ui2Context != null) {
-httpServer.addContext(ui2Context, true);
+addFiltersForNewContext(ui2Context);
+httpServer.addHandlerAtFront(ui2Context);
   }
   try {
 httpServer.start();
@@ -413,6 +414,27 @@ public class WebApps {
   return webApp;
 }
 
+private void addFiltersForNewContext(WebAppContext ui2Context) {
+  Map<String, String> params = getConfigParameters(csrfConfigPrefix);
+
+  if (hasCSRFEnabled(params)) {
+LOG.info("CSRF Protection has been enabled for the {} application. "
++ "Please ensure that there is an authentication mechanism "
++ "enabled (kerberos, custom, etc).", name);
+String restCsrfClassName = RestCsrfPreventionFilter.class.getName();
+HttpServer2.defineFilter(ui2Context, restCsrfClassName,
+restCsrfClassName, params, new String[]{"/*"});
+  }
+
+  params = getConfigParameters(xfsConfigPrefix);
+
+  if (hasXFSEnabled()) {
+String xfsClassName = XFrameOptionsFilter.class.getName();
+HttpServer2.defineFilter(ui2Context, xfsClassName, xfsClassName, params,
+new String[]{"/*"});
+  }
+}
+
 private String inferHostClass() {
   String thisClass = this.getClass().getName();
   Throwable t = new Throwable();

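The helper above only gates filter registration on pulled configuration; the
shape of that check can be sketched on its own (key names here are illustrative
assumptions, not taken from this commit):

    import java.util.HashMap;
    import java.util.Map;

    public class FilterGateSketch {
      // Stand-in for hasCSRFEnabled(params): a filter is registered only
      // when the extracted config subtree carries an "enabled" flag.
      static boolean isEnabled(Map<String, String> params) {
        return Boolean.parseBoolean(params.getOrDefault("enabled", "false"));
      }

      public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put("enabled", "true");
        if (isEnabled(params)) {
          System.out.println("would call HttpServer2.defineFilter(...) for /*");
        }
      }
    }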

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/3] hadoop git commit: Ensuring right apis are called as per jetty version in branch2 (trunk has different jetty)

2017-10-19 Thread vrushali
Ensuring right apis are called as per jetty version in branch2 (trunk has 
different jetty)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28c54989
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28c54989
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28c54989

Branch: refs/heads/YARN-3368_branch2
Commit: 28c54989d6a7cd70ca8965cafae60e0f38b01c92
Parents: 017ac56
Author: vrushali 
Authored: Thu Oct 19 22:18:17 2017 -0700
Committer: vrushali 
Committed: Thu Oct 19 22:18:17 2017 -0700

--
 .../src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28c54989/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
index 3782c05..26eb000 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
@@ -402,7 +402,7 @@ public class WebApps {
   HttpServer2 httpServer = webApp.httpServer();
   if (ui2Context != null) {
 addFiltersForNewContext(ui2Context);
-httpServer.addHandlerAtFront(ui2Context);
+httpServer.addContext(ui2Context, true);
   }
   try {
 httpServer.start();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7170. Improve bower dependencies for YARN UI v2. (Sunil G via wangda)

2017-10-19 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 b7cb58a21 -> dc2ae3f20


YARN-7170. Improve bower dependencies for YARN UI v2. (Sunil G via wangda)

(cherry picked from commit 4afd308b62d2335f31064c05bfefaf2294d874b0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc2ae3f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc2ae3f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc2ae3f2

Branch: refs/heads/branch-3.0
Commit: dc2ae3f20b777e122afd11b684da65857323287f
Parents: b7cb58a
Author: Wangda Tan 
Authored: Thu Oct 19 21:58:31 2017 -0700
Committer: Wangda Tan 
Committed: Thu Oct 19 21:59:28 2017 -0700

--
 hadoop-project/pom.xml  | 1 +
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml  | 2 +-
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc | 5 +
 3 files changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc2ae3f2/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 8aa4052..ef5932d 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -137,6 +137,7 @@
 900
 1.11.199
 2.3.4
+<frontend-maven-plugin.version>1.5</frontend-maven-plugin.version>
 
 ${project.version}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc2ae3f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 42d6018..d292c8e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -132,7 +132,7 @@
   
 <groupId>com.github.eirslett</groupId>
 <artifactId>frontend-maven-plugin</artifactId>
-<version>1.2</version>
+<version>${frontend-maven-plugin.version}</version>
 
   ${webappDir}
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc2ae3f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
index 5b0b07d..959e169 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -1,7 +1,4 @@
 {
   "directory": "bower_components",
-  "analytics": false,
-  "resolvers": [
-"bower-shrinkwrap-resolver-ext"
-  ]
+  "analytics": false
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7170. Improve bower dependencies for YARN UI v2. (Sunil G via wangda)

2017-10-19 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk ce7cf66e5 -> 4afd308b6


YARN-7170. Improve bower dependencies for YARN UI v2. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4afd308b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4afd308b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4afd308b

Branch: refs/heads/trunk
Commit: 4afd308b62d2335f31064c05bfefaf2294d874b0
Parents: ce7cf66
Author: Wangda Tan 
Authored: Thu Oct 19 21:58:31 2017 -0700
Committer: Wangda Tan 
Committed: Thu Oct 19 21:58:40 2017 -0700

--
 hadoop-project/pom.xml  | 1 +
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml  | 2 +-
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc | 5 +
 3 files changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4afd308b/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 8980f0e..3baa8f3 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -138,6 +138,7 @@
 900
 1.11.199
 2.3.4
+<frontend-maven-plugin.version>1.5</frontend-maven-plugin.version>
 
 ${project.version}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4afd308b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 7aa03ed..b552f35 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -132,7 +132,7 @@
   
 <groupId>com.github.eirslett</groupId>
 <artifactId>frontend-maven-plugin</artifactId>
-<version>1.2</version>
+<version>${frontend-maven-plugin.version}</version>
 
   ${webappDir}
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4afd308b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
index 5b0b07d..959e169 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -1,7 +1,4 @@
 {
   "directory": "bower_components",
-  "analytics": false,
-  "resolvers": [
-"bower-shrinkwrap-resolver-ext"
-  ]
+  "analytics": false
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/50] [abbrv] hadoop git commit: YARN-7333. container-executor fails to remove entries from a directory that is not writable or executable. Contributed by Jason Lowe.

2017-10-19 Thread haibochen
YARN-7333. container-executor fails to remove entries from a directory that is 
not writable or executable. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4540ffd1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4540ffd1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4540ffd1

Branch: refs/heads/YARN-1011
Commit: 4540ffd15f1259d99a7847c60d44c51f29faa397
Parents: b7ff624
Author: Nathan Roberts 
Authored: Mon Oct 16 16:36:51 2017 -0500
Committer: Nathan Roberts 
Committed: Mon Oct 16 17:00:38 2017 -0500

--
 .../impl/container-executor.c   | 41 +---
 .../test/test-container-executor.c  |  8 
 2 files changed, 35 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4540ffd1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 08d69a5..3b04f88 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1731,18 +1731,19 @@ static int unlink_helper(int dirfd, const char *name, int flags) {
 }
 
 /**
- * Determine if an entry in a directory is a symlink.
+ * Determine if an entry in a directory is another directory without following
+ * symlinks.
  *
  * @param dirfd The directory file descriptor, or -1 if there is none.
  * @param name  If dirfd is -1, this is the path to examine.
  *  Otherwise, this is the file name in the directory to
  *  examine.
  *
- * @return  0 if the entry is not a symlink
- *  1 if the entry is a symlink
+ * @return  0 if the entry is a symlink or otherwise not a directory
+ *  1 if the entry is a directory
  *  A negative errno code if we couldn't access the entry.
  */
-static int is_symlink_helper(int dirfd, const char *name)
+static int is_dir_helper(int dirfd, const char *name)
 {
   struct stat stat;
 
@@ -1755,7 +1756,7 @@ static int is_symlink_helper(int dirfd, const char *name)
   return -errno;
 }
   }
-  return !!S_ISLNK(stat.st_mode);
+  return !!S_ISDIR(stat.st_mode);
 }
 
 static int recursive_unlink_helper(int dirfd, const char *name,
@@ -1765,30 +1766,29 @@ static int recursive_unlink_helper(int dirfd, const char *name,
   DIR *dfd = NULL;
   struct stat stat;
 
-  // Check to see if the file is a symlink.  If so, delete the symlink rather
-  // than what it points to.
-  ret = is_symlink_helper(dirfd, name);
+  // Check to see if the file is a directory. If not then we can unlink it now.
+  ret = is_dir_helper(dirfd, name);
   if (ret < 0) {
-// is_symlink_helper failed.
+// is_dir_helper failed.
 if (ret == -ENOENT) {
   ret = 0;
   goto done;
 }
 ret = -ret;
-fprintf(LOGFILE, "is_symlink_helper(%s) failed: %s\n",
+fprintf(LOGFILE, "is_dir_helper(%s) failed: %s\n",
 fullpath, strerror(ret));
 goto done;
-  } else if (ret == 1) {
-// is_symlink_helper determined that the path is a symlink.
+  } else if (ret == 0) {
+// is_dir_helper determined that the path is not a directory.
 ret = unlink_helper(dirfd, name, 0);
 if (ret) {
-  fprintf(LOGFILE, "failed to unlink symlink %s: %s\n",
+  fprintf(LOGFILE, "failed to unlink %s: %s\n",
   fullpath, strerror(ret));
 }
 goto done;
   }
 
-  // Open the file.  We use O_NOFOLLOW here to ensure that we if a symlink was
+  // Open the directory. We use O_NOFOLLOW here to ensure that if a symlink was
   // swapped in by an attacker, we will fail to follow it rather than deleting
   // something we potentially should not.
   fd = open_helper(dirfd, name);
@@ -1829,6 +1829,19 @@ static int recursive_unlink_helper(int dirfd, const char *name,
   goto done;
 }
   } else {
+// make sure the directory has full user permissions
+// so entries can be deleted
+if ((stat.st_mode & S_IRWXU) != S_IRWXU) {
+  ret = chmod_helper(dirfd, name, 0700);
+  if (ret) {
+if (ret == ENOENT) {
+  ret = 0;
+  goto 

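The symlink-safety idea in this change (classify the entry itself without
following links, unlink non-directories directly, and only then recurse)
translates to other runtimes too. A hedged Java NIO analogue, which cannot
reproduce the fd-relative O_NOFOLLOW open but keeps the same decision order:

    import java.io.IOException;
    import java.nio.file.*;
    import java.nio.file.attribute.BasicFileAttributes;

    public class NoFollowDeleteSketch {
      static void deleteRecursively(Path p) throws IOException {
        // Stat without following symlinks, like is_dir_helper above: a
        // symlink to a directory reports isDirectory() == false here.
        BasicFileAttributes attrs = Files.readAttributes(
            p, BasicFileAttributes.class, LinkOption.NOFOLLOW_LINKS);
        if (attrs.isDirectory()) {
          try (DirectoryStream<Path> entries = Files.newDirectoryStream(p)) {
            for (Path entry : entries) {
              deleteRecursively(entry);
            }
          }
        }
        // For a symlink this deletes the link itself, never its target.
        Files.deleteIfExists(p);
      }

      public static void main(String[] args) throws IOException {
        deleteRecursively(Paths.get(args[0]));
      }
    }
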
[38/50] [abbrv] hadoop git commit: HADOOP-14880. [KMS] Document missing KMS client side configs. Contributed by Gabor Bota.

2017-10-19 Thread haibochen
HADOOP-14880. [KMS] Document missing KMS client side configs. Contributed 
by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97c70c7a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97c70c7a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97c70c7a

Branch: refs/heads/YARN-1011
Commit: 97c70c7ac6881f87eee1575bcbdd28b31ecac231
Parents: 60bfee2
Author: Wei-Chiu Chuang 
Authored: Thu Oct 19 06:02:13 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Oct 19 06:02:13 2017 -0700

--
 .../org/apache/hadoop/crypto/key/kms/KMSClientProvider.java | 8 +++-
 .../org/apache/hadoop/fs/CommonConfigurationKeysPublic.java | 9 +
 .../hadoop-common/src/main/resources/core-default.xml   | 8 
 .../org/apache/hadoop/crypto/key/kms/server/TestKMS.java| 3 ++-
 4 files changed, 22 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97c70c7a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index c514beb..c324cd7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -121,10 +121,6 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
 
   private static final String CONFIG_PREFIX = "hadoop.security.kms.client.";
 
-  /* It's possible to specify a timeout, in seconds, in the config file */
-  public static final String TIMEOUT_ATTR = CONFIG_PREFIX + "timeout";
-  public static final int DEFAULT_TIMEOUT = 60;
-
   /* Number of times to retry authentication in the event of auth failure
* (normally happens due to stale authToken) 
*/
@@ -361,7 +357,9 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
 throw new IOException(ex);
   }
 }
-int timeout = conf.getInt(TIMEOUT_ATTR, DEFAULT_TIMEOUT);
+int timeout = conf.getInt(
+CommonConfigurationKeysPublic.KMS_CLIENT_TIMEOUT_SECONDS,
+CommonConfigurationKeysPublic.KMS_CLIENT_TIMEOUT_DEFAULT);
 authRetry = conf.getInt(AUTH_RETRY, DEFAULT_AUTH_RETRY);
 configurator = new TimeoutConnConfigurator(timeout, sslFactory);
 encKeyVersionQueue =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97c70c7a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 4fda2b8..3c8628c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -726,6 +726,15 @@ public class CommonConfigurationKeysPublic {
* <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
* core-default.xml</a>
*/
+  public static final String KMS_CLIENT_TIMEOUT_SECONDS =
+  "hadoop.security.kms.client.timeout";
+  public static final int KMS_CLIENT_TIMEOUT_DEFAULT = 60;
+
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   /** Default value is the number of providers specified. */
   public static final String KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY =
   "hadoop.security.kms.client.failover.max.retries";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97c70c7a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index bde7a85..8db9f44 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2340,6 +2340,14 @@
 key will be dropped. Default = 12hrs
   
 
+<property>
+  <name>hadoop.security.kms.client.timeout</name>
+  <value>60</value>
+  <description>
+    Sets value for KMS client connection timeout, and the read timeout
+    to KMS servers.
+  </description>
+</property>
 
 
   hadoop.security.kms.client.failover.sleep.base.millis


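The relocated key is read the same way on the client; a minimal sketch using
only the constants this commit introduces:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

    public class KmsClientTimeoutSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Mirrors the KMSClientProvider hunk above.
        int timeoutSeconds = conf.getInt(
            CommonConfigurationKeysPublic.KMS_CLIENT_TIMEOUT_SECONDS,
            CommonConfigurationKeysPublic.KMS_CLIENT_TIMEOUT_DEFAULT);
        System.out.println("KMS client timeout: " + timeoutSeconds + "s");
      }
    }
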
[31/50] [abbrv] hadoop git commit: YARN-7311. Fix TestRMWebServicesReservation parametrization for fair scheduler. (Yufei Gu via Subru).

2017-10-19 Thread haibochen
YARN-7311. Fix TestRMWebServicesReservation parametrization for fair scheduler. 
(Yufei Gu via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75323394
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75323394
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75323394

Branch: refs/heads/YARN-1011
Commit: 75323394fbc4211596a2c8fbb5e584f3183f742f
Parents: acabc65
Author: Subru Krishnan 
Authored: Tue Oct 17 12:38:06 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Oct 17 12:38:06 2017 -0700

--
 .../reservation/FairReservationSystem.java  | 13 +
 .../webapp/TestRMWebServicesReservation.java| 28 +++-
 2 files changed, 28 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75323394/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
index 9bf92c2..611fca8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/FairReservationSystem.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 
@@ -87,4 +88,16 @@ public class FairReservationSystem extends AbstractReservationSystem {
 .getSteadyFairShare();
   }
 
+  @Override
+  public Plan getPlan(String planName) {
+// make sure plan name is a full queue name in fair scheduler. For example,
+// "root.default" is the full queue name for "default".
+FSQueue queue = fairScheduler.getQueueManager().getQueue(planName);
+
+if (queue != null) {
+  return super.getPlan(queue.getQueueName());
+} else {
+  return null;
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75323394/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
index 657bec4..02aa65f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesReservation.java
@@ -145,21 +145,9 @@ public class TestRMWebServicesReservation extends JerseyTestBase {
   bind(GenericExceptionHandler.class);
   conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
   YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-  Configuration conf = new Configuration();
   conf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);
-  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
-  YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
-  ResourceScheduler.class);
-  CapacitySchedulerConfiguration csconf =
-  new CapacitySchedulerConfiguration(conf);
-  

[48/50] [abbrv] hadoop git commit: YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)

2017-10-19 Thread haibochen
YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bb9569f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bb9569f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bb9569f

Branch: refs/heads/YARN-1011
Commit: 2bb9569fca462395360254ce70caad4e41ad02a5
Parents: ce7cf66
Author: Karthik Kambatla 
Authored: Fri Jan 29 14:31:45 2016 -0800
Committer: Haibo Chen 
Committed: Thu Oct 19 21:26:14 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 ++-
 .../src/main/resources/yarn-default.xml |  21 
 .../RegisterNodeManagerRequest.java |  14 ++-
 .../pb/RegisterNodeManagerRequestPBImpl.java|  45 +++-
 .../server/api/records/OverAllocationInfo.java  |  45 
 .../server/api/records/ResourceThresholds.java  |  45 
 .../impl/pb/OverAllocationInfoPBImpl.java   | 106 +++
 .../impl/pb/ResourceThresholdsPBImpl.java   |  93 
 .../yarn_server_common_service_protos.proto |  10 ++
 .../hadoop/yarn/server/nodemanager/Context.java |   5 +
 .../yarn/server/nodemanager/NodeManager.java|  17 +++
 .../nodemanager/NodeStatusUpdaterImpl.java  |   7 +-
 .../monitor/ContainersMonitorImpl.java  |  34 ++
 .../amrmproxy/BaseAMRMProxyTest.java|  11 ++
 14 files changed, 455 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bb9569f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 0d5f2cb..be7e64d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1824,7 +1824,6 @@ public class YarnConfiguration extends Configuration {
  public static final boolean DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE =
   false;
 
-
   // Configurations for applicaiton life time monitor feature
   public static final String RM_APPLICATION_MONITOR_INTERVAL_MS =
   RM_PREFIX + "application-timeouts.monitor.interval-ms";
@@ -1832,6 +1831,18 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS =
   3000;
 
+  /** Overallocation (= allocation based on utilization) configs. */
+  public static final String NM_OVERALLOCATION_ALLOCATION_THRESHOLD =
+  NM_PREFIX + "overallocation.allocation-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD
+  = 0f;
+  @Private
+  public static final float MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD = 0.95f;
+  public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
+  = 0f;
+
   /**
* Interval of time the linux container executor should try cleaning up
* cgroups entry when cleaning up a container. This is required due to what 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bb9569f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 4e78947..04ff0e5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1602,6 +1602,27 @@
   
 
   
+  <property>
+    <description>The extent of over-allocation (container-allocation based on
+      current utilization instead of prior allocation) allowed on this node,
+      expressed as a float between 0 and 0.95. By default, over-allocation is
+      turned off (value = 0). When turned on, the node allows running
+      OPPORTUNISTIC containers when the aggregate utilization is under the
+      value specified here multiplied by the node's advertised capacity.
+    </description>
+    <name>yarn.nodemanager.overallocation.allocation-threshold</name>
+    <value>0f</value>
+  </property>
+
+  <property>
+    <description>When a node is over-allocated to improve utilization by
[33/50] [abbrv] hadoop git commit: YARN-6546. SLS is slow while loading 10k queues. (Yufei Gu via Haibo Chen)

2017-10-19 Thread haibochen
YARN-6546. SLS is slow while loading 10k queues. (Yufei Gu via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46eb1033
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46eb1033
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46eb1033

Branch: refs/heads/YARN-1011
Commit: 46eb1033a86ca53e7b94202567aef8af2417bdf8
Parents: f27a4ad
Author: Haibo Chen 
Authored: Tue Oct 17 16:03:46 2017 -0700
Committer: Haibo Chen 
Committed: Tue Oct 17 16:04:19 2017 -0700

--
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |  1 +
 .../sls/scheduler/CapacitySchedulerMetrics.java |  5 --
 .../sls/scheduler/FairSchedulerMetrics.java | 17 +
 .../sls/scheduler/FifoSchedulerMetrics.java |  5 +-
 .../sls/scheduler/SLSCapacityScheduler.java | 19 --
 .../yarn/sls/scheduler/SLSFairScheduler.java| 19 --
 .../yarn/sls/scheduler/SchedulerMetrics.java| 66 +---
 7 files changed, 36 insertions(+), 96 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46eb1033/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 59f9c17..dfdf7c9 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -731,6 +731,7 @@ public class SLSRunner extends Configured implements Tool {
 }
 
 queueAppNumMap.put(queueName, appNum);
+wrapper.getSchedulerMetrics().trackQueue(queueName);
   }
 
   private void runNewAM(String jobType, String user,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46eb1033/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/CapacitySchedulerMetrics.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/CapacitySchedulerMetrics.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/CapacitySchedulerMetrics.java
index a73f48c..89b44c6 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/CapacitySchedulerMetrics.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/CapacitySchedulerMetrics.java
@@ -28,9 +28,4 @@ public class CapacitySchedulerMetrics extends SchedulerMetrics {
   public CapacitySchedulerMetrics() {
 super();
   }
-
-  @Override
-  public void trackQueue(String queueName) {
-trackedQueues.add(queueName);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46eb1033/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
index 7b306f0..a5aee74 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
@@ -167,8 +167,9 @@ public class FairSchedulerMetrics extends SchedulerMetrics {
   }
 
   @Override
-  public void trackQueue(String queueName) {
-trackedQueues.add(queueName);
+  protected void registerQueueMetrics(String queueName) {
+super.registerQueueMetrics(queueName);
+
 FairScheduler fair = (FairScheduler) scheduler;
 final FSQueue queue = fair.getQueueManager().getQueue(queueName);
 registerQueueMetrics(queue, Metric.DEMAND);
@@ -209,16 +210,4 @@ public class FairSchedulerMetrics extends SchedulerMetrics {
   }
 );
   }
-
-  @Override
-  public void untrackQueue(String queueName) {
-trackedQueues.remove(queueName);
-
-for (Metric metric: Metric.values()) {
-  metrics.remove("variable.queue." + queueName + "." +
-  metric.value + ".memory");
-  metrics.remove("variable.queue." + queueName + "." +
-  metric.value + ".vcores");
-}
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46eb1033/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FifoSchedulerMetrics.java
--
diff --git 

[36/50] [abbrv] hadoop git commit: HADOOP-14958. Fix source-level compatibility after HADOOP-11252. Contributed by Junping Du.

2017-10-19 Thread haibochen
HADOOP-14958. Fix source-level compatibility after HADOOP-11252. Contributed by 
Junping Du.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b016f08f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b016f08f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b016f08f

Branch: refs/heads/YARN-1011
Commit: b016f08f67830ed3ca741bc6a10c3f5164781be5
Parents: 2523e1c
Author: Junping Du 
Authored: Wed Oct 18 15:06:30 2017 -0700
Committer: Junping Du 
Committed: Wed Oct 18 15:06:30 2017 -0700

--
 .../src/main/java/org/apache/hadoop/ipc/Client.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b016f08f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index c9ac615..a0417d6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -212,7 +212,8 @@ public class Client implements AutoCloseable {
* @param conf Configuration
* @param pingInterval the ping interval
*/
-  static final void setPingInterval(Configuration conf, int pingInterval) {
+  public static final void setPingInterval(Configuration conf,
+  int pingInterval) {
 conf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, pingInterval);
   }
 
@@ -223,7 +224,7 @@ public class Client implements AutoCloseable {
* @param conf Configuration
* @return the ping interval
*/
-  static final int getPingInterval(Configuration conf) {
+  public static final int getPingInterval(Configuration conf) {
 return conf.getInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
 CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);
   }

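With the accessors public again, callers outside org.apache.hadoop.ipc can tune
the ping interval directly; a minimal sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.Client;

    public class PingIntervalSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        Client.setPingInterval(conf, 30000);  // value is in milliseconds
        System.out.println(Client.getPingInterval(conf));
      }
    }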

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[29/50] [abbrv] hadoop git commit: HDFS-12613. Native EC coder should implement release() as idempotent function. (Lei (Eddy) Xu)

2017-10-19 Thread haibochen
HDFS-12613. Native EC coder should implement release() as idempotent function. 
(Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31ebccc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31ebccc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31ebccc9

Branch: refs/heads/YARN-1011
Commit: 31ebccc96238136560f4210bdf6766fe18e0650c
Parents: b406d8e
Author: Lei Xu 
Authored: Mon Oct 16 19:44:30 2017 -0700
Committer: Lei Xu 
Committed: Mon Oct 16 19:44:30 2017 -0700

--
 .../io/erasurecode/coder/ErasureCodingStep.java |  5 +-
 .../erasurecode/coder/ErasureDecodingStep.java  |  5 +-
 .../erasurecode/coder/ErasureEncodingStep.java  |  5 +-
 .../coder/HHXORErasureDecodingStep.java | 12 ++--
 .../coder/HHXORErasureEncodingStep.java | 10 +++-
 .../io/erasurecode/coder/util/HHUtil.java   |  4 +-
 .../rawcoder/AbstractNativeRawDecoder.java  | 14 -
 .../rawcoder/AbstractNativeRawEncoder.java  | 14 -
 .../rawcoder/NativeRSRawDecoder.java| 11 ++--
 .../rawcoder/NativeRSRawEncoder.java| 11 ++--
 .../rawcoder/NativeXORRawDecoder.java   | 14 +++--
 .../rawcoder/NativeXORRawEncoder.java   |  9 +--
 .../rawcoder/RSLegacyRawDecoder.java|  6 +-
 .../erasurecode/rawcoder/RawErasureDecoder.java | 17 --
 .../erasurecode/rawcoder/RawErasureEncoder.java | 16 +++--
 .../apache/hadoop/io/erasurecode/jni_common.c   |  5 +-
 .../hadoop/io/erasurecode/jni_rs_decoder.c  |  9 ++-
 .../hadoop/io/erasurecode/jni_rs_encoder.c  |  9 ++-
 .../hadoop/io/erasurecode/jni_xor_decoder.c |  9 ++-
 .../hadoop/io/erasurecode/jni_xor_encoder.c |  9 ++-
 .../erasurecode/coder/TestErasureCoderBase.java | 18 +-
 .../coder/TestHHErasureCoderBase.java   | 10 +++-
 .../rawcoder/RawErasureCoderBenchmark.java  |  9 +--
 .../erasurecode/rawcoder/TestDummyRawCoder.java | 15 -
 .../rawcoder/TestNativeRSRawCoder.java  |  6 ++
 .../rawcoder/TestNativeXORRawCoder.java |  7 +++
 .../erasurecode/rawcoder/TestRawCoderBase.java  | 61 ++--
 .../hadoop/hdfs/DFSStripedOutputStream.java |  2 +-
 .../hadoop/hdfs/PositionStripeReader.java   |  3 +-
 .../hadoop/hdfs/StatefulStripeReader.java   |  3 +-
 .../org/apache/hadoop/hdfs/StripeReader.java|  7 ++-
 .../StripedBlockChecksumReconstructor.java  |  2 +-
 .../erasurecode/StripedBlockReconstructor.java  |  2 +-
 .../apache/hadoop/hdfs/StripedFileTestUtil.java |  6 +-
 34 files changed, 269 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ebccc9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java
index 9dd0aed..fb89d99 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java
@@ -21,6 +21,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECChunk;
 
+import java.io.IOException;
+
 /**
  * Erasure coding step that's involved in encoding/decoding of a block group.
  */
@@ -47,7 +49,8 @@ public interface ErasureCodingStep {
* @param inputChunks
* @param outputChunks
*/
-  void performCoding(ECChunk[] inputChunks, ECChunk[] outputChunks);
+  void performCoding(ECChunk[] inputChunks, ECChunk[] outputChunks)
+  throws IOException;
 
   /**
* Notify erasure coder that all the chunks of input blocks are processed so

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ebccc9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureDecodingStep.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureDecodingStep.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureDecodingStep.java
index ae396a2..24f5547 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureDecodingStep.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureDecodingStep.java
@@ -22,6 

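The essence of an idempotent release() is that a repeat call must be a no-op
rather than a double free of the native coder; a hedged sketch of the usual
guard pattern (not the actual AbstractNativeRawDecoder code, whose hunk is
truncated here):

    public class IdempotentReleaseSketch {
      private long nativeCoder = 42L;   // stand-in for a JNI handle
      private boolean released = false;

      public synchronized void release() {
        if (released) {
          return;                       // second and later calls: no-op
        }
        // destroyCoder(nativeCoder);   // hypothetical native call
        nativeCoder = 0L;
        released = true;
      }

      public static void main(String[] args) {
        IdempotentReleaseSketch coder = new IdempotentReleaseSketch();
        coder.release();
        coder.release();                // safe to call again
      }
    }
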
[41/50] [abbrv] hadoop git commit: YARN-7338. Support same origin policy for cross site scripting prevention. (Sunil G via wangda)

2017-10-19 Thread haibochen
YARN-7338. Support same origin policy for cross site scripting prevention. 
(Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/298b174f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/298b174f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/298b174f

Branch: refs/heads/YARN-1011
Commit: 298b174f663a06e67098f7b5cd645769c1a98a80
Parents: 3dd3d1d
Author: Wangda Tan 
Authored: Thu Oct 19 14:44:42 2017 -0700
Committer: Wangda Tan 
Committed: Thu Oct 19 14:44:42 2017 -0700

--
 .../org/apache/hadoop/yarn/webapp/WebApps.java  | 22 
 1 file changed, 22 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/298b174f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
index 9c5e8c3..4f1cacf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
@@ -401,6 +401,7 @@ public class WebApps {
   WebApp webApp = build(webapp);
   HttpServer2 httpServer = webApp.httpServer();
   if (ui2Context != null) {
+addFiltersForNewContext(ui2Context);
 httpServer.addHandlerAtFront(ui2Context);
   }
   try {
@@ -413,6 +414,27 @@ public class WebApps {
   return webApp;
 }
 
+private void addFiltersForNewContext(WebAppContext ui2Context) {
+  Map<String, String> params = getConfigParameters(csrfConfigPrefix);
+
+  if (hasCSRFEnabled(params)) {
+LOG.info("CSRF Protection has been enabled for the {} application. "
++ "Please ensure that there is an authentication mechanism "
++ "enabled (kerberos, custom, etc).", name);
+String restCsrfClassName = RestCsrfPreventionFilter.class.getName();
+HttpServer2.defineFilter(ui2Context, restCsrfClassName,
+restCsrfClassName, params, new String[]{"/*"});
+  }
+
+  params = getConfigParameters(xfsConfigPrefix);
+
+  if (hasXFSEnabled()) {
+String xfsClassName = XFrameOptionsFilter.class.getName();
+HttpServer2.defineFilter(ui2Context, xfsClassName, xfsClassName, params,
+new String[]{"/*"});
+  }
+}
+
 private String inferHostClass() {
   String thisClass = this.getClass().getName();
   Throwable t = new Throwable();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[46/50] [abbrv] hadoop git commit: HDFS-12620. Backporting HDFS-10467 to branch-2. Contributed by Inigo Goiri.

2017-10-19 Thread haibochen
HDFS-12620. Backporting HDFS-10467 to branch-2. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f1c0376
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f1c0376
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f1c0376

Branch: refs/heads/YARN-1011
Commit: 0f1c0376186de6446d595be7bb445ed6b71ae499
Parents: 7b4b018
Author: Inigo Goiri 
Authored: Thu Oct 19 18:08:45 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 18:08:45 2017 -0700

--
 .../org/apache/hadoop/hdfs/server/federation/MockResolver.java  | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f1c0376/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index a481553..151d731 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -264,7 +264,10 @@ public class MockResolver
   @Override
   public PathLocation getDestinationForPath(String path) throws IOException {
 List<RemoteLocation> remoteLocations = new LinkedList<>();
-for (String key : this.locations.keySet()) {
+// We go from the leaves to the root
+List<String> keys = new ArrayList<>(this.locations.keySet());
+Collections.sort(keys, Collections.reverseOrder());
+for (String key : keys) {
   if (path.startsWith(key)) {
 for (RemoteLocation location : this.locations.get(key)) {
   String finalPath = location.getDest() + path.substring(key.length());

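Sorting the mount keys in reverse lexicographic order is what makes the
startsWith scan hit the most specific mount point first; a self-contained
sketch of the same idea:

    import java.util.*;

    public class LongestPrefixSketch {
      public static void main(String[] args) {
        Map<String, String> mounts = new HashMap<>();
        mounts.put("/", "ns0");
        mounts.put("/tmp", "ns1");
        mounts.put("/tmp/a", "ns2");

        String path = "/tmp/a/file";
        List<String> keys = new ArrayList<>(mounts.keySet());
        Collections.sort(keys, Collections.reverseOrder());
        for (String key : keys) {
          if (path.startsWith(key)) {          // deepest match wins
            System.out.println(path + " -> " + mounts.get(key)); // ns2
            break;
          }
        }
      }
    }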

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[43/50] [abbrv] hadoop git commit: YARN-7294. TestSignalContainer#testSignalRequestDeliveryToNM fails intermittently with Fair Scheduler. (Contributed by Miklos Szegedi)

2017-10-19 Thread haibochen
YARN-7294. TestSignalContainer#testSignalRequestDeliveryToNM fails 
intermittently with Fair Scheduler. (Contributed by Miklos Szegedi)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbd2b73e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbd2b73e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbd2b73e

Branch: refs/heads/YARN-1011
Commit: cbd2b73ef81a7e275c5d4f842cac5b81ff2f8c84
Parents: c1b08ba
Author: Yufei Gu 
Authored: Thu Oct 19 16:39:25 2017 -0700
Committer: Yufei Gu 
Committed: Thu Oct 19 16:39:25 2017 -0700

--
 .../yarn/server/resourcemanager/TestSignalContainer.java | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbd2b73e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
index 2688987..fac0b96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.junit.Assert;
 
 import org.apache.commons.logging.Log;
@@ -50,6 +51,10 @@ public class TestSignalContainer {
 Logger rootLogger = LogManager.getRootLogger();
 rootLogger.setLevel(Level.DEBUG);
 MockRM rm = new MockRM();
+FairScheduler fs = null;
+if (rm.getResourceScheduler().getClass() == FairScheduler.class) {
+  fs = (FairScheduler)rm.getResourceScheduler();
+}
 rm.start();
 
 MockNM nm1 = rm.registerNode("h1:1234", 5000);
@@ -78,6 +83,9 @@ public class TestSignalContainer {
   List<Container> allocation = am.allocate(new ArrayList<ResourceRequest>(),
   new ArrayList<ContainerId>()).getAllocatedContainers();
   conts.addAll(allocation);
+  if (fs != null) {
+nm1.nodeHeartbeat(true);
+  }
 }
 Assert.assertEquals(request, conts.size());
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[20/50] [abbrv] hadoop git commit: HDFS-12642. Log block and datanode details in BlockRecoveryWorker.

2017-10-19 Thread haibochen
HDFS-12642. Log block and datanode details in BlockRecoveryWorker.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21bc8555
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21bc8555
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21bc8555

Branch: refs/heads/YARN-1011
Commit: 21bc85558718490e558c5b3bdb44c9c64eada994
Parents: 7bd7009
Author: Xiao Chen 
Authored: Mon Oct 16 10:33:16 2017 -0700
Committer: Xiao Chen 
Committed: Mon Oct 16 10:34:06 2017 -0700

--
 .../hdfs/server/datanode/BlockRecoveryWorker.java | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21bc8555/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 15c4af1..2ecd986 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -197,10 +197,9 @@ public class BlockRecoveryWorker {
   long blockId = (isTruncateRecovery) ?
   rBlock.getNewBlock().getBlockId() : block.getBlockId();
 
-  if (LOG.isDebugEnabled()) {
-LOG.debug("block=" + block + ", (length=" + block.getNumBytes()
-+ "), syncList=" + syncList);
-  }
+  LOG.info("BlockRecoveryWorker: block={} (length={}),"
+  + " isTruncateRecovery={}, syncList={}", block,
+  block.getNumBytes(), isTruncateRecovery, syncList);
 
   // syncList.isEmpty() means that all data-nodes do not have the block
   // or their replicas have 0 length.
@@ -289,6 +288,11 @@ public class BlockRecoveryWorker {
 newBlock.setNumBytes(rBlock.getNewBlock().getNumBytes());
   }
 
+  LOG.info("BlockRecoveryWorker: block={} (length={}), bestState={},"
+  + " newBlock={} (length={}), participatingList={}",
+  block, block.getNumBytes(), bestState.name(), newBlock,
+  newBlock.getNumBytes(), participatingList);
+
   List failedList = new ArrayList<>();
   final List successList = new ArrayList<>();
   for (BlockRecord r : participatingList) {
@@ -542,7 +546,7 @@ public class BlockRecoveryWorker {
 ExtendedBlock block = rb.getBlock();
 DatanodeInfo[] targets = rb.getLocations();
 
-LOG.info(who + " calls recoverBlock(" + block
+LOG.info("BlockRecoveryWorker: " + who + " calls recoverBlock(" + block
 + ", targets=[" + Joiner.on(", ").join(targets) + "]"
 + ", newGenerationStamp=" + rb.getNewGenerationStamp()
 + ", newBlock=" + rb.getNewBlock()
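
For context, the change above moves from string concatenation guarded by isDebugEnabled() to SLF4J parameterized logging, where the {} placeholders defer message construction until the logger has checked the level. A minimal illustration of the two styles:

    // Old: the guard avoids building the message when debug is off.
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + " (length=" + block.getNumBytes() + ")");
    }

    // New: no guard needed; formatting happens only if the level is enabled.
    LOG.info("block={} (length={})", block, block.getNumBytes());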





[45/50] [abbrv] hadoop git commit: YARN-7359. TestAppManager.testQueueSubmitWithNoPermission() should be scheduler agnostic. (Contributed by Haibo Chen)

2017-10-19 Thread haibochen
YARN-7359. TestAppManager.testQueueSubmitWithNoPermission() should be scheduler 
agnostic. (Contributed by Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b4b0187
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b4b0187
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b4b0187

Branch: refs/heads/YARN-1011
Commit: 7b4b0187806601e33f5a88d48991e7c12ee4419f
Parents: ca8ddc6
Author: Yufei Gu 
Authored: Thu Oct 19 16:51:29 2017 -0700
Committer: Yufei Gu 
Committed: Thu Oct 19 16:51:47 2017 -0700

--
 .../apache/hadoop/yarn/server/resourcemanager/TestAppManager.java  | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b4b0187/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index b24a309..8179321 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -305,8 +305,6 @@ public class TestAppManager{
   @Test
   public void testQueueSubmitWithNoPermission() throws IOException {
 YarnConfiguration conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
 conf.set(PREFIX + "root.acl_submit_applications", " ");
 conf.set(PREFIX + "root.acl_administer_queue", " ");
 





[17/50] [abbrv] hadoop git commit: HADOOP-14935. Azure: POSIX permissions are taking effect in access() method even when authorization is enabled. Contributed by Santhosh G Nayak

2017-10-19 Thread haibochen
HADOOP-14935. Azure: POSIX permissions are taking effect in access() method 
even when authorization is enabled.
Contributed by Santhosh G Nayak


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fcc3a1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fcc3a1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fcc3a1f

Branch: refs/heads/YARN-1011
Commit: 9fcc3a1fc8cab873034f5c308ceb2d5671a954e8
Parents: 20575ec
Author: Steve Loughran 
Authored: Mon Oct 16 16:01:47 2017 +0100
Committer: Steve Loughran 
Committed: Mon Oct 16 16:01:47 2017 +0100

--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  63 +++--
 .../hadoop/fs/azure/security/Constants.java |   1 +
 .../TestNativeAzureFileSystemAuthorization.java | 280 ++-
 3 files changed, 254 insertions(+), 90 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fcc3a1f/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 9effb3b..85a46ea 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -71,9 +71,11 @@ import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
 import org.apache.hadoop.fs.azure.security.Constants;
 import org.apache.hadoop.fs.azure.security.RemoteWasbDelegationTokenManager;
 import org.apache.hadoop.fs.azure.security.WasbDelegationTokenManager;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
@@ -2650,22 +2652,6 @@ public class NativeAzureFileSystem extends FileSystem {
   public FileStatus getFileStatus(Path f) throws FileNotFoundException, IOException {
 
 LOG.debug("Getting the file status for {}", f.toString());
-
-// Capture the absolute path and the path to key.
-Path absolutePath = makeAbsolute(f);
-
-if (!isRenamePendingFile(absolutePath)) {
-  Path ancestor = getAncestor(absolutePath);
-  if (ancestor.equals(absolutePath) && !ancestor.equals(new Path("/"))) {
-performAuthCheck(ancestor.getParent(), WasbAuthorizationOperations.READ,
-"getFileStatus", absolutePath);
-  }
-  else {
-performAuthCheck(ancestor, WasbAuthorizationOperations.READ,
-"getFileStatus", absolutePath);
-  }
-}
-
 return getFileStatusInternal(f);
   }
 
@@ -2693,7 +2679,15 @@ public class NativeAzureFileSystem extends FileSystem {
 }
   }
 
-  protected FileStatus getFileStatusInternal(Path f) throws FileNotFoundException, IOException {
+  /**
+   * Inner implementation of {@link #getFileStatus(Path)}.
+   * Return a file status object that represents the path.
+   * @param f The path we want information from
+   * @return a FileStatus object
+   * @throws FileNotFoundException when the path does not exist
+   * @throws IOException Other failure
+   */
+  private FileStatus getFileStatusInternal(Path f) throws FileNotFoundException, IOException {
 
 Path absolutePath = makeAbsolute(f);
 String key = pathToKey(absolutePath);
@@ -3707,6 +3701,41 @@ public class NativeAzureFileSystem extends FileSystem {
 }
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws IOException {
+if (azureAuthorization && authorizer != null) {
+  try {
+// Required to check the existence of the path.
+getFileStatus(path);
+switch (mode) {
+case READ:
+case READ_EXECUTE:
+  performAuthCheck(path, WasbAuthorizationOperations.READ, "access", path);
+  break;
+case WRITE:
+case WRITE_EXECUTE:
+  performAuthCheck(path, WasbAuthorizationOperations.WRITE, "access",
+  path);
+  break;
+case READ_WRITE:
+case ALL:
+  performAuthCheck(path, WasbAuthorizationOperations.READ, "access", path);
+  performAuthCheck(path, WasbAuthorizationOperations.WRITE, "access",
+  path);
+  break;
+case EXECUTE:
+case NONE:
+
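
The hunk is truncated in this digest, but the shape of the change is clear: access() now consults the WASB authorizer directly, mapping each FsAction onto READ and/or WRITE authorization checks instead of evaluating POSIX permission bits. A hedged sketch of the public API involved (conf and the path are placeholders):

    // FileSystem#access throws AccessControlException when the caller lacks
    // the requested permission, and FileNotFoundException for missing paths.
    FileSystem fs = FileSystem.get(conf); // assumed to point at a WASB store
    Path p = new Path("/data/report.csv"); // placeholder path
    try {
      fs.access(p, FsAction.READ_WRITE); // checks both READ and WRITE above
      // safe to open the file for read/write here
    } catch (AccessControlException e) {
      // the authorizer denied READ or WRITE on the path
    }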

[50/50] [abbrv] hadoop git commit: YARN-6705 Add separate NM preemption thresholds for cpu and memory (Haibo Chen)

2017-10-19 Thread haibochen
YARN-6705 Add separate NM preemption thresholds for cpu and memory  (Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5a996e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5a996e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5a996e8

Branch: refs/heads/YARN-1011
Commit: e5a996e85778e08d80464d998c40b2493317314a
Parents: 85a6887
Author: Haibo Chen 
Authored: Wed Jul 12 12:32:13 2017 -0700
Committer: Haibo Chen 
Committed: Thu Oct 19 21:26:14 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 31 +--
 .../src/main/resources/yarn-default.xml | 34 ++--
 .../monitor/ContainersMonitorImpl.java  | 42 +---
 3 files changed, 85 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5a996e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f312f24..c8ab62a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1860,10 +1860,33 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_OVERALLOCATION_MEMORY_UTILIZATION_THRESHOLD =
   NM_PREFIX + "overallocation.memory-utilization-threshold";
 
-  public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
-  NM_PREFIX + "overallocation.preemption-threshold";
-  public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
-  = 0.96f;
+  /**
+   * The CPU utilization threshold, if went beyond for a few times in a row,
+   * OPPORTUNISTIC containers started due to overallocation should start
+   * getting preempted.
+   */
+  public static final String NM_OVERALLOCATION_CPU_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold.cpu";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_CPU_PREEMPTION_THRESHOLD = 0.99f;
+
+  /**
+   * The number of times that CPU utilization must go over the CPU preemption
+   * threshold consecutively before preemption starts to kick in.
+   */
+  public static final String NM_OVERALLOCATION_PREEMPTION_CPU_COUNT =
+  NM_PREFIX + "overallocation.preemption-threshold-count.cpu";
+  public static final int DEFAULT_NM_OVERALLOCATION_PREEMPTION_CPU_COUNT = 4;
+
+
+  /**
+   * The memory utilization threshold beyond which OPPORTUNISTIC containers
+   * started due to overallocation should start getting preempted.
+   */
+  public static final String NM_OVERALLOCATION_MEMORY_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold.memory";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_MEMORY_PREEMPTION_THRESHOLD = 0.95f;
 
   /**
* Interval of time the linux container executor should try cleaning up
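
Preemption of OPPORTUNISTIC containers is now keyed off two knobs per resource: a utilization threshold and, for CPU only, a consecutive-sample count. A small sketch of how a monitor might read them, using the constants added above (the surrounding setup is assumed):

    Configuration conf = new YarnConfiguration();
    float cpuThreshold = conf.getFloat(
        YarnConfiguration.NM_OVERALLOCATION_CPU_PREEMPTION_THRESHOLD,
        YarnConfiguration.DEFAULT_NM_OVERALLOCATION_CPU_PREEMPTION_THRESHOLD);
    int cpuOverLimitCount = conf.getInt(
        YarnConfiguration.NM_OVERALLOCATION_PREEMPTION_CPU_COUNT,
        YarnConfiguration.DEFAULT_NM_OVERALLOCATION_PREEMPTION_CPU_COUNT);
    // Preempt only after CPU stays above cpuThreshold for cpuOverLimitCount
    // consecutive samples; the memory threshold has no count and triggers
    // as soon as it is crossed.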

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5a996e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 3a98060..9e97ddf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1645,11 +1645,37 @@
 
   
 When a node is over-allocated to improve utilization by
-  running OPPORTUNISTIC containers, this config captures the utilization
-  beyond which OPPORTUNISTIC containers should start getting preempted.
+  running OPPORTUNISTIC containers, this config captures the CPU
+  utilization beyond which OPPORTUNISTIC containers should start getting
+  preempted. This is used in combination with
+  yarn.nodemanager.overallocation.preemption-threshold-count.cpu, that is,
+  only when the CPU utilization goes over this threshold consecutively for
+  a few times will preemption kicks in.
 
-yarn.nodemanager.overallocation.preemption-threshold
-0.96
+yarn.nodemanager.overallocation.preemption-threshold.cpu
+0.99
+  
+
+  
+When a node is over-allocated to 

[11/50] [abbrv] hadoop git commit: HADOOP-14938. Configuration.updatingResource map should be initialized lazily (mi...@cloudera.com via rkanter)

2017-10-19 Thread haibochen
HADOOP-14938. Configuration.updatingResource map should be initialized lazily 
(mi...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e163f418
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e163f418
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e163f418

Branch: refs/heads/YARN-1011
Commit: e163f41850bd09a17d3102a3af0af2e3cd831ab0
Parents: 7a27c2c
Author: Robert Kanter 
Authored: Fri Oct 13 13:52:58 2017 -0700
Committer: Robert Kanter 
Committed: Fri Oct 13 13:52:58 2017 -0700

--
 .../org/apache/hadoop/conf/Configuration.java   | 97 
 1 file changed, 57 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e163f418/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 9d5bb1b..f94eba6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -290,9 +290,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
 
   /**
* Stores the mapping of key to the resource which modifies or loads 
-   * the key most recently
+   * the key most recently. Created lazily to avoid wasting memory.
*/
-  private Map<String, String[]> updatingResource;
+  private volatile Map<String, String[]> updatingResource;
 
   /**
* Specify exact input factory to avoid time finding correct one.
@@ -749,7 +749,6 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
*/
   public Configuration(boolean loadDefaults) {
 this.loadDefaults = loadDefaults;
-updatingResource = new ConcurrentHashMap();
 
 // Register all classes holding property tags with
 REGISTERED_TAG_CLASS.put("core", CorePropertyTag.class);
@@ -768,25 +767,27 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
*/
   @SuppressWarnings("unchecked")
   public Configuration(Configuration other) {
-   this.resources = (ArrayList) other.resources.clone();
-   synchronized(other) {
- if (other.properties != null) {
-   this.properties = (Properties)other.properties.clone();
- }
-
- if (other.overlay!=null) {
-   this.overlay = (Properties)other.overlay.clone();
- }
-
- this.updatingResource = new ConcurrentHashMap(
- other.updatingResource);
- this.finalParameters = Collections.newSetFromMap(
- new ConcurrentHashMap());
- this.finalParameters.addAll(other.finalParameters);
- this.REGISTERED_TAG_CLASS.putAll(other.REGISTERED_TAG_CLASS);
- this.propertyTagsMap.putAll(other.propertyTagsMap);
-   }
-   
+this.resources = (ArrayList) other.resources.clone();
+synchronized(other) {
+  if (other.properties != null) {
+this.properties = (Properties)other.properties.clone();
+  }
+
+  if (other.overlay!=null) {
+this.overlay = (Properties)other.overlay.clone();
+  }
+
+  if (other.updatingResource != null) {
+this.updatingResource = new ConcurrentHashMap(
+   other.updatingResource);
+  }
+  this.finalParameters = Collections.newSetFromMap(
+  new ConcurrentHashMap());
+  this.finalParameters.addAll(other.finalParameters);
+  this.REGISTERED_TAG_CLASS.putAll(other.REGISTERED_TAG_CLASS);
+  this.propertyTagsMap.putAll(other.propertyTagsMap);
+}
+
 synchronized(Configuration.class) {
   REGISTRY.put(this, null);
 }
@@ -1277,14 +1278,14 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
 String newSource = (source == null ? "programmatically" : source);
 
 if (!isDeprecated(name)) {
-  updatingResource.put(name, new String[] {newSource});
+  putIntoUpdatingResource(name, new String[] {newSource});
   String[] altNames = getAlternativeNames(name);
   if(altNames != null) {
 for(String n: altNames) {
   if(!n.equals(name)) {
 getOverlay().setProperty(n, value);
 getProps().setProperty(n, value);
-updatingResource.put(n, new String[] {newSource});
+putIntoUpdatingResource(n, new String[] {newSource});
   }
 }
   }
@@ -1295,7 +1296,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
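
The helper putIntoUpdatingResource() that the patch calls is not shown in this hunk. Given that the field is now volatile and created lazily, a plausible shape is the classic double-checked lazy initialization below; this is a sketch of the likely body, not the committed code:

    private void putIntoUpdatingResource(String key, String[] value) {
      Map<String, String[]> localUR = updatingResource;
      if (localUR == null) {
        synchronized (this) {
          localUR = updatingResource;
          if (localUR == null) {
            // Allocate on first write only, saving memory for the many
            // Configuration instances that never record property sources.
            localUR = new ConcurrentHashMap<>();
            updatingResource = localUR;
          }
        }
      }
      localUR.put(key, value);
    }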

[04/50] [abbrv] hadoop git commit: HADOOP-13102. Update GroupsMapping documentation to reflect the new changes. Contributed by Esther Kundin.

2017-10-19 Thread haibochen
HADOOP-13102. Update GroupsMapping documentation to reflect the new changes. 
Contributed by Esther Kundin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/075358eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/075358eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/075358eb

Branch: refs/heads/YARN-1011
Commit: 075358eb6fff5ae4a40ac4dfde292e2a9a4ceddf
Parents: 8bcc49e
Author: Anu Engineer 
Authored: Wed Oct 11 15:58:20 2017 -0700
Committer: Anu Engineer 
Committed: Wed Oct 11 15:58:20 2017 -0700

--
 .../hadoop-common/src/site/markdown/GroupsMapping.md| 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/075358eb/hadoop-common-project/hadoop-common/src/site/markdown/GroupsMapping.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/GroupsMapping.md b/hadoop-common-project/hadoop-common/src/site/markdown/GroupsMapping.md
index 89aca16..806ed54 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/GroupsMapping.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/GroupsMapping.md
@@ -85,9 +85,10 @@ This file should be readable only by the Unix user running the daemons.
 
 It is possible to set a maximum time limit when searching and awaiting a result.
 Set `hadoop.security.group.mapping.ldap.directory.search.timeout` to 0 if infinite wait period is desired. Default is 10,000 milliseconds (10 seconds).
+This is the limit for each ldap query.  If `hadoop.security.group.mapping.ldap.search.group.hierarchy.levels` is set to a positive value, then the total latency will be bounded by max(Recur Depth in LDAP, `hadoop.security.group.mapping.ldap.search.group.hierarchy.levels` ) * `hadoop.security.group.mapping.ldap.directory.search.timeout`.
 
-The implementation does not attempt to resolve group hierarchies. Therefore, a user must be an explicit member of a group object
-in order to be considered a member.
+`hadoop.security.group.mapping.ldap.base` configures how far to walk up the groups hierarchy when resolving groups.
+By default, with a limit of 0, in order to be considered a member of a group, the user must be an explicit member in LDAP.  Otherwise, it will traverse the group hierarchy `hadoop.security.group.mapping.ldap.search.group.hierarchy.levels` levels up.
 
 
 ### Active Directory ###
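
To make the documented bound concrete: with hadoop.security.group.mapping.ldap.search.group.hierarchy.levels set to 3 and the default 10,000 ms search timeout, group resolution is bounded by max(LDAP recursion depth, 3) * 10 s = 30 s, assuming the recursion depth does not exceed 3.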





[23/50] [abbrv] hadoop git commit: YARN-7124. LogAggregationTFileController deletes/renames while file is open. Contributed by Jason Lowe.

2017-10-19 Thread haibochen
YARN-7124. LogAggregationTFileController deletes/renames while file is open. 
Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fcbe7cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fcbe7cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fcbe7cf

Branch: refs/heads/YARN-1011
Commit: 1fcbe7cf5f2f7c609cd4158912f151848268ca9e
Parents: a50be1b
Author: Junping Du 
Authored: Mon Oct 16 13:57:03 2017 -0700
Committer: Junping Du 
Committed: Mon Oct 16 13:57:03 2017 -0700

--
 .../filecontroller/tfile/LogAggregationTFileController.java | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fcbe7cf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
index 92e3a08..5064e26 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
@@ -96,8 +96,10 @@ public class LogAggregationTFileController
 
   @Override
   public void closeWriter() {
-this.writer.close();
-this.writer = null;
+if (this.writer != null) {
+  this.writer.close();
+  this.writer = null;
+}
   }
 
   @Override
@@ -117,6 +119,9 @@ public class LogAggregationTFileController
   record.increcleanupOldLogTimes();
 }
 
+// close the writer before the file is renamed or deleted
+closeWriter();
+
 final Path renamedPath = record.getRollingMonitorInterval() <= 0
 ? record.getRemoteNodeLogFileForApp() : new Path(
 record.getRemoteNodeLogFileForApp().getParent(),
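
The net effect is that closeWriter() becomes an idempotent, null-guarded close that the aggregation path can invoke defensively before any rename or delete, fixing the open-file race in the commit title. The general idiom, as a sketch with a hypothetical helper name:

    // Safe-close idiom: the second and later calls are no-ops, so callers
    // may close eagerly before renaming or deleting the underlying file.
    public synchronized void closeIfOpen() {
      if (writer != null) {
        writer.close();
        writer = null;
      }
    }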





[35/50] [abbrv] hadoop git commit: MAPREDUCE-6972. Enable try-with-resources for RecordReader. Contributed by Zoltan Haindrich.

2017-10-19 Thread haibochen
MAPREDUCE-6972. Enable try-with-resources for RecordReader. Contributed by 
Zoltan Haindrich.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2523e1cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2523e1cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2523e1cc

Branch: refs/heads/YARN-1011
Commit: 2523e1cce5f6d6c3762c958c7f3138ab486c8497
Parents: 86ee0c5
Author: Akira Ajisaka 
Authored: Wed Oct 18 11:18:03 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Oct 18 11:18:39 2017 +0900

--
 .../src/main/java/org/apache/hadoop/mapred/RecordReader.java   | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2523e1cc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordReader.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordReader.java
index 6e2c89f..0a996dc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordReader.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapred;
 
+import java.io.Closeable;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -38,7 +39,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public interface RecordReader<K, V> {
+public interface RecordReader<K, V> extends Closeable{
   /** 
* Reads the next key/value pair from the input for processing.
*
@@ -74,7 +75,8 @@ public interface RecordReader<K, V> {
* Close this {@link InputSplit} to future operations.
* 
* @throws IOException
-   */ 
+   */
+  @Override
   public void close() throws IOException;
 
   /**
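
Because RecordReader now extends Closeable, readers can be managed with try-with-resources. A minimal sketch against the old mapred API, assuming format, split, and job are already set up:

    try (RecordReader<LongWritable, Text> reader =
             format.getRecordReader(split, job, Reporter.NULL)) {
      LongWritable key = reader.createKey();
      Text value = reader.createValue();
      while (reader.next(key, value)) {
        // process one key/value pair
      }
    } // reader.close() runs automatically, even if next() throws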





[14/50] [abbrv] hadoop git commit: HADOOP-13055. Implement linkMergeSlash and linkFallback for ViewFileSystem

2017-10-19 Thread haibochen
HADOOP-13055. Implement linkMergeSlash and linkFallback for ViewFileSystem


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/133d7ca7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/133d7ca7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/133d7ca7

Branch: refs/heads/YARN-1011
Commit: 133d7ca76e3d4b60292d57429d4259e80bec650a
Parents: 3fb4718
Author: Manoj Govindassamy 
Authored: Fri Oct 13 17:43:13 2017 -0700
Committer: Manoj Govindassamy 
Committed: Fri Oct 13 17:43:21 2017 -0700

--
 .../org/apache/hadoop/fs/viewfs/ConfigUtil.java |  68 +++-
 .../org/apache/hadoop/fs/viewfs/Constants.java  |  16 +-
 .../org/apache/hadoop/fs/viewfs/InodeTree.java  | 358 ---
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  13 +-
 .../org/apache/hadoop/fs/viewfs/ViewFs.java |  14 +-
 .../fs/viewfs/ViewFileSystemBaseTest.java   |   4 +-
 .../hadoop-hdfs/src/site/markdown/ViewFs.md |  44 ++-
 .../viewfs/TestViewFileSystemLinkFallback.java  | 264 ++
 .../TestViewFileSystemLinkMergeSlash.java   | 234 
 9 files changed, 945 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/133d7ca7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
index a5fc62e..4c3dae9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs.viewfs;
 
 import java.net.URI;
+import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
@@ -68,7 +69,72 @@ public class ConfigUtil {
 addLink( conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, 
 src, target);   
   }
-  
+
+  /**
+   * Add a LinkMergeSlash to the config for the specified mount table.
+   * @param conf
+   * @param mountTableName
+   * @param target
+   */
+  public static void addLinkMergeSlash(Configuration conf,
+  final String mountTableName, final URI target) {
+conf.set(getConfigViewFsPrefix(mountTableName) + "." +
+Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH, target.toString());
+  }
+
+  /**
+   * Add a LinkMergeSlash to the config for the default mount table.
+   * @param conf
+   * @param target
+   */
+  public static void addLinkMergeSlash(Configuration conf, final URI target) {
+addLinkMergeSlash(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
+target);
+  }
+
+  /**
+   * Add a LinkFallback to the config for the specified mount table.
+   * @param conf
+   * @param mountTableName
+   * @param target
+   */
+  public static void addLinkFallback(Configuration conf,
+  final String mountTableName, final URI target) {
+conf.set(getConfigViewFsPrefix(mountTableName) + "." +
+Constants.CONFIG_VIEWFS_LINK_FALLBACK, target.toString());
+  }
+
+  /**
+   * Add a LinkFallback to the config for the default mount table.
+   * @param conf
+   * @param target
+   */
+  public static void addLinkFallback(Configuration conf, final URI target) {
+addLinkFallback(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
+target);
+  }
+
+  /**
+   * Add a LinkMerge to the config for the specified mount table.
+   * @param conf
+   * @param mountTableName
+   * @param targets
+   */
+  public static void addLinkMerge(Configuration conf,
+  final String mountTableName, final URI[] targets) {
+conf.set(getConfigViewFsPrefix(mountTableName) + "." +
+Constants.CONFIG_VIEWFS_LINK_MERGE, Arrays.toString(targets));
+  }
+
+  /**
+   * Add a LinkMerge to the config for the default mount table.
+   * @param conf
+   * @param targets
+   */
+  public static void addLinkMerge(Configuration conf, final URI[] targets) {
+addLinkMerge(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, targets);
+  }
+
   /**
*
* @param conf
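
Usage of the new helpers is symmetric with the existing addLink() methods. A hedged sketch (the URIs are placeholders):

    Configuration conf = new Configuration();
    // Merge the root of one namespace into the view at "/".
    ConfigUtil.addLinkMergeSlash(conf, URI.create("hdfs://nn1/"));
    // Alternatively, send paths not covered by any mount point to a fallback.
    ConfigUtil.addLinkFallback(conf, URI.create("hdfs://nn2/fallback"));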

http://git-wip-us.apache.org/repos/asf/hadoop/blob/133d7ca7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index 1a07c10..aa1bc7e 100644
--- 

[42/50] [abbrv] hadoop git commit: YARN-7345. GPU Isolation: Incorrect minor device numbers written to devices.deny file. (Jonathan Hung via wangda)

2017-10-19 Thread haibochen
YARN-7345. GPU Isolation: Incorrect minor device numbers written to 
devices.deny file. (Jonathan Hung via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1b08ba7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1b08ba7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1b08ba7

Branch: refs/heads/YARN-1011
Commit: c1b08ba720486e74461f0ec94a204c1ba4014c06
Parents: 298b174
Author: Wangda Tan 
Authored: Thu Oct 19 14:45:44 2017 -0700
Committer: Wangda Tan 
Committed: Thu Oct 19 14:45:44 2017 -0700

--
 .../container-executor/impl/modules/gpu/gpu-module.c   |  2 +-
 .../test/modules/gpu/test-gpu-module.cc| 13 +
 2 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1b08ba7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
index f96645d..1a1b164 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
@@ -108,7 +108,7 @@ static int internal_handle_gpu_request(
 char param_value[128];
 memset(param_value, 0, sizeof(param_value));
 snprintf(param_value, sizeof(param_value), "c %d:%d rwm",
- major_device_number, i);
+ major_device_number, minor_devices[i]);
 
 int rc = update_cgroups_parameters_func_p("devices", "deny",
   container_id, param_value);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1b08ba7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc
index 7e41fb4..b3d93dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc
@@ -165,6 +165,19 @@ TEST_F(TestGpuModule, test_verify_gpu_module_calls_cgroup_parameter) {
 
   // Verify cgroups parameters
   verify_param_updated_to_cgroups(0, NULL);
+
+  /* Test case 3: block 2 non-sequential devices */
+  cgroups_parameters_invoked.clear();
+  char* argv_2[] = { (char*) "--module-gpu", (char*) "--excluded_gpus", (char*) "1,3",
+   (char*) "--container_id", container_id };
+  rc = handle_gpu_request(&mock_update_cgroups_parameters,
+ "gpu", 5, argv_2);
+  ASSERT_EQ(0, rc) << "Should success.\n";
+
+  // Verify cgroups parameters
+  const char* expected_cgroups_argv_2[] = { "devices", "deny", container_id, "c 195:1 rwm",
+"devices", "deny", container_id, "c 195:3 rwm"};
+  verify_param_updated_to_cgroups(8, expected_cgroups_argv_2);
 }
 
 TEST_F(TestGpuModule, test_illegal_cli_parameters) {
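
A worked example of the fix: with excluded GPUs 1 and 3 and NVIDIA's major device number 195, the module must write "c 195:1 rwm" and "c 195:3 rwm" into devices.deny. Before the patch the loop index was written instead of minor_devices[i], producing "c 195:0 rwm" and "c 195:1 rwm" and thus denying the wrong devices; the new non-sequential test case above pins down exactly this behavior.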





[37/50] [abbrv] hadoop git commit: HDFS-12502. nntop should support a category based on FilesInGetListingOps.

2017-10-19 Thread haibochen
HDFS-12502. nntop should support a category based on FilesInGetListingOps.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60bfee27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60bfee27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60bfee27

Branch: refs/heads/YARN-1011
Commit: 60bfee270ed3a653c44c0bc92396167b5022df6e
Parents: b016f08
Author: Zhe Zhang 
Authored: Wed Oct 18 23:51:24 2017 -0700
Committer: Zhe Zhang 
Committed: Wed Oct 18 23:51:24 2017 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  |  5 
 .../server/namenode/top/metrics/TopMetrics.java | 30 +++-
 .../server/namenode/metrics/TestTopMetrics.java | 11 +--
 3 files changed, 36 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60bfee27/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index e8d7161..1e8f319 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3673,6 +3673,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   readUnlock(operationName);
 }
 logAuditEvent(true, operationName, src);
+if (topConf.isEnabled && isAuditEnabled() && isExternalInvocation()
+&& dl != null && Server.getRemoteUser() != null) {
+  topMetrics.reportFilesInGetListing(Server.getRemoteUser().toString(),
+  dl.getPartialListing().length);
+}
 return dl;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60bfee27/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
index 2719c88..3d8dd19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
@@ -70,6 +70,14 @@ public class TopMetrics implements MetricsSource {
   public static final Logger LOG = LoggerFactory.getLogger(TopMetrics.class);
   public static final String TOPMETRICS_METRICS_SOURCE_NAME =
   "NNTopUserOpCounts";
+  /**
+   * In addition to counts of different RPC calls, NNTop also reports top
+   * users listing large directories (measured by the number of files involved
+   * in listing operations from the user). This is important because the CPU
+   * and GC overhead of a listing operation grows linearly with the number of
+   * files involved. This category in NNTop is {@link #FILES_IN_GETLISTING}.
+   */
+  public static final String FILES_IN_GETLISTING = "filesInGetListing";
   private final boolean isMetricsSourceEnabled;
 
   private static void logConf(Configuration conf) {
@@ -123,22 +131,30 @@ public class TopMetrics implements MetricsSource {
   public void report(boolean succeeded, String userName, InetAddress addr,
   String cmd, String src, String dst, FileStatus status) {
 // currently nntop only makes use of the username and the command
-report(userName, cmd);
+report(userName, cmd, 1);
   }
 
-  public void report(String userName, String cmd) {
+  public void reportFilesInGetListing(String userName, int numFiles) {
+report(userName, FILES_IN_GETLISTING, numFiles);
+  }
+
+  public void report(String userName, String cmd, int delta) {
 long currTime = Time.monotonicNow();
-report(currTime, userName, cmd);
+report(currTime, userName, cmd, delta);
   }
 
-  public void report(long currTime, String userName, String cmd) {
+  public void report(long currTime, String userName, String cmd, int delta) {
 LOG.debug("a metric is reported: cmd: {} user: {}", cmd, userName);
 userName = UserGroupInformation.trimLoginMethod(userName);
 for (RollingWindowManager rollingWindowManager : rollingWindowManagers
 .values()) {
-  rollingWindowManager.recordMetric(currTime, cmd, userName, 1);
-  rollingWindowManager.recordMetric(currTime,
-  
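
The generalization above lets one listing operation contribute more than a single unit: reportFilesInGetListing("alice", 5000), for example, adds 5000 to the filesInGetListing rolling window for user alice, while ordinary RPC commands continue to report a delta of 1.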

[08/50] [abbrv] hadoop git commit: Revert "HADOOP-13514. Upgrade maven surefire plugin to 2.19.1."

2017-10-19 Thread haibochen
Revert "HADOOP-13514. Upgrade maven surefire plugin to 2.19.1."

This reverts commit 3d04c00aed63ca49420210f5f34efe39897aae63.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0de40f0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0de40f0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0de40f0e

Branch: refs/heads/YARN-1011
Commit: 0de40f0e27c3369869eaa9f7698c638a7149e705
Parents: e46d5bb
Author: Akira Ajisaka 
Authored: Fri Oct 13 18:00:25 2017 +0900
Committer: Akira Ajisaka 
Committed: Fri Oct 13 18:00:25 2017 +0900

--
 BUILDING.txt  | 4 ++--
 dev-support/docker/Dockerfile | 2 +-
 hadoop-project/pom.xml| 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0de40f0e/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 9955563..47aaab4 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -331,10 +331,10 @@ If the build process fails with an out of memory error, you should be able to fix
 it by increasing the memory used by maven which can be done via the environment
 variable MAVEN_OPTS.
 
-Here is an example setting to allocate between 256 MB and 1 GB of heap space to
+Here is an example setting to allocate between 256 and 512 MB of heap space to
 Maven
 
-export MAVEN_OPTS="-Xms256m -Xmx1g"
+export MAVEN_OPTS="-Xms256m -Xmx512m"
 
 
--
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0de40f0e/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 1ced9ef..31ac611 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -147,7 +147,7 @@ RUN pip install python-dateutil
 ###
 # Avoid out of memory errors in builds
 ###
-ENV MAVEN_OPTS -Xms256m -Xmx1g
+ENV MAVEN_OPTS -Xms256m -Xmx512m
 
 ###
 # Install node js tools for web UI frameowkr

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0de40f0e/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 66e3dfc..8980f0e 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -117,7 +117,7 @@
 
 
 -Xmx2048m 
-XX:+HeapDumpOnOutOfMemoryError
-2.19.1
+2.17
 
${maven-surefire-plugin.version}
 
${maven-surefire-plugin.version}
 
@@ -1546,7 +1546,7 @@
 ${test.build.data}
 ${test.build.webapps}
 ${test.cache.data}
-
${project.build.directory}/test-classes
+${test.build.classes}
 
 true
 
${project.build.directory}/test-classes/krb5.conf





[39/50] [abbrv] hadoop git commit: HDFS-12619. Do not catch and throw unchecked exceptions if IBRs fail to process. Contributed by Wei-Chiu Chuang.

2017-10-19 Thread haibochen
HDFS-12619. Do not catch and throw unchecked exceptions if IBRs fail to 
process. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ab0c8f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ab0c8f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ab0c8f9

Branch: refs/heads/YARN-1011
Commit: 4ab0c8f96a41c573cc1f1e71c18871d243f952b9
Parents: 97c70c7
Author: Wei-Chiu Chuang 
Authored: Thu Oct 19 06:17:59 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Oct 19 06:17:59 2017 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java  | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ab0c8f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 386ba27..8b0f094 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3889,11 +3889,15 @@ public class BlockManager implements BlockStatsMXBean {
   throw new IOException(
   "Got incremental block report from unregistered or dead node");
 }
+
+boolean successful = false;
 try {
   processIncrementalBlockReport(node, srdb);
-} catch (Exception ex) {
-  node.setForceRegistration(true);
-  throw ex;
+  successful = true;
+} finally {
+  if (!successful) {
+node.setForceRegistration(true);
+  }
 }
   }
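
The success-flag idiom above reproduces the old cleanup behavior without catching Exception: checked exceptions, unchecked exceptions, and Errors all propagate unchanged while the finally block still forces re-registration on failure. The generic shape, with doWork() and onFailure() as placeholders:

    boolean successful = false;
    try {
      doWork();          // may throw anything
      successful = true;
    } finally {
      if (!successful) {
        onFailure();     // cleanup runs for every failure mode
      }
    }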
 





[49/50] [abbrv] hadoop git commit: YARN-6670 Add separate NM overallocation thresholds for cpu and memory (Haibo Chen)

2017-10-19 Thread haibochen
YARN-6670 Add separate NM overallocation thresholds for cpu and memory (Haibo 
Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85a68873
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85a68873
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85a68873

Branch: refs/heads/YARN-1011
Commit: 85a688738ce01a371facb78c1b9e2adfde3d2cfd
Parents: 2bb9569
Author: Haibo Chen 
Authored: Mon Jul 10 09:55:42 2017 -0700
Committer: Haibo Chen 
Committed: Thu Oct 19 21:26:14 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 36 +--
 .../src/main/resources/yarn-default.xml | 42 ++--
 .../server/api/records/ResourceThresholds.java  | 11 +++-
 .../monitor/ContainersMonitorImpl.java  | 67 +++-
 4 files changed, 124 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85a68873/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index be7e64d..f312f24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1831,17 +1831,39 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS =
   3000;
 
-  /** Overallocation (= allocation based on utilization) configs. */
-  public static final String NM_OVERALLOCATION_ALLOCATION_THRESHOLD =
-  NM_PREFIX + "overallocation.allocation-threshold";
-  public static final float DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD
-  = 0f;
+  /**
+   * General overallocation threshold if no resource-type-specific
+   * threshold is provided.
+   */
+  public static final String NM_OVERALLOCATION_GENERAL_THRESHOLD =
+  NM_PREFIX + "overallocation.general-utilization-threshold";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_GENERAL_THRESHOLD = -1.0f;
+  /**
+   * The maximum value of utilization threshold for all resource types
+   * up to which the scheduler allocates OPPORTUNISTIC containers.
+   */
   @Private
-  public static final float MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD = 0.95f;
+  public static final float MAX_NM_OVERALLOCATION_THRESHOLD = 0.95f;
+
+  /**
+   * NM CPU utilization threshold up to which the scheduler allocates
+   * OPPORTUNISTIC containers after the node's capacity is fully allocated.
+   */
+  public static final String NM_OVERALLOCATION_CPU_UTILIZATION_THRESHOLD =
+  NM_PREFIX + "overallocation.cpu-utilization-threshold";
+
+  /**
+   * NM memory utilization threshold up to which the scheduler allocates
+   * OPPORTUNISTIC containers after the node's capacity is fully allocated.
+   */
+  public static final String NM_OVERALLOCATION_MEMORY_UTILIZATION_THRESHOLD =
+  NM_PREFIX + "overallocation.memory-utilization-threshold";
+
   public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
   NM_PREFIX + "overallocation.preemption-threshold";
   public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
-  = 0f;
+  = 0.96f;
 
   /**
* Interval of time the linux container executor should try cleaning up
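
A concrete reading of these knobs: setting yarn.nodemanager.overallocation.general-utilization-threshold to, say, 0.75 lets the node keep launching OPPORTUNISTIC containers after its capacity is fully allocated, as long as measured utilization of every resource stays below 75%; the cpu- and memory-specific keys override the general value per resource, and the effective threshold is capped at MAX_NM_OVERALLOCATION_THRESHOLD (0.95).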

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85a68873/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 04ff0e5..3a98060 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1603,14 +1603,44 @@
 
   
 The extent of over-allocation (container-allocation based on
+  current utilization instead of prior allocation) allowed on this node that
+  applies to all resource types (expressed as a float between 0 and 0.95).
+  By default, over-allocation is turned off (value = -1). When turned on,
+  the node allows running OPPORTUNISTIC containers when the aggregate
+  utilization for each resource type is under the value specified here

[47/50] [abbrv] hadoop git commit: HDFS-12448. Make sure user defined erasure coding policy ID will not overflow. Contributed by Huafeng Wang

2017-10-19 Thread haibochen
HDFS-12448. Make sure user defined erasure coding policy ID will not overflow. 
Contributed by Huafeng Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce7cf66e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce7cf66e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce7cf66e

Branch: refs/heads/YARN-1011
Commit: ce7cf66e5ed74c124afdb5a6902fbf297211cc04
Parents: 0f1c037
Author: Kai Zheng 
Authored: Fri Oct 20 09:42:04 2017 +0800
Committer: Kai Zheng 
Committed: Fri Oct 20 09:42:04 2017 +0800

--
 .../io/erasurecode/ErasureCodeConstants.java|  1 +
 .../namenode/ErasureCodingPolicyManager.java| 20 +++
 .../src/site/markdown/HDFSErasureCoding.md  |  2 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 27 
 4 files changed, 44 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce7cf66e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index d3c3b6b..73b8f56 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -50,6 +50,7 @@ public final class ErasureCodeConstants {
   public static final ECSchema REPLICATION_1_2_SCHEMA = new ECSchema(
   REPLICATION_CODEC_NAME, 1, 2);
 
+  public static final byte MAX_POLICY_ID = Byte.MAX_VALUE;
   public static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
   public static final byte REPLICATION_POLICY_ID = (byte) 63;
   public static final String REPLICATION_POLICY_NAME = REPLICATION_CODEC_NAME;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce7cf66e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 90699b4..62c7f60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -253,6 +253,14 @@ public final class ErasureCodingPolicyManager {
 return p;
   }
 }
+
+if (getCurrentMaxPolicyID() == ErasureCodeConstants.MAX_POLICY_ID) {
+  throw new HadoopIllegalArgumentException("Adding erasure coding " +
+  "policy failed because the number of policies stored in the " +
+  "system already reached the threshold, which is " +
+  ErasureCodeConstants.MAX_POLICY_ID);
+}
+
 policy.setName(assignedNewName);
 policy.setId(getNextAvailablePolicyID());
 this.policiesByName.put(policy.getName(), policy);
@@ -261,12 +269,14 @@ public final class ErasureCodingPolicyManager {
 return policy;
   }
 
+  private byte getCurrentMaxPolicyID() {
+return policiesByID.keySet().stream().max(Byte::compareTo).orElse((byte)0);
+  }
+
   private byte getNextAvailablePolicyID() {
-byte currentId = this.policiesByID.keySet().stream()
-.max(Byte::compareTo)
-.filter(id -> id >= ErasureCodeConstants.USER_DEFINED_POLICY_START_ID)
-.orElse(ErasureCodeConstants.USER_DEFINED_POLICY_START_ID);
-return (byte) (currentId + 1);
+byte nextPolicyID = (byte)(getCurrentMaxPolicyID() + 1);
+return nextPolicyID > ErasureCodeConstants.USER_DEFINED_POLICY_START_ID ?
+nextPolicyID : ErasureCodeConstants.USER_DEFINED_POLICY_START_ID;
   }
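
The arithmetic behind the new guard: policy IDs are bytes, and user-defined policies occupy IDs 64 (USER_DEFINED_POLICY_START_ID) through 127 (MAX_POLICY_ID, i.e. Byte.MAX_VALUE), so at most 64 user-defined policies can exist. Without the check, adding a policy while the current maximum is 127 would compute (byte) (127 + 1) = -128 and silently overflow; with it, the add fails fast with a HadoopIllegalArgumentException.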
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce7cf66e/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index a171665..270201a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -193,7 +193,7 @@ Below are the 

[06/50] [abbrv] hadoop git commit: MAPREDUCE-5951. Add support for the YARN Shared Cache.

2017-10-19 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e46d5bb9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
index d0d7a34..d347da5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
@@ -220,7 +220,7 @@ public class TestJobResourceUploader {
   destinationPathPrefix + "tmpArchives1.tgz#tmpArchivesfragment1.tgz" };
 
   private String jobjarSubmitDir = "/jobjar-submit-dir";
-  private String expectedJobJar = jobjarSubmitDir + "/job.jar";
+  private String basicExpectedJobJar = jobjarSubmitDir + "/job.jar";
 
   @Test
   public void testPathsWithNoFragNoSchemeRelative() throws IOException {
@@ -236,7 +236,7 @@ public class TestJobResourceUploader {
 JobResourceUploader uploader = new StubedUploader(jConf);
 
 runTmpResourcePathTest(uploader, rConf, jConf, expectedFilesNoFrags,
-expectedArchivesNoFrags, expectedJobJar);
+expectedArchivesNoFrags, basicExpectedJobJar);
   }
 
   @Test
@@ -254,7 +254,7 @@ public class TestJobResourceUploader {
 JobResourceUploader uploader = new StubedUploader(jConf);
 
 runTmpResourcePathTest(uploader, rConf, jConf, expectedFilesNoFrags,
-expectedArchivesNoFrags, expectedJobJar);
+expectedArchivesNoFrags, basicExpectedJobJar);
   }
 
   @Test
@@ -272,7 +272,7 @@ public class TestJobResourceUploader {
 JobResourceUploader uploader = new StubedUploader(jConf);
 
 runTmpResourcePathTest(uploader, rConf, jConf, expectedFilesWithFrags,
-expectedArchivesWithFrags, expectedJobJar);
+expectedArchivesWithFrags, basicExpectedJobJar);
   }
 
   @Test
@@ -290,7 +290,7 @@ public class TestJobResourceUploader {
 JobResourceUploader uploader = new StubedUploader(jConf);
 
 runTmpResourcePathTest(uploader, rConf, jConf, expectedFilesWithFrags,
-expectedArchivesWithFrags, expectedJobJar);
+expectedArchivesWithFrags, basicExpectedJobJar);
   }
 
   @Test
@@ -308,7 +308,7 @@ public class TestJobResourceUploader {
 JobResourceUploader uploader = new StubedUploader(jConf);
 
 runTmpResourcePathTest(uploader, rConf, jConf, expectedFilesWithFrags,
-expectedArchivesWithFrags, expectedJobJar);
+expectedArchivesWithFrags, basicExpectedJobJar);
   }
 
   @Test
@@ -326,7 +326,7 @@ public class TestJobResourceUploader {
 JobResourceUploader uploader = new StubedUploader(jConf);
 
 runTmpResourcePathTest(uploader, rConf, jConf, expectedFilesNoFrags,
-expectedArchivesNoFrags, expectedJobJar);
+expectedArchivesNoFrags, basicExpectedJobJar);
   }
 
   @Test
@@ -344,7 +344,7 @@ public class TestJobResourceUploader {
 JobResourceUploader uploader = new StubedUploader(jConf, true);
 
 runTmpResourcePathTest(uploader, rConf, jConf, expectedFilesWithWildcard,
-expectedArchivesNoFrags, expectedJobJar);
+expectedArchivesNoFrags, basicExpectedJobJar);
   }
 
   @Test
@@ -362,7 +362,7 @@ public class TestJobResourceUploader {
 JobResourceUploader uploader = new StubedUploader(jConf, true);
 
 runTmpResourcePathTest(uploader, rConf, jConf, expectedFilesWithFrags,
-expectedArchivesWithFrags, expectedJobJar);
+expectedArchivesWithFrags, basicExpectedJobJar);
   }
 
   @Test
@@ -402,44 +402,39 @@ public class TestJobResourceUploader {
   private void runTmpResourcePathTest(JobResourceUploader uploader,
   ResourceConf rConf, JobConf jConf, String[] expectedFiles,
   String[] expectedArchives, String expectedJobJar) throws IOException {
-    rConf.setupJobConf(jConf);
-    // We use a pre and post job object here because we need the post job object
-    // to get the new values set during uploadResources, but we need the pre job
-    // to set the job jar because JobResourceUploader#uploadJobJar uses the Job
-    // interface not the JobConf. The post job is automatically created in
-    // validateResourcePaths.
-    Job jobPre = Job.getInstance(jConf);
-    uploadResources(uploader, jConf, jobPre);
-
-    validateResourcePaths(jConf, expectedFiles, expectedArchives,
-        expectedJobJar, jobPre);
+    Job job = rConf.setupJobConf(jConf);
+    uploadResources(uploader, job);
+    validateResourcePaths(job, expectedFiles, expectedArchives, expectedJobJar);
   }
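
The refactor above works because ResourceConf#setupJobConf now returns the Job it configures, so a single object carries both the pre-set jar and the values written during upload. A minimal standalone sketch of that single-object pattern, assuming only a Hadoop MapReduce client on the classpath (the jar path is illustrative):

import java.io.IOException;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Job;

public class SingleJobObjectSketch {
  public static void main(String[] args) throws IOException {
    JobConf jConf = new JobConf();
    jConf.setJar("/tmp/job.jar");  // illustrative path

    // Job.getInstance(Configuration) snapshots the configuration, so the
    // jar set above is visible through the Job view -- one object can serve
    // both the upload step and the later validation, as in the refactor.
    Job job = Job.getInstance(jConf);
    System.out.println(job.getJar());  // /tmp/job.jar
  }
}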
 
-  

[40/50] [abbrv] hadoop git commit: HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar via Haibo Chen)

2017-10-19 Thread haibochen
HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar 
via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3dd3d1dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3dd3d1dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3dd3d1dd

Branch: refs/heads/YARN-1011
Commit: 3dd3d1dd77515be6d62e637c27d7d37d24058617
Parents: 4ab0c8f
Author: Haibo Chen 
Authored: Thu Oct 19 13:25:08 2017 -0700
Committer: Haibo Chen 
Committed: Thu Oct 19 13:25:08 2017 -0700

--
 hadoop-client-modules/hadoop-client/pom.xml | 60 
 1 file changed, 60 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dd3d1dd/hadoop-client-modules/hadoop-client/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml
index bed3f5c..a738d47 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -179,6 +179,66 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-client</artifactId>
+      <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-annotations</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-cli</groupId>
+          <artifactId>commons-cli</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-server</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-json</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-servlet</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.google.inject.extensions</groupId>
+          <artifactId>guice-servlet</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-core</artifactId>
       <scope>compile</scope>
       <exclusions>





[28/50] [abbrv] hadoop git commit: HDFS-12614. FSPermissionChecker#getINodeAttrs() throws NPE when INodeAttributesProvider configured.

2017-10-19 Thread haibochen
HDFS-12614. FSPermissionChecker#getINodeAttrs() throws NPE when 
INodeAttributesProvider configured.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b406d8e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b406d8e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b406d8e3

Branch: refs/heads/YARN-1011
Commit: b406d8e3755d24ce72c443fd893a5672fd56babc
Parents: e906108
Author: Manoj Govindassamy 
Authored: Mon Oct 16 17:42:41 2017 -0700
Committer: Manoj Govindassamy 
Committed: Mon Oct 16 17:42:41 2017 -0700

--
 .../server/namenode/FSPermissionChecker.java| 12 +++-
 .../namenode/TestINodeAttributeProvider.java| 60 ++--
 2 files changed, 54 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b406d8e3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index f745a6c..c854b49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -275,8 +275,16 @@ class FSPermissionChecker implements AccessControlEnforcer {
 INodeAttributes inodeAttrs = inode.getSnapshotINode(snapshotId);
 if (getAttributesProvider() != null) {
   String[] elements = new String[pathIdx + 1];
-  for (int i = 0; i < elements.length; i++) {
-elements[i] = DFSUtil.bytes2String(pathByNameArr[i]);
+  /**
+   * {@link INode#getPathComponents(String)} returns a null component
+   * for the root only path "/". Assign an empty string if so.
+   */
+  if (pathByNameArr.length == 1 && pathByNameArr[0] == null) {
+elements[0] = "";
+  } else {
+for (int i = 0; i < elements.length; i++) {
+  elements[i] = DFSUtil.bytes2String(pathByNameArr[i]);
+}
   }
   inodeAttrs = getAttributesProvider().getAttributes(elements, inodeAttrs);
 }
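
The guard above exists because INode#getPathComponents resolves the root-only path "/" to a single null component, which the old loop fed straight into DFSUtil.bytes2String. A self-contained sketch of the failure mode and the fix, where getPathComponents below is a stand-in whose root behaviour mirrors the javadoc in the hunk (an assumption, not the real HDFS method):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class RootPathComponentsSketch {
  // Stand-in for INode#getPathComponents: the root-only path "/" resolves
  // to a single null component, as the javadoc in the fix describes.
  static byte[][] getPathComponents(String path) {
    if ("/".equals(path)) {
      return new byte[][] {null};
    }
    String[] parts = path.substring(1).split("/");
    byte[][] out = new byte[parts.length][];
    for (int i = 0; i < parts.length; i++) {
      out[i] = parts[i].getBytes(StandardCharsets.UTF_8);
    }
    return out;
  }

  public static void main(String[] args) {
    byte[][] pathByNameArr = getPathComponents("/");
    String[] elements = new String[pathByNameArr.length];
    if (pathByNameArr.length == 1 && pathByNameArr[0] == null) {
      elements[0] = "";  // the fix: map the null root component to ""
    } else {
      for (int i = 0; i < elements.length; i++) {
        // the old loop; converting a null component here is what raised the NPE
        elements[i] = new String(pathByNameArr[i], StandardCharsets.UTF_8);
      }
    }
    System.out.println(Arrays.toString(elements));  // [] -- one empty string
  }
}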

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b406d8e3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index bbc5fa0..9c7dcd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -313,31 +313,59 @@ public class TestINodeAttributeProvider {
 testBypassProviderHelper(users, HDFS_PERMISSION, true);
   }
 
-  @Test
-  public void testCustomProvider() throws Exception {
+  private void verifyFileStatus(UserGroupInformation ugi) throws IOException {
 FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
-fs.mkdirs(new Path("/user/xxx"));
-FileStatus status = fs.getFileStatus(new Path("/user/xxx"));
-Assert.assertEquals(System.getProperty("user.name"), status.getOwner());
+
+FileStatus status = fs.getFileStatus(new Path("/"));
+LOG.info("Path '/' is owned by: "
++ status.getOwner() + ":" + status.getGroup());
+
+Path userDir = new Path("/user/" + ugi.getShortUserName());
+fs.mkdirs(userDir);
+status = fs.getFileStatus(userDir);
+Assert.assertEquals(ugi.getShortUserName(), status.getOwner());
 Assert.assertEquals("supergroup", status.getGroup());
 Assert.assertEquals(new FsPermission((short) 0755), 
status.getPermission());
-fs.mkdirs(new Path("/user/authz"));
-Path p = new Path("/user/authz");
-status = fs.getFileStatus(p);
+
+Path authzDir = new Path("/user/authz");
+fs.mkdirs(authzDir);
+status = fs.getFileStatus(authzDir);
 Assert.assertEquals("foo", status.getOwner());
 Assert.assertEquals("bar", status.getGroup());
 Assert.assertEquals(new FsPermission((short) 0770), 
status.getPermission());
-AclStatus aclStatus = fs.getAclStatus(p);
+
+AclStatus aclStatus = fs.getAclStatus(authzDir);
 Assert.assertEquals(1, aclStatus.getEntries().size());
-

[13/50] [abbrv] hadoop git commit: YARN-7310. TestAMRMProxy#testAMRMProxyE2E fails with FairScheduler. (Robert Kanter via Haibo Chen)

2017-10-19 Thread haibochen
YARN-7310. TestAMRMProxy#testAMRMProxyE2E fails with FairScheduler. (Robert 
Kanter via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fb47188
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fb47188
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fb47188

Branch: refs/heads/YARN-1011
Commit: 3fb47188867124bf18fb798b460928b3557ab9a1
Parents: 8dd1eeb
Author: Haibo Chen 
Authored: Fri Oct 13 15:49:21 2017 -0700
Committer: Haibo Chen 
Committed: Fri Oct 13 15:49:21 2017 -0700

--
 .../org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java| 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fb47188/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
index 6a063e6..ea7d892 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -67,6 +68,9 @@ public class TestAMRMProxy extends BaseAMRMProxyE2ETest {
 YarnClient rmClient = YarnClient.createYarnClient()) {
   Configuration conf = new YarnConfiguration();
   conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
+  // Make sure if using FairScheduler that we can assign multiple containers
+  // in a single heartbeat later
+  conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true);
   cluster.init(conf);
   cluster.start();
   final Configuration yarnConf = cluster.getConfig();
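
For reference, a minimal sketch of the same setting without the ResourceManager test classpath; the literal key is written out under the assumption that FairSchedulerConfiguration.ASSIGN_MULTIPLE resolves to it:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class AssignMultipleSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Same effect as conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true);
    // spelled out so this compiles without the RM module on the classpath.
    conf.setBoolean("yarn.scheduler.fair.assignmultiple", true);
    System.out.println(conf.getBoolean("yarn.scheduler.fair.assignmultiple", false));
  }
}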





[15/50] [abbrv] hadoop git commit: Addendum fix for: YARN-7269. Tracking URL in the app state does not get redirected to ApplicationMaster for Running applications. (Wangda Tan)

2017-10-19 Thread haibochen
Addendum fix for: YARN-7269. Tracking URL in the app state does not get 
redirected to ApplicationMaster for Running applications. (Wangda Tan)

Change-Id: If1fe4a62e07b25e6f1b8ba803495da87e2cb2df6


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87ea1dff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87ea1dff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87ea1dff

Branch: refs/heads/YARN-1011
Commit: 87ea1dff9c431fb88e064e497b35fc17c65f1d53
Parents: 133d7ca
Author: Wangda Tan 
Authored: Sat Oct 14 10:41:58 2017 -0700
Committer: Wangda Tan 
Committed: Sat Oct 14 10:41:58 2017 -0700

--
 .../webproxy/amfilter/AmFilterInitializer.java  | 16 ++--
 .../webproxy/amfilter/TestAmFilterInitializer.java  |  8 
 2 files changed, 14 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87ea1dff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java
index fa54c79..c3ddc54 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java
@@ -62,15 +62,19 @@ public class AmFilterInitializer extends FilterInitializer {
 container.addFilter(FILTER_NAME, FILTER_CLASS, params);
 
     // Handle RM HA urls
-    List<String> urls = new ArrayList<>();
-
     // Include yarn-site.xml in the classpath
     YarnConfiguration yarnConf = new YarnConfiguration(conf);
-    for (String rmId : getRmIds(yarnConf)) {
-      String url = getUrlByRmId(yarnConf, rmId);
-      urls.add(url);
+    Collection<String> rmIds = getRmIds(yarnConf);
+    if (rmIds != null) {
+      List<String> urls = new ArrayList<>();
+      for (String rmId : rmIds) {
+        String url = getUrlByRmId(yarnConf, rmId);
+        urls.add(url);
+      }
+      if (!urls.isEmpty()) {
+        params.put(RM_HA_URLS, StringUtils.join(",", urls));
+      }
     }
-    params.put(RM_HA_URLS, StringUtils.join(",", urls));
   }
 
   private Collection<String> getRmIds(Configuration conf) {
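
The fix only populates RM_HA_URLS when RM ids actually resolve, so a non-HA setup no longer writes an empty parameter. A plain-Java sketch of that null-safe join, with getUrlByRmId replaced by a hypothetical URL builder:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RmHaUrlsSketch {
  static final String RM_HA_URLS = "RM_HA_URLS";

  // Mirrors the fix: only add the parameter when at least one RM id
  // resolves to a URL; null ids (non-HA) leave the map untouched.
  static void putRmHaUrls(Map<String, String> params, Collection<String> rmIds) {
    if (rmIds == null) {
      return;
    }
    List<String> urls = new ArrayList<>();
    for (String rmId : rmIds) {
      urls.add("http://" + rmId + ":8088");  // stand-in for getUrlByRmId
    }
    if (!urls.isEmpty()) {
      params.put(RM_HA_URLS, String.join(",", urls));
    }
  }

  public static void main(String[] args) {
    Map<String, String> params = new HashMap<>();
    putRmHaUrls(params, null);                         // non-HA: no entry
    putRmHaUrls(params, Arrays.asList("rm1", "rm2"));  // HA: joined list
    System.out.println(params);  // {RM_HA_URLS=http://rm1:8088,http://rm2:8088}
  }
}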

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87ea1dff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
index b621012..97625ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
@@ -56,11 +56,11 @@ public class TestAmFilterInitializer {
 AmFilterInitializer afi = new MockAmFilterInitializer();
 assertNull(con.givenParameters);
 afi.initFilter(con, conf);
-assertEquals(3, con.givenParameters.size());
+assertEquals(2, con.givenParameters.size());
 assertEquals("host1", con.givenParameters.get(AmIpFilter.PROXY_HOSTS));
 assertEquals("http://host1:1000/foo;,
 con.givenParameters.get(AmIpFilter.PROXY_URI_BASES));
-assertEquals("", con.givenParameters.get(AmFilterInitializer.RM_HA_URLS));
+assertEquals(null, 
con.givenParameters.get(AmFilterInitializer.RM_HA_URLS));
 
 // Check a single RM_WEBAPP_ADDRESS
 con = new MockFilterContainer();
@@ -69,11 +69,11 @@ public class TestAmFilterInitializer {
 afi = new MockAmFilterInitializer();
 assertNull(con.givenParameters);
 afi.initFilter(con, conf);
-assertEquals(3, con.givenParameters.size());
+assertEquals(2, 

[34/50] [abbrv] hadoop git commit: HADOOP-14944. Add JvmMetrics to KMS.

2017-10-19 Thread haibochen
HADOOP-14944. Add JvmMetrics to KMS.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86ee0c5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86ee0c5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86ee0c5e

Branch: refs/heads/YARN-1011
Commit: 86ee0c5e4e304d9551a24a3d8b9161ca1502b70e
Parents: 46eb103
Author: Xiao Chen 
Authored: Tue Oct 17 15:55:30 2017 -0700
Committer: Xiao Chen 
Committed: Tue Oct 17 19:06:45 2017 -0700

--
 .../hadoop/metrics2/source/JvmMetrics.java  | 16 +
 .../crypto/key/kms/server/KMSConfiguration.java |  9 +
 .../crypto/key/kms/server/KMSWebServer.java | 24 ++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 35 
 4 files changed, 84 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86ee0c5e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
index c6369cd..e3f8754 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
@@ -58,6 +58,11 @@ public class JvmMetrics implements MetricsSource {
   }
   return impl;
 }
+
+synchronized void shutdown() {
+  DefaultMetricsSystem.instance().unregisterSource(JvmMetrics.name());
+  impl = null;
+}
   }
 
   @VisibleForTesting
@@ -81,6 +86,7 @@ public class JvmMetrics implements MetricsSource {
   final ConcurrentHashMap<String, MetricsInfo[]> gcInfoCache =
       new ConcurrentHashMap<String, MetricsInfo[]>();
 
+  @VisibleForTesting
   JvmMetrics(String processName, String sessionId) {
 this.processName = processName;
 this.sessionId = sessionId;
@@ -104,6 +110,16 @@ public class JvmMetrics implements MetricsSource {
 return Singleton.INSTANCE.init(processName, sessionId);
   }
 
+  /**
+   * Shutdown the JvmMetrics singleton. This is not necessary if the JVM itself
+   * is shutdown, but may be necessary for scenarios where JvmMetrics instance
+   * needs to be re-created while the JVM is still around. One such scenario
+   * is unit-testing.
+   */
+  public static void shutdownSingleton() {
+Singleton.INSTANCE.shutdown();
+  }
+
   @Override
   public void getMetrics(MetricsCollector collector, boolean all) {
 MetricsRecordBuilder rb = collector.addRecord(JvmMetrics)
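
A minimal sketch of the new lifecycle this hunk enables, assuming the default metrics system has been initialized for the test; initSingleton and shutdownSingleton are the methods shown above:

import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;

public class JvmMetricsLifecycleSketch {
  public static void main(String[] args) {
    DefaultMetricsSystem.initialize("Test");

    // First init registers the singleton with the default metrics system.
    JvmMetrics first = JvmMetrics.initSingleton("KMS", null);

    // shutdownSingleton() unregisters it, so a re-created server (or the
    // next unit test) in the same JVM can register a fresh instance
    // instead of tripping over the already-registered source name.
    JvmMetrics.shutdownSingleton();
    JvmMetrics second = JvmMetrics.initSingleton("KMS", null);

    System.out.println(first != second);  // true: a new instance was created
  }
}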

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86ee0c5e/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index df17ef5..18eec19 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -76,6 +76,15 @@ public class KMSConfiguration {
   public static final String KMS_AUDIT_AGGREGATION_WINDOW = CONFIG_PREFIX +
   "audit.aggregation.window.ms";
 
+  // Process name shown in metrics
+  public static final String METRICS_PROCESS_NAME_KEY =
+  CONFIG_PREFIX + "metrics.process.name";
+  public static final String METRICS_PROCESS_NAME_DEFAULT = "KMS";
+
+  // Session id for metrics
+  public static final String METRICS_SESSION_ID_KEY =
+  CONFIG_PREFIX + "metrics.session.id";
+
   // KMS Audit logger classes to use
   public static final String KMS_AUDIT_LOGGER_KEY = CONFIG_PREFIX +
   "audit.logger";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86ee0c5e/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
index ced1f69..19f7227 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
[18/50] [abbrv] hadoop git commit: HDFS-12603. Enable async edit logging by default. Contributed by Andrew Wang.

2017-10-19 Thread haibochen
HDFS-12603. Enable async edit logging by default. Contributed by Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/035c6ee5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/035c6ee5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/035c6ee5

Branch: refs/heads/YARN-1011
Commit: 035c6ee587e444550af6420676e4cee049e09869
Parents: 9fcc3a1
Author: Xiao Chen 
Authored: Mon Oct 16 09:43:39 2017 -0700
Committer: Xiao Chen 
Committed: Mon Oct 16 09:51:10 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  2 +-
 .../src/main/resources/hdfs-default.xml |  2 +-
 .../namenode/ha/TestFailureToReadEdits.java | 25 ++--
 4 files changed, 21 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/035c6ee5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1f96763..395b192 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -326,7 +326,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String  DFS_NAMENODE_EDITS_ASYNC_LOGGING =
   "dfs.namenode.edits.asynclogging";
-  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = false;
+  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = true;
 
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int DFS_LIST_LIMIT_DEFAULT = 1000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/035c6ee5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index af2a5af9..7ca63f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -130,7 +130,7 @@ public class FSEditLog implements LogsPurgeable {
* 
* In a non-HA setup:
* 
-   * The log starts in UNITIALIZED state upon construction. Once it's
+   * The log starts in UNINITIALIZED state upon construction. Once it's
* initialized, it is usually in IN_SEGMENT state, indicating that edits may
* be written. In the middle of a roll, or while saving the namespace, it
* briefly enters the BETWEEN_LOG_SEGMENTS state, indicating that the 
previous

http://git-wip-us.apache.org/repos/asf/hadoop/blob/035c6ee5/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 3491ed2..3752578 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4105,7 +4105,7 @@
 
 <property>
   <name>dfs.namenode.edits.asynclogging</name>
-  <value>false</value>
+  <value>true</value>
   <description>
     If set to true, enables asynchronous edit logs in the Namenode.  If set
     to false, the Namenode uses the traditional synchronous edit logs.
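
Since the default flips to true, a deployment that wants the old synchronous behaviour must now opt out explicitly. A minimal sketch using the constants from the DFSConfigKeys hunk above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class AsyncEditLogOptOutSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Revert this NameNode to the pre-HDFS-12603 synchronous edit logging.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING, false);
    System.out.println(conf.getBoolean(
        DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING,
        DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT));  // false
  }
}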

http://git-wip-us.apache.org/repos/asf/hadoop/blob/035c6ee5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
index 93c717c..a37631f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
@@ -75,6 +75,7 @@ public class TestFailureToReadEdits {
   private static final Random RANDOM = new Random();
 
   private 

[03/50] [abbrv] hadoop git commit: YARN-7205. Log improvements for the ResourceUtils. (Sunil G via wangda)

2017-10-19 Thread haibochen
YARN-7205. Log improvements for the ResourceUtils. (Sunil G via wangda)

Change-Id: I0f5b7a7f68ec5d3e1d52211f83fdd089bc0bfd37


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bcc49e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bcc49e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bcc49e6

Branch: refs/heads/YARN-1011
Commit: 8bcc49e6771ca75f012211e27870a421b19233e7
Parents: b6c2c90
Author: Wangda Tan 
Authored: Wed Oct 11 15:25:28 2017 -0700
Committer: Wangda Tan 
Committed: Wed Oct 11 15:25:28 2017 -0700

--
 .../yarn/api/records/ResourceTypeInfo.java  |  7 +--
 .../yarn/util/resource/ResourceUtils.java   | 45 +++-
 2 files changed, 30 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bcc49e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
index b6f7f14..8775342 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
@@ -152,9 +152,10 @@ public abstract class ResourceTypeInfo implements Comparable<ResourceTypeInfo> {
   @Override
   public String toString() {
 StringBuilder sb = new StringBuilder();
-

[12/50] [abbrv] hadoop git commit: HDFS-12553. Add nameServiceId to QJournalProtocol. Contributed by Bharat Viswanadham

2017-10-19 Thread haibochen
HDFS-12553. Add nameServiceId to QJournalProtocol. Contributed by Bharat 
Viswanadham


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8dd1eeb9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8dd1eeb9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8dd1eeb9

Branch: refs/heads/YARN-1011
Commit: 8dd1eeb94fef59feaf19182dd8f1fcf1389c7f34
Parents: e163f41
Author: Arpit Agarwal 
Authored: Fri Oct 13 14:22:21 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Oct 13 14:22:21 2017 -0700

--
 .../hdfs/qjournal/client/AsyncLogger.java   |   2 +-
 .../hdfs/qjournal/client/IPCLoggerChannel.java  |  43 +++--
 .../qjournal/client/QuorumJournalManager.java   |  35 +++-
 .../qjournal/protocol/QJournalProtocol.java |  40 +++--
 .../hdfs/qjournal/protocol/RequestInfo.java |  11 +-
 .../QJournalProtocolServerSideTranslatorPB.java |  27 ++-
 .../QJournalProtocolTranslatorPB.java   | 178 ---
 .../hadoop/hdfs/qjournal/server/Journal.java|  15 +-
 .../hdfs/qjournal/server/JournalNode.java   |  74 ++--
 .../qjournal/server/JournalNodeRpcServer.java   |  71 +---
 .../hdfs/qjournal/server/JournalNodeSyncer.java |  24 ++-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  14 +-
 .../src/main/proto/QJournalProtocol.proto   |  12 ++
 .../qjournal/client/TestEpochsAreUnique.java|   4 +-
 .../hdfs/qjournal/client/TestQJMWithFaults.java |  12 +-
 .../client/TestQuorumJournalManager.java|   5 +-
 .../hdfs/qjournal/server/TestJournal.java   |   8 +-
 .../hdfs/qjournal/server/TestJournalNode.java   |  97 ++
 .../qjournal/server/TestJournalNodeSync.java|   7 +
 19 files changed, 503 insertions(+), 176 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dd1eeb9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
index 8504e80..d2b48cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
@@ -49,7 +49,7 @@ interface AsyncLogger {
   
   interface Factory {
 AsyncLogger createLogger(Configuration conf, NamespaceInfo nsInfo,
-String journalId, InetSocketAddress addr);
+String journalId, String nameServiceId, InetSocketAddress addr);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dd1eeb9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 6cd892c..3036735 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -95,6 +95,8 @@ public class IPCLoggerChannel implements AsyncLogger {
   private long committedTxId = HdfsServerConstants.INVALID_TXID;
   
   private final String journalId;
+  private final String nameServiceId;
+
   private final NamespaceInfo nsInfo;
 
   private URL httpServerURL;
@@ -152,8 +154,8 @@ public class IPCLoggerChannel implements AsyncLogger {
   static final Factory FACTORY = new AsyncLogger.Factory() {
 @Override
 public AsyncLogger createLogger(Configuration conf, NamespaceInfo nsInfo,
-String journalId, InetSocketAddress addr) {
-  return new IPCLoggerChannel(conf, nsInfo, journalId, addr);
+String journalId, String nameServiceId, InetSocketAddress addr) {
+  return new IPCLoggerChannel(conf, nsInfo, journalId, nameServiceId, 
addr);
 }
   };
 
@@ -161,11 +163,19 @@ public class IPCLoggerChannel implements AsyncLogger {
   NamespaceInfo nsInfo,
   String journalId,
   InetSocketAddress addr) {
+this(conf, nsInfo, journalId, null, addr);
+  }
+
+  public IPCLoggerChannel(Configuration conf,
+  NamespaceInfo nsInfo,
+  String journalId,
+  String nameServiceId,
+  InetSocketAddress addr) {
 this.conf = conf;
 this.nsInfo = nsInfo;

[32/50] [abbrv] hadoop git commit: HDFS-12612. DFSStripedOutputStream.close will throw if called a second time with a failed streamer. (Lei (Eddy) Xu)

2017-10-19 Thread haibochen
HDFS-12612. DFSStripedOutputStream.close will throw if called a second time 
with a failed streamer. (Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f27a4ad0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f27a4ad0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f27a4ad0

Branch: refs/heads/YARN-1011
Commit: f27a4ad0324aa0b4080a1c4c6bf4cd560c927e20
Parents: 7532339
Author: Lei Xu 
Authored: Tue Oct 17 15:52:09 2017 -0700
Committer: Lei Xu 
Committed: Tue Oct 17 15:52:09 2017 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 40 +++
 .../org/apache/hadoop/hdfs/DataStreamer.java| 31 ++--
 .../apache/hadoop/hdfs/ExceptionLastSeen.java   | 75 +++
 .../TestDFSStripedOutputStreamWithFailure.java  | 76 
 4 files changed, 184 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f27a4ad0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 1b83959..39717ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -82,6 +82,12 @@ public class DFSStripedOutputStream extends DFSOutputStream
 implements StreamCapabilities {
   private static final ByteBufferPool BUFFER_POOL = new 
ElasticByteBufferPool();
 
+  /**
+   * OutputStream level last exception, will be used to indicate the fatal
+   * exception of this stream, i.e., being aborted.
+   */
+  private final ExceptionLastSeen exceptionLastSeen = new ExceptionLastSeen();
+
   static class MultipleBlockingQueue<T> {
     private final List<BlockingQueue<T>> queues;
 
@@ -971,12 +977,9 @@ public class DFSStripedOutputStream extends DFSOutputStream
   if (isClosed()) {
 return;
   }
-  for (StripedDataStreamer streamer : streamers) {
-streamer.getLastException().set(
-new IOException("Lease timeout of "
-+ (dfsClient.getConf().getHdfsTimeout() / 1000)
-+ " seconds expired."));
-  }
+  exceptionLastSeen.set(new IOException("Lease timeout of "
+  + (dfsClient.getConf().getHdfsTimeout() / 1000)
+  + " seconds expired."));
 
   try {
 closeThreads(true);
@@ -1133,18 +1136,26 @@ public class DFSStripedOutputStream extends DFSOutputStream
   @Override
   protected synchronized void closeImpl() throws IOException {
 if (isClosed()) {
+  exceptionLastSeen.check(true);
+
+  // Writing to at least {dataUnits} replicas can be considered as success,
+  // and the rest of data can be recovered.
+  final int minReplication = ecPolicy.getNumDataUnits();
+  int goodStreamers = 0;
   final MultipleIOException.Builder b = new MultipleIOException.Builder();
-  for(int i = 0; i < streamers.size(); i++) {
-final StripedDataStreamer si = getStripedDataStreamer(i);
+  for (final StripedDataStreamer si : streamers) {
 try {
   si.getLastException().check(true);
+  goodStreamers++;
 } catch (IOException e) {
   b.add(e);
 }
   }
-  final IOException ioe = b.build();
-  if (ioe != null) {
-throw ioe;
+  if (goodStreamers < minReplication) {
+final IOException ioe = b.build();
+if (ioe != null) {
+  throw ioe;
+}
   }
   return;
 }
@@ -1183,9 +1194,10 @@ public class DFSStripedOutputStream extends DFSOutputStream
     }
   } finally {
 // Failures may happen when flushing data/parity data out. Exceptions
-// may be thrown if more than 3 streamers fail, or updatePipeline RPC
-// fails. Streamers may keep waiting for the new block/GS information.
-// Thus need to force closing these threads.
+// may be thrown if the number of failed streamers is more than the
+// number of parity blocks, or updatePipeline RPC fails. Streamers may
+// keep waiting for the new block/GS information. Thus need to force
+// closing these threads.
 closeThreads(true);
   }
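
The close logic above treats writing to at least numDataUnits streamers as success, since up to numParityUnits failures remain recoverable. A standalone sketch of that threshold check (simplified: the real code aggregates failures via MultipleIOException):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class StripedCloseCheckSketch {
  // Mirrors the new closeImpl logic: a repeated close only fails when the
  // number of healthy streamers drops below the data-unit count.
  static void checkClosedStreamers(List<IOException> streamerFailures,
      int totalStreamers, int numDataUnits) throws IOException {
    int good = totalStreamers - streamerFailures.size();
    if (good < numDataUnits && !streamerFailures.isEmpty()) {
      throw streamerFailures.get(0);  // real code builds a MultipleIOException
    }
  }

  public static void main(String[] args) throws IOException {
    List<IOException> failures = new ArrayList<>();
    failures.add(new IOException("streamer 5 failed"));
    // RS-6-3 style layout: 9 streamers, 6 data units -- one failure is tolerated.
    checkClosedStreamers(failures, 9, 6);
    System.out.println("close succeeded with " + failures.size() + " failure(s)");
  }
}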
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f27a4ad0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java

[26/50] [abbrv] hadoop git commit: YARN-7308. TestApplicationACLs fails with FairScheduler (rkanter)

2017-10-19 Thread haibochen
YARN-7308. TestApplicationACLs fails with FairScheduler (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a615259
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a615259
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a615259

Branch: refs/heads/YARN-1011
Commit: 8a615259281511de639c1f9aa719a842e34a523f
Parents: 4540ffd
Author: Robert Kanter 
Authored: Mon Oct 16 15:34:32 2017 -0700
Committer: Robert Kanter 
Committed: Mon Oct 16 15:34:32 2017 -0700

--
 .../ParameterizedSchedulerTestBase.java |  3 +-
 .../resourcemanager/TestApplicationACLs.java| 90 +++-
 2 files changed, 69 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a615259/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
index 289ff1c..9a29a89 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
@@ -89,7 +89,8 @@ public abstract class ParameterizedSchedulerTestBase {
 }
   }
 
-  private void configureFairScheduler(YarnConfiguration conf) throws IOException {
+  protected void configureFairScheduler(YarnConfiguration conf)
+  throws IOException {
 // Disable queueMaxAMShare limitation for fair scheduler
 PrintWriter out = new PrintWriter(new FileWriter(FS_ALLOC_FILE));
 out.println("");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a615259/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
index cab1679..c7ed02c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
@@ -22,7 +22,10 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 import static org.mockito.Matchers.any;
 
+import java.io.File;
+import java.io.FileWriter;
 import java.io.IOException;
+import java.io.PrintWriter;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
@@ -30,6 +33,9 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
+import org.junit.After;
 import org.junit.Assert;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -57,19 +63,17 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreFactory;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager;
 import 

[24/50] [abbrv] hadoop git commit: HADOOP-14949. TestKMS#testACLs fails intermittently.

2017-10-19 Thread haibochen
HADOOP-14949. TestKMS#testACLs fails intermittently.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b7ff624c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b7ff624c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b7ff624c

Branch: refs/heads/YARN-1011
Commit: b7ff624c767f76ca007d695afdc7a3815fceb04c
Parents: 1fcbe7c
Author: Xiao Chen 
Authored: Mon Oct 16 13:53:31 2017 -0700
Committer: Xiao Chen 
Committed: Mon Oct 16 14:19:31 2017 -0700

--
 .../java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java  | 4 
 .../apache/hadoop/crypto/key/kms/server/KMSConfiguration.java  | 6 ++
 .../java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java  | 6 +++---
 3 files changed, 13 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7ff624c/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
index 096f756..b02f34e 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
@@ -326,4 +326,8 @@ public class KMSACLs implements Runnable, KeyACLs {
 || whitelistKeyAcls.containsKey(opType));
   }
 
+  @VisibleForTesting
+  void forceNextReloadForTesting() {
+lastReload = 0;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7ff624c/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index cf02dd1..df17ef5 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.net.MalformedURLException;
@@ -31,6 +33,8 @@ import java.net.URL;
 @InterfaceAudience.Private
 public class KMSConfiguration {
 
+  static final Logger LOG = LoggerFactory.getLogger(KMSConfiguration.class);
+
   public static final String KMS_CONFIG_DIR = "kms.config.dir";
   public static final String KMS_DEFAULT_XML = "kms-default.xml";
   public static final String KMS_SITE_XML = "kms-site.xml";
@@ -138,6 +142,8 @@ public class KMSConfiguration {
 "' must be an absolute path: " + confDir);
   }
   File f = new File(confDir, KMS_ACLS_XML);
+  LOG.trace("Checking file {}, modification time is {}, last reload time 
is"
+  + " {}", f.getPath(), f.lastModified(), time);
   // at least 100ms newer than time, we do this to ensure the file
   // has been properly closed/flushed
   newer = f.lastModified() - time > 100;
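
A standalone sketch of the 100 ms newer-than guard referenced by the new trace message, using plain java.io (the helper name is illustrative):

import java.io.File;
import java.io.IOException;

public class AclsFileNewerSketch {
  // Mirrors the reload guard above: the ACL file only counts as changed if
  // it is at least 100 ms newer than the last reload, which absorbs writers
  // that have not fully flushed/closed the file yet.
  static boolean isNewer(File f, long lastReloadMillis) {
    return f.lastModified() - lastReloadMillis > 100;
  }

  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("kms-acls", ".xml");
    f.deleteOnExit();
    long reloadedAt = f.lastModified();
    System.out.println(isNewer(f, reloadedAt));         // false: no change yet
    System.out.println(isNewer(f, reloadedAt - 1000));  // true: reload 1s older
  }
}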

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7ff624c/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 45546f2..30e84eb 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -54,6 +54,7 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.event.Level;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -1637,13 +1638,12 @@ public class TestKMS {
 //stop the reloader, to avoid running while we are writing the new file
 KMSWebApp.getACLs().stopReloader();
 
+GenericTestUtils.setLogLevel(KMSConfiguration.LOG, 

[22/50] [abbrv] hadoop git commit: YARN-7275. NM Statestore cleanup for Container updates. (Kartheek Muthyala via asuresh)

2017-10-19 Thread haibochen
YARN-7275. NM Statestore cleanup for Container updates. (Kartheek Muthyala via 
asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a50be1b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a50be1b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a50be1b8

Branch: refs/heads/YARN-1011
Commit: a50be1b8f432f50c940b66d12c7de87b95ea47c0
Parents: 8dbc890
Author: Arun Suresh 
Authored: Mon Oct 16 13:08:52 2017 -0700
Committer: Arun Suresh 
Committed: Mon Oct 16 13:12:15 2017 -0700

--
 .../containermanager/ContainerManagerImpl.java  |  9 +++
 .../container/ContainerEventType.java   |  4 +-
 .../container/ContainerImpl.java| 43 +-
 .../launcher/ContainersLauncher.java|  2 -
 .../launcher/RecoverPausedContainerLaunch.java  | 38 +++-
 .../launcher/RecoveredContainerLaunch.java  |  2 +-
 .../scheduler/ContainerScheduler.java   | 31 ++
 .../scheduler/ContainerSchedulerEventType.java  |  3 +-
 .../recovery/NMLeveldbStateStoreService.java| 62 ++--
 .../recovery/NMNullStateStoreService.java   |  6 +-
 .../recovery/NMStateStoreService.java   | 10 ++--
 .../TestContainerManagerRecovery.java   |  4 ++
 .../recovery/NMMemoryStateStoreService.java | 18 --
 .../TestNMLeveldbStateStoreService.java | 16 +++--
 14 files changed, 179 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a50be1b8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 38eb636..7d5525a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.ByteString;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.UpdateContainerTokenEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.ContainerSchedulerEvent;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -367,6 +368,13 @@ public class ContainerManagerImpl extends CompositeService implements
 }
 recoverContainer(rcs);
   }
+
+  //Dispatching the RECOVERY_COMPLETED event through the dispatcher
+  //so that all the paused, scheduled and queued containers will
+  //be scheduled for execution on availability of resources.
+  dispatcher.getEventHandler().handle(
+  new ContainerSchedulerEvent(null,
+  ContainerSchedulerEventType.RECOVERY_COMPLETED));
 } else {
   LOG.info("Not a recoverable state store. Nothing to recover.");
 }
@@ -480,6 +488,7 @@ public class ContainerManagerImpl extends CompositeService implements
 Container container = new ContainerImpl(getConfig(), dispatcher,
 launchContext, credentials, metrics, token, context, rcs);
 context.getContainers().put(token.getContainerID(), container);
+containerScheduler.recoverActiveContainer(container, rcs.getStatus());
 app.handle(new ApplicationContainerInitEvent(container));
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a50be1b8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEventType.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEventType.java
 

[44/50] [abbrv] hadoop git commit: HADOOP-14816. Update Dockerfile to use Xenial. Contributed by Allen Wittenauer

2017-10-19 Thread haibochen
HADOOP-14816. Update Dockerfile to use Xenial. Contributed by Allen Wittenauer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca8ddc6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca8ddc6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca8ddc6a

Branch: refs/heads/YARN-1011
Commit: ca8ddc6aa413de347866ad9a0a3407356a280a1f
Parents: cbd2b73
Author: Chris Douglas 
Authored: Thu Oct 19 16:33:47 2017 -0700
Committer: Chris Douglas 
Committed: Thu Oct 19 16:45:18 2017 -0700

--
 dev-support/docker/Dockerfile   | 161 +++
 dev-support/docker/hadoop_env_checks.sh |  15 ++-
 2 files changed, 98 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca8ddc6a/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 31ac611..8af002d 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -18,21 +18,28 @@
 # Dockerfile for installing the necessary dependencies for building Hadoop.
 # See BUILDING.txt.
 
-
-FROM ubuntu:trusty
+FROM ubuntu:xenial
 
 WORKDIR /root
 
+#
+# Disable suggests/recommends
+#
+RUN echo APT::Install-Recommends "0"\; > /etc/apt/apt.conf.d/10disableextras
+RUN echo APT::Install-Suggests "0"\; >>  /etc/apt/apt.conf.d/10disableextras
+
 ENV DEBIAN_FRONTEND noninteractive
 ENV DEBCONF_TERSE true
 
 ##
-# Install common dependencies from packages
+# Install common dependencies from packages. Versions here are either
+# sufficient or irrelevant.
 #
 # WARNING: DO NOT PUT JAVA APPS HERE! Otherwise they will install default
 # Ubuntu Java.  See Java section below!
 ##
-RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
+RUN apt-get -q update && apt-get -q install -y \
+apt-utils \
 build-essential \
 bzip2 \
 curl \
@@ -42,7 +49,6 @@ RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
 gcc \
 git \
 gnupg-agent \
-make \
 libbz2-dev \
 libcurl4-openssl-dev \
 libfuse-dev \
@@ -51,106 +57,110 @@ RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
 libsnappy-dev \
 libssl-dev \
 libtool \
+locales \
+make \
 pinentry-curses \
 pkg-config \
-protobuf-compiler \
-protobuf-c-compiler \
 python \
 python2.7 \
-python2.7-dev \
 python-pip \
+python-pkg-resources \
+python-setuptools \
+python-wheel \
 rsync \
+software-properties-common \
 snappy \
+sudo \
 zlib1g-dev
 
 ###
-# Oracle Java
+# OpenJDK 8
 ###
+RUN apt-get -q install -y openjdk-8-jdk
 
-RUN echo "dot_style = mega" > "/root/.wgetrc"
-RUN echo "quiet = on" >> "/root/.wgetrc"
-
-RUN apt-get -q install --no-install-recommends -y software-properties-common
-RUN add-apt-repository -y ppa:webupd8team/java
-RUN apt-get -q update
-
-# Auto-accept the Oracle JDK license
-RUN echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
-RUN apt-get -q install --no-install-recommends -y oracle-java8-installer
-
-
-# Apps that require Java
-###
-RUN apt-get -q update && apt-get -q install --no-install-recommends -y ant
+###
+# OpenJDK 9
+# w/workaround for
+# https://bugs.launchpad.net/ubuntu/+source/openjdk-9/+bug/1593191
+###
+RUN apt-get -o Dpkg::Options::="--force-overwrite" \
+-q install -y \
+openjdk-9-jdk-headless
 
-##
-# Install Apache Maven
-##
-RUN mkdir -p /opt/maven && \
-curl -L -s -S \
-     https://www-us.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz \
- -o /opt/maven.tar.gz && \
-tar xzf /opt/maven.tar.gz --strip-components 1 -C /opt/maven
-ENV MAVEN_HOME /opt/maven
-ENV PATH "${PATH}:/opt/maven/bin"
+###
+# Set default Java
+###
+#
+# By default, OpenJDK sets the default Java to the highest version.
+# We want the opposite, so
+#
+RUN update-java-alternatives --set java-1.8.0-openjdk-amd64
+RUN update-alternatives --get-selections | grep -i jdk | \
+while read line; do \
+  alternative=$(echo $line | awk '{print $1}'); \
+  path=$(echo $line | awk '{print $3}'); \
+  newpath=$(echo $path | sed -e 's/java-9/java-8/'); \
+  update-alternatives --set $alternative $newpath; \
+done
 
 ##
-# Install cmake
+# Install cmake 3.1.0 (3.5.1 ships with Xenial)
 ##
 RUN mkdir -p /opt/cmake && \
 curl -L -s -S \
- https://cmake.org/files/v3.1/cmake-3.1.0-Linux-x86_64.tar.gz \
- -o /opt/cmake.tar.gz && \
+  

[21/50] [abbrv] hadoop git commit: HDFS-12485. expunge may fail to remove trash from encryption zone. Contributed by Wei-Chiu Chuang.

2017-10-19 Thread haibochen
HDFS-12485. expunge may fail to remove trash from encryption zone. Contributed 
by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8dbc8909
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8dbc8909
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8dbc8909

Branch: refs/heads/YARN-1011
Commit: 8dbc8909c92d502d10a7f94d1de3171878a43b04
Parents: 21bc855
Author: Wei-Chiu Chuang 
Authored: Mon Oct 16 12:57:48 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Oct 16 12:57:48 2017 -0700

--
 .../main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java   | 3 +--
 .../apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java| 1 -
 2 files changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dbc8909/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index c9f4490..a8a5cfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2783,8 +2783,7 @@ public class DistributedFileSystem extends FileSystem {
 }
   }
 } else {
-  Path userTrash = new Path(ezTrashRoot, System.getProperty(
-  "user.name"));
+  Path userTrash = new Path(ezTrashRoot, dfs.ugi.getShortUserName());
   try {
 ret.add(getFileStatus(userTrash));
   } catch (FileNotFoundException ignored) {
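
The one-line fix swaps a JVM-global system property for the authenticated caller's short name. A minimal sketch, assuming a Hadoop client on the classpath and an illustrative encryption-zone trash root:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class TrashUserSketch {
  public static void main(String[] args) throws IOException {
    Path ezTrashRoot = new Path("/ez/.Trash");  // hypothetical EZ trash root
    // Unlike the "user.name" system property, the UGI short name reflects
    // the authenticated (possibly Kerberos) caller -- the point of the fix.
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    System.out.println(new Path(ezTrashRoot, user));
  }
}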

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dbc8909/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
index 314adfb..a8e2a71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
@@ -225,7 +225,6 @@ public class TestTrashWithSecureEncryptionZones {
 clientConf = new Configuration(conf);
 clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
 shell = new FsShell(clientConf);
-System.setProperty("user.name", HDFS_USER_NAME);
   }
 
   @AfterClass





[19/50] [abbrv] hadoop git commit: HDFS-12637. Extend TestDistributedFileSystemWithECFile with a random EC policy. Contributed by Takanobu Asanuma.

2017-10-19 Thread haibochen
HDFS-12637. Extend TestDistributedFileSystemWithECFile with a random EC policy. 
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7bd70094
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7bd70094
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7bd70094

Branch: refs/heads/YARN-1011
Commit: 7bd700941d0a423d5232331fa19eab5fdab6a6fb
Parents: 035c6ee
Author: Xiao Chen 
Authored: Mon Oct 16 09:54:37 2017 -0700
Committer: Xiao Chen 
Committed: Mon Oct 16 09:55:22 2017 -0700

--
 .../TestDistributedFileSystemWithECFile.java| 37 +--
 ...dFileSystemWithECFileWithRandomECPolicy.java | 49 
 2 files changed, 73 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bd70094/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
index d4e01b7..14a2ec4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
@@ -39,34 +39,45 @@ import static org.junit.Assert.assertTrue;
  * FileSystem.listFiles for erasure coded files.
  */
 public class TestDistributedFileSystemWithECFile {
-  private final ErasureCodingPolicy ecPolicy =
-  StripedFileTestUtil.getDefaultECPolicy();
-  private final int cellSize = ecPolicy.getCellSize();
-  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
-  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
-  private final int numDNs = dataBlocks + parityBlocks;
-  private final int stripesPerBlock = 4;
-  private final int blockSize = stripesPerBlock * cellSize;
-  private final int blockGroupSize = blockSize * dataBlocks;
+  private ErasureCodingPolicy ecPolicy;
+  private int cellSize;
+  private short dataBlocks;
+  private short parityBlocks;
+  private int numDNs;
+  private int stripesPerBlock;
+  private int blockSize;
+  private int blockGroupSize;
 
   private MiniDFSCluster cluster;
   private FileContext fileContext;
   private DistributedFileSystem fs;
   private Configuration conf = new HdfsConfiguration();
 
+  public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Before
   public void setup() throws IOException {
+ecPolicy = getEcPolicy();
+cellSize = ecPolicy.getCellSize();
+dataBlocks = (short) ecPolicy.getNumDataUnits();
+parityBlocks = (short) ecPolicy.getNumParityUnits();
+numDNs = dataBlocks + parityBlocks;
+stripesPerBlock = 4;
+blockSize = stripesPerBlock * cellSize;
+blockGroupSize = blockSize * dataBlocks;
+
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
 false);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
 fileContext = FileContext.getFileContext(cluster.getURI(0), conf);
 fs = cluster.getFileSystem();
-fs.enableErasureCodingPolicy(
-StripedFileTestUtil.getDefaultECPolicy().getName());
+fs.enableErasureCodingPolicy(ecPolicy.getName());
 fs.mkdirs(new Path("/ec"));
 cluster.getFileSystem().getClient().setErasureCodingPolicy("/ec",
-StripedFileTestUtil.getDefaultECPolicy().getName());
+ecPolicy.getName());
   }
 
   @After
@@ -121,7 +132,7 @@ public class TestDistributedFileSystemWithECFile {
 
  @Test(timeout=60000)
   public void testListECFilesSmallerThanOneStripe() throws Exception {
-int dataBlocksNum = 3;
+int dataBlocksNum = dataBlocks;
 createFile("/ec/smallstripe", cellSize * dataBlocksNum);
 RemoteIterator<LocatedFileStatus> iter =
 cluster.getFileSystem().listFiles(new Path("/ec"), true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bd70094/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFileWithRandomECPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFileWithRandomECPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFileWithRandomECPolicy.java
new file mode 100644
index 000..afa7569

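The new test file is cut off above, but the refactor makes its shape clear:
the fixed fields became a template method precisely so a subclass can swap the
policy. A hedged sketch of what that subclass presumably looks like; the
helper name getRandomNonDefaultECPolicy is an assumption, not confirmed by
this excerpt.

public class TestDistributedFileSystemWithECFileWithRandomECPolicy
    extends TestDistributedFileSystemWithECFile {
  @Override
  public ErasureCodingPolicy getEcPolicy() {
    // Assumed helper: any non-default policy exercises the same code paths
    // with different cell sizes and data/parity unit counts.
    return StripedFileTestUtil.getRandomNonDefaultECPolicy();
  }
}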
[10/50] [abbrv] hadoop git commit: YARN-7270. Fix unsafe casting from long to int for class Resource and its sub-classes. (Yufei)

2017-10-19 Thread haibochen
YARN-7270. Fix unsafe casting from long to int for class Resource and
its sub-classes. (Yufei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a27c2c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a27c2c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a27c2c3

Branch: refs/heads/YARN-1011
Commit: 7a27c2c367518e1bf6ee393391a2f9b412113819
Parents: f4fb669
Author: Yufei Gu 
Authored: Fri Oct 13 12:38:58 2017 -0700
Committer: Yufei Gu 
Committed: Fri Oct 13 12:41:59 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   | 14 +++
 .../api/records/impl/LightWeightResource.java   |  4 +-
 .../hadoop/yarn/api/records/TestResource.java   | 43 
 .../api/records/impl/pb/ResourcePBImpl.java |  4 +-
 .../hadoop/yarn/util/resource/Resources.java| 11 +
 .../hadoop/yarn/api/TestResourcePBImpl.java | 27 
 6 files changed, 90 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a27c2c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index acd0e60..9a5bc79 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -466,4 +466,18 @@ public abstract class Resource implements 
Comparable<Resource> {
 }
 return (int) result;
   }
+
+  /**
+   * Convert long to int for a resource value safely. This method assumes
+   * resource value is positive.
+   *
+   * @param value long resource value
+   * @return int resource value
+   */
+  protected static int castToIntSafely(long value) {
+if (value > Integer.MAX_VALUE) {
+  return Integer.MAX_VALUE;
+}
+return Long.valueOf(value).intValue();
+  }
 }

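A self-contained illustration (mine, not the patch's) of what the clamp buys
over a raw narrowing cast:

public class CastDemo {
  static int castToIntSafely(long value) {
    return value > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) value;
  }

  public static void main(String[] args) {
    long eightGiB = 8L * 1024 * 1024 * 1024;       // 8589934592
    System.out.println((int) eightGiB);            // raw cast wraps to 0
    System.out.println(castToIntSafely(eightGiB)); // clamps to 2147483647
  }
}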
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a27c2c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
index b80e133..a64d242 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
@@ -92,7 +92,7 @@ public class LightWeightResource extends Resource {
   @Override
   @SuppressWarnings("deprecation")
   public int getMemory() {
-return (int) memoryResInfo.getValue();
+return castToIntSafely(memoryResInfo.getValue());
   }
 
   @Override
@@ -113,7 +113,7 @@ public class LightWeightResource extends Resource {
 
   @Override
   public int getVirtualCores() {
-return (int) vcoresResInfo.getValue();
+return castToIntSafely(vcoresResInfo.getValue());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a27c2c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/records/TestResource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/records/TestResource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/records/TestResource.java
new file mode 100644
index 000..e0ec370
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/records/TestResource.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or 

[30/50] [abbrv] hadoop git commit: YARN-7341. TestRouterWebServiceUtil#testMergeMetrics is flakey. (Robert Kanter via Haibo Chen)

2017-10-19 Thread haibochen
YARN-7341. TestRouterWebServiceUtil#testMergeMetrics is flakey. (Robert Kanter 
via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/acabc657
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/acabc657
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/acabc657

Branch: refs/heads/YARN-1011
Commit: acabc657ff5433f36ce1b238cecd3a3b5bbe87ae
Parents: 31ebccc
Author: Haibo Chen 
Authored: Tue Oct 17 10:15:53 2017 -0700
Committer: Haibo Chen 
Committed: Tue Oct 17 10:15:53 2017 -0700

--
 .../router/webapp/RouterWebServiceUtil.java | 36 +++-
 .../router/webapp/TestRouterWebServiceUtil.java | 17 ++---
 2 files changed, 33 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/acabc657/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
index 5528e78..76435f0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
@@ -406,22 +406,26 @@ public final class RouterWebServiceUtil {
 metrics.setContainersPending(metrics.getPendingContainers()
 + metricsResponse.getPendingContainers());
 
-metrics.setTotalMB(metrics.getTotalMB() + metricsResponse.getTotalMB());
-metrics.setTotalVirtualCores(
-metrics.getTotalVirtualCores() + metrics.getTotalVirtualCores());
-metrics.setTotalNodes(metrics.getTotalNodes() + metrics.getTotalNodes());
-metrics.setLostNodes(metrics.getLostNodes() + metrics.getLostNodes());
-metrics.setUnhealthyNodes(
-metrics.getUnhealthyNodes() + metrics.getUnhealthyNodes());
-metrics.setDecommissioningNodes(
-metrics.getDecommissioningNodes() + metrics.getDecommissioningNodes());
-metrics.setDecommissionedNodes(
-metrics.getDecommissionedNodes() + metrics.getDecommissionedNodes());
-metrics.setRebootedNodes(
-metrics.getRebootedNodes() + metrics.getRebootedNodes());
-metrics.setActiveNodes(metrics.getActiveNodes() + 
metrics.getActiveNodes());
-metrics.setShutdownNodes(
-metrics.getShutdownNodes() + metrics.getShutdownNodes());
+metrics.setTotalMB(metrics.getTotalMB()
++ metricsResponse.getTotalMB());
+metrics.setTotalVirtualCores(metrics.getTotalVirtualCores()
++ metricsResponse.getTotalVirtualCores());
+metrics.setTotalNodes(metrics.getTotalNodes()
++ metricsResponse.getTotalNodes());
+metrics.setLostNodes(metrics.getLostNodes()
++ metricsResponse.getLostNodes());
+metrics.setUnhealthyNodes(metrics.getUnhealthyNodes()
++ metricsResponse.getUnhealthyNodes());
+metrics.setDecommissioningNodes(metrics.getDecommissioningNodes()
++ metricsResponse.getDecommissioningNodes());
+metrics.setDecommissionedNodes(metrics.getDecommissionedNodes()
++ metricsResponse.getDecommissionedNodes());
+metrics.setRebootedNodes(metrics.getRebootedNodes()
++ metricsResponse.getRebootedNodes());
+metrics.setActiveNodes(metrics.getActiveNodes()
++ metricsResponse.getActiveNodes());
+metrics.setShutdownNodes(metrics.getShutdownNodes()
++ metricsResponse.getShutdownNodes());
   }
 
 }

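The flakiness traces to a copy-paste slip: most of the removed lines add an
accumulator to itself instead of to the incoming response. A toy illustration,
independent of the Router classes:

public class MergeDemo {
  public static void main(String[] args) {
    long totalNodes = 5;    // accumulated across subclusters so far
    long responseNodes = 3; // reported by the next subcluster
    System.out.println(totalNodes + totalNodes);    // 10: the old self-merge
    System.out.println(totalNodes + responseNodes); //  8: the intended merge
  }
}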
http://git-wip-us.apache.org/repos/asf/hadoop/blob/acabc657/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServiceUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServiceUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServiceUtil.java
index 7073b3b..edf3804 100644
--- 

[27/50] [abbrv] hadoop git commit: HADOOP-14948. Document missing config key hadoop.treat.subject.external. Contributed by Ajay Kumar.

2017-10-19 Thread haibochen
HADOOP-14948. Document missing config key hadoop.treat.subject.external. 
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e906108f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e906108f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e906108f

Branch: refs/heads/YARN-1011
Commit: e906108fc98a011630d12a43e557b81d7ef7ea5d
Parents: 8a61525
Author: Wei-Chiu Chuang 
Authored: Mon Oct 16 16:42:59 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Oct 16 16:42:59 2017 -0700

--
 .../hadoop-common/src/main/resources/core-default.xml  | 13 +
 1 file changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e906108f/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index e7ea10f..bde7a85 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2763,4 +2763,17 @@
     the ZK CLI).
     </description>
   </property>
+  <property>
+    <name>hadoop.treat.subject.external</name>
+    <value>false</value>
+    <description>
+      When creating UGI with UserGroupInformation(Subject), treat the passed
+      subject external if set to true, and assume the owner of the subject
+      should do the credential renewal.
+
+      When true this property will introduce an incompatible change which
+      may require changes in client code. For more details, see the jiras:
+      HADOOP-13805,HADOOP-13558.
+    </description>
+  </property>
 </configuration>


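For reference, a minimal sketch of reading the newly documented flag; the key
literal and default mirror the property block above.

import org.apache.hadoop.conf.Configuration;

public class SubjectFlagDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Defaults to false, preserving the existing UGI(Subject) behavior.
    boolean external =
        conf.getBoolean("hadoop.treat.subject.external", false);
    System.out.println("treat subject as external: " + external);
  }
}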
-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/50] [abbrv] hadoop git commit: HDFS-12659. Update TestDeadDatanode#testNonDFSUsedONDeadNodeReReg to increase heartbeat recheck interval. Contributed by Ajay Kumar.

2017-10-19 Thread haibochen
HDFS-12659. Update TestDeadDatanode#testNonDFSUsedONDeadNodeReReg to increase 
heartbeat recheck interval. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20575ece
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20575ece
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20575ece

Branch: refs/heads/YARN-1011
Commit: 20575ececb34e137d62e33d02d8928a7bdf4248a
Parents: 87ea1df
Author: Xiao Chen 
Authored: Sun Oct 15 19:25:29 2017 -0700
Committer: Xiao Chen 
Committed: Sun Oct 15 19:28:22 2017 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20575ece/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 1860565..366f584 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -187,7 +187,8 @@ public class TestDeadDatanode {
   public void testNonDFSUsedONDeadNodeReReg() throws Exception {
 Configuration conf = new HdfsConfiguration();
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
-conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+3000);
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
 6 * 1000);
 long CAPACITY = 5000L;

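Rough arithmetic behind the new value, paraphrasing DatanodeManager's expiry
rule (treat the formula as approximate):

// dead-node expiry ~= 2 * heartbeat.recheck-interval + 10 * heartbeat-interval
// old: 2 * 1ms    + 10 * 1000ms ~= 10.0s
// new: 2 * 3000ms + 10 * 1000ms  = 16.0s
// The wider margin gives the re-registering datanode time to heartbeat again
// before the monitor can re-declare it dead, the plausible source of the
// flakiness in testNonDFSUsedONDeadNodeReReg.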

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/50] [abbrv] hadoop git commit: YARN-7180. Remove class ResourceType. Contributed by Sunil G.

2017-10-19 Thread haibochen
YARN-7180. Remove class ResourceType. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4fb6695
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4fb6695
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4fb6695

Branch: refs/heads/YARN-1011
Commit: f4fb6695a3a8f66d45001efe89f9950320c6a1e4
Parents: 0de40f0
Author: Rohith Sharma K S 
Authored: Fri Oct 13 15:17:05 2017 +0530
Committer: Rohith Sharma K S 
Committed: Fri Oct 13 15:17:05 2017 +0530

--
 .../resourcemanager/resource/ResourceType.java  | 28 
 .../fair/FairSchedulerConfiguration.java|  8 +++---
 2 files changed, 5 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4fb6695/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceType.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceType.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceType.java
deleted file mode 100644
index 9dd245b..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceType.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.yarn.server.resourcemanager.resource;
-
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
-
-@Private
-@Evolving
-public enum ResourceType {
-  MEMORY, CPU
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4fb6695/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index 960299b..9c9eee6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -29,8 +29,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 @Private
@@ -313,11 +313,13 @@ public class FairSchedulerConfiguration extends 
Configuration {
 
   private static double[] getResourcePercentage(
   String val) throws AllocationConfigurationException {
-double[] resourcePercentage = new double[ResourceType.values().length];
+int numberOfKnownResourceTypes = ResourceUtils
+.getNumberOfKnownResourceTypes();
+double[] resourcePercentage = new double[numberOfKnownResourceTypes];

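The message is truncated mid-hunk, but the point of the swap is visible: the
percentage array is now sized from the runtime resource-type registry instead
of the removed two-value enum. A small sketch using the import added above:

import org.apache.hadoop.yarn.util.resource.ResourceUtils;

public class PercentageSizingDemo {
  public static void main(String[] args) {
    // 2 with only the built-ins (memory, vcores); larger when extended
    // resource types such as GPUs are configured.
    int n = ResourceUtils.getNumberOfKnownResourceTypes();
    double[] resourcePercentage = new double[n];
    System.out.println(resourcePercentage.length);
  }
}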
[05/50] [abbrv] hadoop git commit: YARN-7317. Fix overallocation resulted from ceiling in LocalityMulticastAMRMProxyPolicy. (contributed by Botong Huang via curino)

2017-10-19 Thread haibochen
YARN-7317. Fix overallocation resulted from ceiling in 
LocalityMulticastAMRMProxyPolicy. (contributed by Botong Huang via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13fcfb3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13fcfb3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13fcfb3d

Branch: refs/heads/YARN-1011
Commit: 13fcfb3d46ee7a0d606b4bb221d1cd66ef2a5a7c
Parents: 075358e
Author: Carlo Curino 
Authored: Thu Oct 12 10:38:58 2017 -0700
Committer: Carlo Curino 
Committed: Thu Oct 12 10:38:58 2017 -0700

--
 .../policies/FederationPolicyUtils.java |  41 ++-
 .../LocalityMulticastAMRMProxyPolicy.java   | 103 ++---
 .../router/WeightedRandomRouterPolicy.java  |  33 ++
 .../policies/TestFederationPolicyUtils.java |  58 ++
 .../TestLocalityMulticastAMRMProxyPolicy.java   | 110 ++-
 5 files changed, 279 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13fcfb3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java
index 7716a6f..aaa2c43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java
@@ -19,7 +19,9 @@ package org.apache.hadoop.yarn.server.federation.policies;
 
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
 import java.util.List;
+import java.util.Random;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
@@ -46,6 +48,8 @@ public final class FederationPolicyUtils {
   public static final String NO_ACTIVE_SUBCLUSTER_AVAILABLE =
   "No active SubCluster available to submit the request.";
 
+  private static final Random RAND = new Random(System.currentTimeMillis());
+
   /** Disable constructor. */
   private FederationPolicyUtils() {
   }
@@ -200,4 +204,39 @@ public final class FederationPolicyUtils {
 FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE);
   }
 
-}
\ No newline at end of file
+  /**
+   * Select a random bin according to the weight array for the bins. Only bins
+   * with positive weights will be considered. If no positive weight found,
+   * return -1.
+   *
+   * @param weights the weight array
+   * @return the index of the sample in the array
+   */
+  public static int getWeightedRandom(ArrayList<Float> weights) {
+int i;
+float totalWeight = 0;
+for (i = 0; i < weights.size(); i++) {
+  if (weights.get(i) > 0) {
+totalWeight += weights.get(i);
+  }
+}
+if (totalWeight == 0) {
+  return -1;
+}
+float samplePoint = RAND.nextFloat() * totalWeight;
+int lastIndex = 0;
+for (i = 0; i < weights.size(); i++) {
+  if (weights.get(i) > 0) {
+if (samplePoint <= weights.get(i)) {
+  return i;
+} else {
+  lastIndex = i;
+  samplePoint -= weights.get(i);
+}
+  }
+}
+// This can only happen if samplePoint is very close to totalWeight and
+// float rounding kicks in during subtractions
+return lastIndex;
+  }
+}

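A usage sketch for the new helper (mine, not from the patch); bins with
non-positive weight are skipped, and an all-non-positive list returns -1.

import java.util.ArrayList;
import java.util.Arrays;
import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils;

public class WeightedRandomDemo {
  public static void main(String[] args) {
    ArrayList<Float> weights =
        new ArrayList<>(Arrays.asList(0.0f, 2.0f, 1.0f));
    // Bin 0 can never be picked; bin 1 wins about twice as often as bin 2.
    System.out.println(FederationPolicyUtils.getWeightedRandom(weights));
  }
}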
http://git-wip-us.apache.org/repos/asf/hadoop/blob/13fcfb3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
index 454962f..da30d98 100644
--- 

[01/50] [abbrv] hadoop git commit: HDFS-12542. Update javadoc and documentation for listStatus. Contributed by Ajay Kumar. [Forced Update!]

2017-10-19 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 6d9500c99 -> e5a996e85 (forced update)


HDFS-12542. Update javadoc and documentation for listStatus. Contributed by 
Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8acdf5c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8acdf5c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8acdf5c2

Branch: refs/heads/YARN-1011
Commit: 8acdf5c2742c081f3e0e96e13eb940a39964a58f
Parents: bb0a742
Author: Arpit Agarwal 
Authored: Wed Oct 11 12:29:35 2017 -0700
Committer: Arpit Agarwal 
Committed: Wed Oct 11 12:29:35 2017 -0700

--
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java | 16 
 .../hadoop/fs/http/client/HttpFSFileSystem.java   | 18 --
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md  |  5 +
 3 files changed, 33 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8acdf5c2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index d3a8b23..0fb6f22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -1495,6 +1495,15 @@ public class WebHdfsFileSystem extends FileSystem
 }
   }
 
+  /**
+   * Get {@link FileStatus} of files/directories in the given path. If path
+   * corresponds to a file then {@link FileStatus} of that file is returned.
+   * Else if path represents a directory then {@link FileStatus} of all
+   * files/directories inside given path is returned.
+   *
+   * @param f given path
+   * @return the statuses of the files/directories in the given path
+   */
   @Override
   public FileStatus[] listStatus(final Path f) throws IOException {
 statistics.incrementReadOps(1);
@@ -1519,6 +1528,13 @@ public class WebHdfsFileSystem extends FileSystem
 
   private static final byte[] EMPTY_ARRAY = new byte[] {};
 
+  /**
+   * Get DirectoryEntries of the given path. DirectoryEntries contains an array
+   * of {@link FileStatus}, as well as iteration information.
+   *
+   * @param f given path
+   * @return DirectoryEntries for given path
+   */
   @Override
   public DirectoryEntries listStatusBatch(Path f, byte[] token) throws
   FileNotFoundException, IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8acdf5c2/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 3ff7a61..ffd64a1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -705,14 +705,13 @@ public class HttpFSFileSystem extends FileSystem
   }
 
   /**
-   * List the statuses of the files/directories in the given path if the path 
is
-   * a directory.
+   * Get {@link FileStatus} of files/directories in the given path. If path
+   * corresponds to a file then {@link FileStatus} of that file is returned.
+   * Else if path represents a directory then {@link FileStatus} of all
+   * files/directories inside given path is returned.
*
* @param f given path
-   *
-   * @return the statuses of the files/directories in the given patch
-   *
-   * @throws IOException
+   * @return the statuses of the files/directories in the given path
*/
   @Override
   public FileStatus[] listStatus(Path f) throws IOException {
@@ -725,6 +724,13 @@ public class HttpFSFileSystem extends FileSystem
 return toFileStatuses(json, f);
   }
 
+  /**
+   * Get {@link DirectoryEntries} of the given path. {@link DirectoryEntries}
+   * contains an array of {@link FileStatus}, as well as iteration information.
+   *
+   * @param f given path
+   * @return {@link DirectoryEntries} for given path
+   */
   @Override
   public DirectoryEntries listStatusBatch(Path f, byte[] token) throws
   FileNotFoundException, IOException {

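The documented contract, condensed into a sketch that holds for any FileSystem
implementation (the paths here are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStatusDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // A file path yields exactly one FileStatus: the file's own.
    FileStatus[] one = fs.listStatus(new Path("/data/report.csv"));
    // A directory path yields one FileStatus per direct child.
    FileStatus[] children = fs.listStatus(new Path("/data"));
    System.out.println(one.length + " vs " + children.length);
  }
}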

[07/50] [abbrv] hadoop git commit: MAPREDUCE-5951. Add support for the YARN Shared Cache.

2017-10-19 Thread haibochen
MAPREDUCE-5951. Add support for the YARN Shared Cache.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e46d5bb9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e46d5bb9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e46d5bb9

Branch: refs/heads/YARN-1011
Commit: e46d5bb962b0c942f993afc505b165b1cd96e51b
Parents: 13fcfb3
Author: Chris Trezzo 
Authored: Thu Oct 12 10:58:02 2017 -0700
Committer: Chris Trezzo 
Committed: Thu Oct 12 10:59:20 2017 -0700

--
 .../mapreduce/v2/app/job/impl/JobImpl.java  |  16 +
 .../v2/app/job/impl/TaskAttemptImpl.java|  52 ++-
 .../mapreduce/v2/util/LocalResourceBuilder.java | 169 
 .../apache/hadoop/mapreduce/v2/util/MRApps.java | 137 ++
 .../TestLocalDistributedCacheManager.java   |   9 +
 .../hadoop/mapreduce/v2/util/TestMRApps.java|   8 +-
 .../hadoop-mapreduce-client-core/pom.xml|   6 +
 .../java/org/apache/hadoop/mapreduce/Job.java   | 226 ++
 .../hadoop/mapreduce/JobResourceUploader.java   | 416 ---
 .../apache/hadoop/mapreduce/MRJobConfig.java|  71 
 .../hadoop/mapreduce/SharedCacheConfig.java | 102 +
 .../src/main/resources/mapred-default.xml   |  11 +
 .../src/site/markdown/SharedCacheSupport.md | 100 +
 .../mapreduce/TestJobResourceUploader.java  |  76 ++--
 .../TestJobResourceUploaderWithSharedCache.java | 365 
 .../org/apache/hadoop/mapred/YARNRunner.java|  54 ++-
 .../hadoop/mapred/TestLocalJobSubmission.java   |  52 +++
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  |  59 +++
 hadoop-project/src/site/site.xml|   1 +
 19 files changed, 1701 insertions(+), 229 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e46d5bb9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index 757c545..d2e2492 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.mapred.JobACLsManager;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.TaskCompletionEvent;
 import org.apache.hadoop.mapreduce.Counters;
+import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -1414,6 +1415,19 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
 new char[] {'"', '=', '.'});
   }
 
+  /*
+   * The goal is to make sure only the NM that hosts MRAppMaster will upload
+   * resources to shared cache. Clean up the shared cache policies for all
+   * resources so that later when TaskAttemptImpl creates
+   * ContainerLaunchContext, LocalResource.setShouldBeUploadedToSharedCache 
will
+   * be set up to false. In that way, the NMs that host the task containers
+   * won't try to upload the resources to shared cache.
+   */
+  private static void cleanupSharedCacheUploadPolicies(Configuration conf) {
+Job.setArchiveSharedCacheUploadPolicies(conf, Collections.emptyMap());
+Job.setFileSharedCacheUploadPolicies(conf, Collections.emptyMap());
+  }
+
   public static class InitTransition 
  implements MultipleArcTransition<JobImpl, JobEvent, JobStateInternal> {
 
@@ -1492,6 +1506,8 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
 job.allowedReduceFailuresPercent =
 job.conf.getInt(MRJobConfig.REDUCE_FAILURES_MAXPERCENT, 0);
 
+cleanupSharedCacheUploadPolicies(job.conf);
+
 // create the Tasks but don't start them yet
 createMapTasks(job, inputLength, taskSplitMetaInfo);
 createReduceTasks(job);

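A hedged sketch of how a job would opt in on the submission side; the key
literal is an assumption here, since the mapred-default.xml addition listed in
the diffstat is not shown in this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class SharedCacheJobDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed key: categories of resources to route through the shared cache.
    conf.set("mapreduce.job.sharedcache.mode", "libjars,archives");
    Job job = Job.getInstance(conf, "shared-cache-demo");
    // Matching resources are checksummed and looked up in the YARN shared
    // cache before falling back to a normal per-job upload.
    System.out.println(job.getJobName());
  }
}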
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e46d5bb9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
--
diff --git 

[02/50] [abbrv] hadoop git commit: HADOOP-13556. Change Configuration.getPropsWithPrefix to use getProps instead of iterator. (Larry McCay via asuresh)

2017-10-19 Thread haibochen
HADOOP-13556. Change Configuration.getPropsWithPrefix to use getProps instead 
of iterator. (Larry McCay via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6c2c905
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6c2c905
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6c2c905

Branch: refs/heads/YARN-1011
Commit: b6c2c9058e83116dcca46cd6934db3428f931347
Parents: 8acdf5c
Author: Arun Suresh 
Authored: Wed Oct 11 15:21:21 2017 -0700
Committer: Arun Suresh 
Committed: Wed Oct 11 15:21:21 2017 -0700

--
 .../java/org/apache/hadoop/conf/Configuration.java   |  9 ++---
 .../org/apache/hadoop/conf/TestConfiguration.java| 15 +++
 2 files changed, 21 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6c2c905/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 2890853..9d5bb1b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -2700,11 +2700,14 @@ public class Configuration implements 
Iterable<Map.Entry<String, String>>,
* @return mapping of configuration properties with prefix stripped
*/
   public Map<String, String> getPropsWithPrefix(String confPrefix) {
+Properties props = getProps();
+Enumeration e = props.propertyNames();
 Map<String, String> configMap = new HashMap<>();
-for (Map.Entry<String, String> entry : this) {
-  String name = entry.getKey();
+String name = null;
+while (e.hasMoreElements()) {
+  name = (String) e.nextElement();
   if (name.startsWith(confPrefix)) {
-String value = this.get(name);
+String value = props.getProperty(name);
 name = name.substring(confPrefix.length());
 configMap.put(name, value);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6c2c905/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index a806b8c..52215da 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -2242,6 +2242,21 @@ public class TestConfiguration {
 FileUtil.fullyDelete(tmpDir);
   }
 
+  public void testGettingPropertiesWithPrefix() throws Exception {
+Configuration conf = new Configuration();
+for (int i = 0; i < 10; i++) {
+  conf.set("prefix" + ".name" + i, "value");
+}
+conf.set("different.prefix" + ".name", "value");
+Map<String, String> props = conf.getPropsWithPrefix("prefix");
+assertEquals(10, props.size());
+
+// test call with no properties for a given prefix
+props = conf.getPropsWithPrefix("none");
+assertNotNull(props);
+assertTrue(props.isEmpty());
+  }
+
   public static void main(String[] argv) throws Exception {
 junit.textui.TestRunner.main(new String[]{
   TestConfiguration.class.getName()

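A usage sketch for the method under test; the prefix is stripped verbatim, so
include the trailing dot unless you want it left on the returned keys.

import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public class PrefixPropsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("router.client.retries", "3");
    conf.set("router.client.timeout", "5000");
    Map<String, String> props = conf.getPropsWithPrefix("router.client.");
    System.out.println(props); // {retries=3, timeout=5000}
  }
}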

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12448. Make sure user defined erasure coding policy ID will not overflow. Contributed by Huafeng Wang

2017-10-19 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0f1c03761 -> ce7cf66e5


HDFS-12448. Make sure user defined erasure coding policy ID will not overflow. 
Contributed by Huafeng Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce7cf66e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce7cf66e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce7cf66e

Branch: refs/heads/trunk
Commit: ce7cf66e5ed74c124afdb5a6902fbf297211cc04
Parents: 0f1c037
Author: Kai Zheng 
Authored: Fri Oct 20 09:42:04 2017 +0800
Committer: Kai Zheng 
Committed: Fri Oct 20 09:42:04 2017 +0800

--
 .../io/erasurecode/ErasureCodeConstants.java|  1 +
 .../namenode/ErasureCodingPolicyManager.java| 20 +++
 .../src/site/markdown/HDFSErasureCoding.md  |  2 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 27 
 4 files changed, 44 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce7cf66e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index d3c3b6b..73b8f56 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -50,6 +50,7 @@ public final class ErasureCodeConstants {
   public static final ECSchema REPLICATION_1_2_SCHEMA = new ECSchema(
   REPLICATION_CODEC_NAME, 1, 2);
 
+  public static final byte MAX_POLICY_ID = Byte.MAX_VALUE;
   public static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
   public static final byte REPLICATION_POLICY_ID = (byte) 63;
   public static final String REPLICATION_POLICY_NAME = REPLICATION_CODEC_NAME;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce7cf66e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 90699b4..62c7f60 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -253,6 +253,14 @@ public final class ErasureCodingPolicyManager {
 return p;
   }
 }
+
+if (getCurrentMaxPolicyID() == ErasureCodeConstants.MAX_POLICY_ID) {
+  throw new HadoopIllegalArgumentException("Adding erasure coding " +
+  "policy failed because the number of policies stored in the " +
+  "system already reached the threshold, which is " +
+  ErasureCodeConstants.MAX_POLICY_ID);
+}
+
 policy.setName(assignedNewName);
 policy.setId(getNextAvailablePolicyID());
 this.policiesByName.put(policy.getName(), policy);
@@ -261,12 +269,14 @@ public final class ErasureCodingPolicyManager {
 return policy;
   }
 
+  private byte getCurrentMaxPolicyID() {
+return policiesByID.keySet().stream().max(Byte::compareTo).orElse((byte)0);
+  }
+
   private byte getNextAvailablePolicyID() {
-byte currentId = this.policiesByID.keySet().stream()
-.max(Byte::compareTo)
-.filter(id -> id >= ErasureCodeConstants.USER_DEFINED_POLICY_START_ID)
-.orElse(ErasureCodeConstants.USER_DEFINED_POLICY_START_ID);
-return (byte) (currentId + 1);
+byte nextPolicyID = (byte)(getCurrentMaxPolicyID() + 1);
+return nextPolicyID > ErasureCodeConstants.USER_DEFINED_POLICY_START_ID ?
+nextPolicyID : ErasureCodeConstants.USER_DEFINED_POLICY_START_ID;
   }
 
   /**

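Walking the new logic with concrete IDs (the built-in policies occupied 1..5
and 63 at the time of this patch; take the exact values as approximate):

// fresh system: getCurrentMaxPolicyID() == 63 (REPLICATION_POLICY_ID), so
//   nextPolicyID = 64, which is not > 64; the ternary returns
//   USER_DEFINED_POLICY_START_ID (64) for the first user-defined policy.
// once user policies fill 64..126: the next max is 127 == MAX_POLICY_ID,
//   so addPolicy() now throws HadoopIllegalArgumentException instead of
//   letting (byte) (127 + 1) wrap around to -128.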
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce7cf66e/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index a171665..270201a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ 

hadoop git commit: HDFS-12448. Make sure user defined erasure coding policy ID will not overflow. Contributed by Huafeng Wang

2017-10-19 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 f4a2f0ee9 -> b7cb58a21


HDFS-12448. Make sure user defined erasure coding policy ID will not overflow. 
Contributed by Huafeng Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b7cb58a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b7cb58a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b7cb58a2

Branch: refs/heads/branch-3.0
Commit: b7cb58a21d26d68a09b50edf4ed7be5febe8edce
Parents: f4a2f0e
Author: Kai Zheng 
Authored: Fri Oct 20 09:34:58 2017 +0800
Committer: Kai Zheng 
Committed: Fri Oct 20 09:34:58 2017 +0800

--
 .../io/erasurecode/ErasureCodeConstants.java|  1 +
 .../namenode/ErasureCodingPolicyManager.java| 20 +++
 .../src/site/markdown/HDFSErasureCoding.md  |  2 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 27 
 4 files changed, 44 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7cb58a2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index d3c3b6b..73b8f56 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -50,6 +50,7 @@ public final class ErasureCodeConstants {
   public static final ECSchema REPLICATION_1_2_SCHEMA = new ECSchema(
   REPLICATION_CODEC_NAME, 1, 2);
 
+  public static final byte MAX_POLICY_ID = Byte.MAX_VALUE;
   public static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
   public static final byte REPLICATION_POLICY_ID = (byte) 63;
   public static final String REPLICATION_POLICY_NAME = REPLICATION_CODEC_NAME;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7cb58a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 90699b4..62c7f60 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -253,6 +253,14 @@ public final class ErasureCodingPolicyManager {
 return p;
   }
 }
+
+if (getCurrentMaxPolicyID() == ErasureCodeConstants.MAX_POLICY_ID) {
+  throw new HadoopIllegalArgumentException("Adding erasure coding " +
+  "policy failed because the number of policies stored in the " +
+  "system already reached the threshold, which is " +
+  ErasureCodeConstants.MAX_POLICY_ID);
+}
+
 policy.setName(assignedNewName);
 policy.setId(getNextAvailablePolicyID());
 this.policiesByName.put(policy.getName(), policy);
@@ -261,12 +269,14 @@ public final class ErasureCodingPolicyManager {
 return policy;
   }
 
+  private byte getCurrentMaxPolicyID() {
+return policiesByID.keySet().stream().max(Byte::compareTo).orElse((byte)0);
+  }
+
   private byte getNextAvailablePolicyID() {
-byte currentId = this.policiesByID.keySet().stream()
-.max(Byte::compareTo)
-.filter(id -> id >= ErasureCodeConstants.USER_DEFINED_POLICY_START_ID)
-.orElse(ErasureCodeConstants.USER_DEFINED_POLICY_START_ID);
-return (byte) (currentId + 1);
+byte nextPolicyID = (byte)(getCurrentMaxPolicyID() + 1);
+return nextPolicyID > ErasureCodeConstants.USER_DEFINED_POLICY_START_ID ?
+nextPolicyID : ErasureCodeConstants.USER_DEFINED_POLICY_START_ID;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7cb58a2/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index c8ef6c7..47b15ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ 

hadoop git commit: HDFS-12620. Backporting HDFS-10467 to branch-2. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7b4b01878 -> 0f1c03761


HDFS-12620. Backporting HDFS-10467 to branch-2. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f1c0376
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f1c0376
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f1c0376

Branch: refs/heads/trunk
Commit: 0f1c0376186de6446d595be7bb445ed6b71ae499
Parents: 7b4b018
Author: Inigo Goiri 
Authored: Thu Oct 19 18:08:45 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 18:08:45 2017 -0700

--
 .../org/apache/hadoop/hdfs/server/federation/MockResolver.java  | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f1c0376/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index a481553..151d731 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -264,7 +264,10 @@ public class MockResolver
   @Override
   public PathLocation getDestinationForPath(String path) throws IOException {
 List<RemoteLocation> remoteLocations = new LinkedList<>();
-for (String key : this.locations.keySet()) {
+// We go from the leaves to the root
+List<String> keys = new ArrayList<>(this.locations.keySet());
+Collections.sort(keys, Collections.reverseOrder());
+for (String key : keys) {
   if (path.startsWith(key)) {
 for (RemoteLocation location : this.locations.get(key)) {
   String finalPath = location.getDest() + path.substring(key.length());

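Why the reverse sort fixes resolution, in miniature: the most specific mount
entry must win, and reverse lexicographic order visits deeper paths first.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class MountOrderDemo {
  public static void main(String[] args) {
    List<String> keys = new ArrayList<>(Arrays.asList("/tmp", "/tmp/deep"));
    Collections.sort(keys, Collections.reverseOrder());
    // keys == [/tmp/deep, /tmp]: startsWith now matches the deepest mount
    // point first for a path like /tmp/deep/file.
    for (String key : keys) {
      if ("/tmp/deep/file".startsWith(key)) {
        System.out.println("resolved at mount: " + key);
        break;
      }
    }
  }
}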

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12620. Backporting HDFS-10467 to branch-2. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 b2f516118 -> f4a2f0ee9


HDFS-12620. Backporting HDFS-10467 to branch-2. Contributed by Inigo Goiri.

(cherry picked from commit 0f1c0376186de6446d595be7bb445ed6b71ae499)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4a2f0ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4a2f0ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4a2f0ee

Branch: refs/heads/branch-3.0
Commit: f4a2f0ee9f5ee261ada8686b0a8ffa2b19f01a83
Parents: b2f5161
Author: Inigo Goiri 
Authored: Thu Oct 19 18:08:45 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 18:10:47 2017 -0700

--
 .../org/apache/hadoop/hdfs/server/federation/MockResolver.java  | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4a2f0ee/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index a481553..151d731 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -264,7 +264,10 @@ public class MockResolver
   @Override
   public PathLocation getDestinationForPath(String path) throws IOException {
 List<RemoteLocation> remoteLocations = new LinkedList<>();
-for (String key : this.locations.keySet()) {
+// We go from the leaves to the root
+List<String> keys = new ArrayList<>(this.locations.keySet());
+Collections.sort(keys, Collections.reverseOrder());
+for (String key : keys) {
   if (path.startsWith(key)) {
 for (RemoteLocation location : this.locations.get(key)) {
   String finalPath = location.getDest() + path.substring(key.length());


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/32] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f95eac8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
new file mode 100644
index 000..3a32be1
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -0,0 +1,856 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * A client proxy for Router -> NN communication using the NN ClientProtocol.
+ * <p>
+ * Provides routers to invoke remote ClientProtocol methods and handle
+ * retries/failover.
+ * <ul>
+ * <li>invokeSingle Make a single request to a single namespace</li>
+ * <li>invokeSequential Make a sequential series of requests to multiple
+ * ordered namespaces until a condition is met.</li>
+ * <li>invokeConcurrent Make concurrent requests to multiple namespaces and
+ * return all of the results.</li>
+ * </ul>
+ * Also maintains a cached pool of connections to NNs. Connections are managed
+ * by the ConnectionManager and are unique to each user + NN. The size of the
+ * connection pool can be configured. Larger pools allow for more simultaneous
+ * requests to a single NN from a single user.
+ */
+public class RouterRpcClient {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(RouterRpcClient.class);
+
+
+  /** Router identifier. */
+  private final String routerId;
+
+  /** Interface to identify the active NN for a nameservice or blockpool ID. */
+  private final ActiveNamenodeResolver namenodeResolver;
+
+  /** Connection pool to the Namenodes per user for performance. */
+  private final ConnectionManager connectionManager;
+  /** Service to run asynchronous calls. */
+  private final ExecutorService executorService;
+  /** Retry policy for router -> NN communication. */
+  private final RetryPolicy retryPolicy;
+
+  /** Pattern to parse a stack trace line. */
+  private static final Pattern STACK_TRACE_PATTERN =
+  Pattern.compile("\\tat (.*)\\.(.*)\\((.*):(\\d*)\\)");
+
+
+  /**
+   * Create a router RPC 

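The RouterRpcClient javadoc above names three invocation patterns. Below is a minimal caller-side sketch of the three; the RemoteMethod and RemoteParam names come from the commit's file listing, but the exact invoke* parameter lists shown here are assumptions for illustration, not the verbatim API from this patch.

    // Sketch under assumed signatures; a running Router provides rpcClient.
    void invocationPatterns(RouterRpcClient rpcClient, String nsId,
        List<RemoteLocation> locations,
        Set<FederationNamespaceInfo> namespaces) throws IOException {
      // Describe the remote call once; RemoteParam marks the argument that
      // is substituted per destination (e.g. the namespace-local path).
      RemoteMethod method = new RemoteMethod("getFileInfo",
          new Class<?>[] {String.class}, new RemoteParam());
      // invokeSingle: one request to one namespace.
      Object one = rpcClient.invokeSingle(nsId, method);
      // invokeSequential: ordered namespaces until a non-null result appears.
      Object first = rpcClient.invokeSequential(
          locations, method, HdfsFileStatus.class, null);
      // invokeConcurrent: fan out to every namespace and gather all results.
      Object all = rpcClient.invokeConcurrent(namespaces, method, true);
    }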
[16/32] hadoop git commit: HDFS-10880. Federation Mount Table State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc7c12ff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
new file mode 100644
index 000..7f7c998
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+
+import 
org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
+import 
org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProtoOrBuilder;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the state store API object
+ * RemoveMountTableEntryRequest.
+ */
+public class RemoveMountTableEntryRequestPBImpl
+extends RemoveMountTableEntryRequest implements PBRecord {
+
+  private FederationProtocolPBTranslator<RemoveMountTableEntryRequestProto,
+      RemoveMountTableEntryRequestProto.Builder,
+      RemoveMountTableEntryRequestProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<>(
+              RemoveMountTableEntryRequestProto.class);
+
+  public RemoveMountTableEntryRequestPBImpl() {
+  }
+
+  public RemoveMountTableEntryRequestPBImpl(
+  RemoveMountTableEntryRequestProto proto) {
+this.setProto(proto);
+  }
+
+  @Override
+  public RemoveMountTableEntryRequestProto getProto() {
+return this.translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+this.translator.readInstance(base64String);
+  }
+
+  @Override
+  public String getSrcPath() {
+return this.translator.getProtoOrBuilder().getSrcPath();
+  }
+
+  @Override
+  public void setSrcPath(String path) {
+this.translator.getBuilder().setSrcPath(path);
+  }
+}
\ No newline at end of file

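A short usage sketch for the PB-backed record above; every method used here (setSrcPath, getProto, readInstance) appears in the class itself, and only the sample path is invented.

    // Build a removal request backed by a protobuf builder.
    RemoveMountTableEntryRequestPBImpl request =
        new RemoveMountTableEntryRequestPBImpl();
    request.setSrcPath("/data");             // delegated to the proto builder
    RemoveMountTableEntryRequestProto proto = request.getProto();  // built message
    // Records can also be rehydrated from their serialized form:
    // request.readInstance(base64String);   // throws IOException on bad input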
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc7c12ff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
new file mode 100644
index 000..0c943ac
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * 

[19/32] hadoop git commit: HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.

(cherry picked from commit ae27e31fbcf546481db0b0345772db2e9132372e)
(cherry picked from commit b3e6bd22e3c02b3e4f50396538f56a1bcb007638)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78abcb8f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78abcb8f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78abcb8f

Branch: refs/heads/branch-2
Commit: 78abcb8f524095d09c6eabb4c5424660a3ee0d34
Parents: cc7c12f
Author: Inigo Goiri 
Authored: Tue Aug 8 14:44:43 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:38:54 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   1 +
 .../hadoop-hdfs/src/main/bin/hdfs.cmd   |   7 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  19 ++
 .../hdfs/protocolPB/RouterAdminProtocolPB.java  |  44 +++
 ...uterAdminProtocolServerSideTranslatorPB.java | 151 
 .../RouterAdminProtocolTranslatorPB.java| 150 
 .../resolver/MembershipNamenodeResolver.java|  34 +-
 .../hdfs/server/federation/router/Router.java   |  52 +++
 .../federation/router/RouterAdminServer.java| 183 ++
 .../server/federation/router/RouterClient.java  |  76 +
 .../hdfs/tools/federation/RouterAdmin.java  | 341 +++
 .../hdfs/tools/federation/package-info.java |  28 ++
 .../src/main/proto/RouterProtocol.proto |  47 +++
 .../src/main/resources/hdfs-default.xml |  46 +++
 .../server/federation/RouterConfigBuilder.java  |  26 ++
 .../server/federation/RouterDFSCluster.java |  43 ++-
 .../server/federation/StateStoreDFSCluster.java | 148 
 .../federation/router/TestRouterAdmin.java  | 261 ++
 18 files changed, 1639 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78abcb8f/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 4f9b782..453c919 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -349,6 +349,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   <include>editlog.proto</include>
   <include>fsimage.proto</include>
   <include>FederationProtocol.proto</include>
+  <include>RouterProtocol.proto</include>
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78abcb8f/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index b9853d6..53bdf70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
 )
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath 
crypto router debug
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath 
crypto router federation debug
   for %%i in ( %hdfscommands% ) do (
 if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -184,6 +184,11 @@ goto :eof
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
   goto :eof
 
+:federation
+  set CLASS=org.apache.hadoop.hdfs.tools.federation.RouterAdmin
+  set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
+  goto :eof
+
 :debug
   set CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin
   goto :eof

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78abcb8f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f3bc592..b161bc0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1085,6 +1085,10 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final String FEDERATION_STORE_PREFIX =
   FEDERATION_ROUTER_PREFIX + "store.";
 
+  public static final String DFS_ROUTER_STORE_ENABLE =

[30/32] hadoop git commit: HDFS-12273. Federation UI. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-12273. Federation UI. Contributed by Inigo Goiri.

(cherry picked from commit adbb2e00c7b85524fd43bd68895d49814c16680a)
(cherry picked from commit 81601dac8ec7650bec14700b174910390a92fe1f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27295eec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27295eec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27295eec

Branch: refs/heads/branch-2
Commit: 27295eecdd5775c4fb6c899d515728a7e1972e43
Parents: 7d79598
Author: Inigo Goiri 
Authored: Thu Oct 5 17:26:43 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:39:19 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  19 +
 .../federation/metrics/FederationMBean.java |   7 +
 .../federation/metrics/FederationMetrics.java   |  25 +-
 .../resolver/MembershipNamenodeResolver.java|  23 ++
 .../hdfs/server/federation/router/Router.java   |  36 ++
 .../federation/router/RouterHttpServer.java | 124 +++
 .../federation/router/RouterRpcClient.java  |  45 ++-
 .../federation/router/RouterRpcServer.java  |  15 +-
 .../src/main/resources/hdfs-default.xml |  56 +++
 .../main/webapps/router/federationhealth.html   | 371 +++
 .../src/main/webapps/router/federationhealth.js | 313 
 .../src/main/webapps/router/index.html  |  24 ++
 .../server/federation/RouterConfigBuilder.java  |  13 +
 .../server/federation/RouterDFSCluster.java |  29 +-
 .../federation/metrics/TestMetricsBase.java |   1 +
 .../server/federation/router/TestRouter.java|   9 +-
 17 files changed, 1102 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27295eec/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 8ae3db8..154e4f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -276,6 +276,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
+
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27295eec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3606e7a..3f967da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1141,6 +1141,25 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   FEDERATION_ROUTER_PREFIX + "admin.enable";
   public static final boolean DFS_ROUTER_ADMIN_ENABLE_DEFAULT = true;
 
+  // HDFS Router-based federation web
+  public static final String DFS_ROUTER_HTTP_ENABLE =
+  FEDERATION_ROUTER_PREFIX + "http.enable";
+  public static final boolean DFS_ROUTER_HTTP_ENABLE_DEFAULT = true;
+  public static final String DFS_ROUTER_HTTP_ADDRESS_KEY =
+  FEDERATION_ROUTER_PREFIX + "http-address";
+  public static final int    DFS_ROUTER_HTTP_PORT_DEFAULT = 50071;
+  public static final String DFS_ROUTER_HTTP_BIND_HOST_KEY =
+  FEDERATION_ROUTER_PREFIX + "http-bind-host";
+  public static final String DFS_ROUTER_HTTP_ADDRESS_DEFAULT =
+  "0.0.0.0:" + DFS_ROUTER_HTTP_PORT_DEFAULT;
+  public static final String DFS_ROUTER_HTTPS_ADDRESS_KEY =
+  FEDERATION_ROUTER_PREFIX + "https-address";
+  public static final int    DFS_ROUTER_HTTPS_PORT_DEFAULT = 50072;
+  public static final String DFS_ROUTER_HTTPS_BIND_HOST_KEY =
+  FEDERATION_ROUTER_PREFIX + "https-bind-host";
+  public static final String DFS_ROUTER_HTTPS_ADDRESS_DEFAULT =
+  "0.0.0.0:" + DFS_ROUTER_HTTPS_PORT_DEFAULT;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

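The HTTP/HTTPS keys added above can also be wired programmatically; an illustrative (non-normative) example that moves the Router web UI off the 50071/50072 defaults, with the port values chosen arbitrarily:

    Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_ROUTER_HTTP_ENABLE, true);
    conf.set(DFSConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY, "0.0.0.0:8090");
    conf.set(DFSConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_KEY, "0.0.0.0:8091");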
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27295eec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
 

[15/32] hadoop git commit: HDFS-11826. Federation Namenode Heartbeat. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-11826. Federation Namenode Heartbeat. Contributed by Inigo Goiri.

(cherry picked from commit 928f8dab52191e733984d37f47b69719ccf11313)
(cherry picked from commit d8c81073320320a019fb3868be4f06f46aebea43)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e700db60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e700db60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e700db60

Branch: refs/heads/branch-2
Commit: e700db60a619dbb81d33abb762491e164a104cea
Parents: 0228ead
Author: Inigo Goiri 
Authored: Tue Aug 1 14:40:27 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:38:32 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  38 ++
 .../resolver/NamenodeStatusReport.java  | 193 ++
 .../federation/router/FederationUtil.java   |  66 
 .../router/NamenodeHeartbeatService.java| 350 +++
 .../hdfs/server/federation/router/Router.java   | 112 ++
 .../src/main/resources/hdfs-default.xml |  32 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   8 +
 .../hdfs/server/federation/MockResolver.java|   9 +-
 .../server/federation/RouterConfigBuilder.java  |  22 ++
 .../server/federation/RouterDFSCluster.java |  43 +++
 .../router/TestNamenodeHeartbeat.java   | 168 +
 .../server/federation/router/TestRouter.java|   3 +
 13 files changed, 1057 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e700db60/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 2e16a50..6d06bf2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1039,6 +1039,20 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   FEDERATION_ROUTER_PREFIX + "rpc.enable";
   public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true;
 
+  // HDFS Router heartbeat
+  public static final String DFS_ROUTER_HEARTBEAT_ENABLE =
+  FEDERATION_ROUTER_PREFIX + "heartbeat.enable";
+  public static final boolean DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT = true;
+  public static final String DFS_ROUTER_HEARTBEAT_INTERVAL_MS =
+  FEDERATION_ROUTER_PREFIX + "heartbeat.interval";
+  public static final long DFS_ROUTER_HEARTBEAT_INTERVAL_MS_DEFAULT =
+  TimeUnit.SECONDS.toMillis(5);
+  public static final String DFS_ROUTER_MONITOR_NAMENODE =
+  FEDERATION_ROUTER_PREFIX + "monitor.namenode";
+  public static final String DFS_ROUTER_MONITOR_LOCAL_NAMENODE =
+  FEDERATION_ROUTER_PREFIX + "monitor.localnamenode.enable";
+  public static final boolean DFS_ROUTER_MONITOR_LOCAL_NAMENODE_DEFAULT = true;
+
   // HDFS Router NN client
   public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
   FEDERATION_ROUTER_PREFIX + "connection.pool-size";

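An illustrative snippet for the heartbeat keys defined above; note the "ns.nn" identifier format used for the monitored namenode list is an assumption here, not taken from this hunk:

    Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE, true);
    conf.setLong(DFSConfigKeys.DFS_ROUTER_HEARTBEAT_INTERVAL_MS,
        TimeUnit.SECONDS.toMillis(10));   // heartbeat every 10s instead of 5s
    conf.set(DFSConfigKeys.DFS_ROUTER_MONITOR_NAMENODE, "ns1.nn1,ns1.nn2");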
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e700db60/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 7d1102f..d4fd5f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1239,6 +1239,44 @@ public class DFSUtil {
   }
 
   /**
+   * Map a logical namenode ID to its web address. Use the given nameservice if
+   * specified, or the configured one if none is given.
+   *
+   * @param conf Configuration
+   * @param nsId which nameservice nnId is a part of, optional
+   * @param nnId the namenode ID to get the service addr for
+   * @return the service addr, null if it could not be determined
+   */
+  public static String getNamenodeWebAddr(final Configuration conf, String 
nsId,
+  String nnId) {
+
+if (nsId == null) {
+  nsId = getOnlyNameServiceIdOrNull(conf);
+}
+
+String webAddrKey = DFSUtilClient.concatSuffixes(
+DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nsId, nnId);
+
+String webAddr =
+conf.get(webAddrKey, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT);
+return webAddr;
+  }
+
+  /**
+   * Get 

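To make the lookup in getNamenodeWebAddr concrete, a small example of the suffixed key resolution it performs; the host and port values are hypothetical:

    Configuration conf = new Configuration();
    // getNamenodeWebAddr(conf, "ns1", "nn1") reads the suffixed key below,
    // falling back to the dfs.namenode.http-address default when absent.
    conf.set("dfs.namenode.http-address.ns1.nn1", "machine1:50070");
    String webAddr = DFSUtil.getNamenodeWebAddr(conf, "ns1", "nn1");
    // webAddr is now "machine1:50070"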
[10/32] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo 
Goiri.

(cherry picked from commit 8a9cdebebf26841a0f1e99fb08135f4597f2eba2)
(cherry picked from commit ca4f209b49e3aad6a80306f7342c9b6b560a79a7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f95eac8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f95eac8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f95eac8

Branch: refs/heads/branch-2
Commit: 2f95eac8f6e1c03b90c49b01ee2b24b8d69f7d32
Parents: 7bd91fa
Author: Inigo Goiri 
Authored: Thu May 11 09:57:03 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:38:28 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   38 +
 .../resolver/FederationNamespaceInfo.java   |   46 +-
 .../federation/resolver/RemoteLocation.java |   46 +-
 .../federation/router/ConnectionContext.java|  104 +
 .../federation/router/ConnectionManager.java|  408 
 .../federation/router/ConnectionPool.java   |  314 +++
 .../federation/router/ConnectionPoolId.java |  117 ++
 .../router/RemoteLocationContext.java   |   38 +-
 .../server/federation/router/RemoteMethod.java  |  164 ++
 .../server/federation/router/RemoteParam.java   |   71 +
 .../hdfs/server/federation/router/Router.java   |   58 +-
 .../federation/router/RouterRpcClient.java  |  856 
 .../federation/router/RouterRpcServer.java  | 1867 +-
 .../src/main/resources/hdfs-default.xml |   95 +
 .../server/federation/FederationTestUtils.java  |   80 +-
 .../hdfs/server/federation/MockResolver.java|   90 +-
 .../server/federation/RouterConfigBuilder.java  |   20 +-
 .../server/federation/RouterDFSCluster.java |  535 +++--
 .../server/federation/router/TestRouter.java|   31 +-
 .../server/federation/router/TestRouterRpc.java |  869 
 .../router/TestRouterRpcMultiDestination.java   |  216 ++
 21 files changed, 5675 insertions(+), 388 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f95eac8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1b66ead..5d6c467 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1012,6 +1012,44 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   // HDFS Router-based federation
   public static final String FEDERATION_ROUTER_PREFIX =
   "dfs.federation.router.";
+  public static final String DFS_ROUTER_DEFAULT_NAMESERVICE =
+  FEDERATION_ROUTER_PREFIX + "default.nameserviceId";
+  public static final String DFS_ROUTER_HANDLER_COUNT_KEY =
+  FEDERATION_ROUTER_PREFIX + "handler.count";
+  public static final int DFS_ROUTER_HANDLER_COUNT_DEFAULT = 10;
+  public static final String DFS_ROUTER_READER_QUEUE_SIZE_KEY =
+  FEDERATION_ROUTER_PREFIX + "reader.queue.size";
+  public static final int DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT = 100;
+  public static final String DFS_ROUTER_READER_COUNT_KEY =
+  FEDERATION_ROUTER_PREFIX + "reader.count";
+  public static final int DFS_ROUTER_READER_COUNT_DEFAULT = 1;
+  public static final String DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY =
+  FEDERATION_ROUTER_PREFIX + "handler.queue.size";
+  public static final int DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT = 100;
+  public static final String DFS_ROUTER_RPC_BIND_HOST_KEY =
+  FEDERATION_ROUTER_PREFIX + "rpc-bind-host";
+  public static final int DFS_ROUTER_RPC_PORT_DEFAULT = ;
+  public static final String DFS_ROUTER_RPC_ADDRESS_KEY =
+  FEDERATION_ROUTER_PREFIX + "rpc-address";
+  public static final String DFS_ROUTER_RPC_ADDRESS_DEFAULT =
+  "0.0.0.0:" + DFS_ROUTER_RPC_PORT_DEFAULT;
+  public static final String DFS_ROUTER_RPC_ENABLE =
+  FEDERATION_ROUTER_PREFIX + "rpc.enable";
+  public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true;
+
+  // HDFS Router NN client
+  public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
+  FEDERATION_ROUTER_PREFIX + "connection.pool-size";
+  public static final int DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT =
+  64;
+  public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN =
+  FEDERATION_ROUTER_PREFIX + "connection.pool.clean.ms";
+  public static final long DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN_DEFAULT =
+  TimeUnit.MINUTES.toMillis(1);
+  

[17/32] hadoop git commit: HDFS-10880. Federation Mount Table State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-10880. Federation Mount Table State Store internal API. Contributed by 
Jason Kace and Inigo Goiri.

(cherry picked from commit 58b97df661441150d35abd44b3a8606206b46441)
(cherry picked from commit 6f0de2731806628b5b01bd1350225692147590da)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc7c12ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc7c12ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc7c12ff

Branch: refs/heads/branch-2
Commit: cc7c12ff18c561285b09876ff14ad9a11809b923
Parents: e700db6
Author: Inigo Goiri 
Authored: Fri Aug 4 18:00:12 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:38:33 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   7 +-
 .../federation/resolver/MountTableManager.java  |  80 +++
 .../federation/resolver/MountTableResolver.java | 544 +++
 .../federation/resolver/PathLocation.java   | 124 -
 .../resolver/order/DestinationOrder.java|  29 +
 .../federation/resolver/order/package-info.java |  29 +
 .../federation/router/FederationUtil.java   |  56 +-
 .../hdfs/server/federation/router/Router.java   |   3 +-
 .../federation/store/MountTableStore.java   |  49 ++
 .../federation/store/StateStoreService.java |   2 +
 .../store/impl/MountTableStoreImpl.java | 116 
 .../protocol/AddMountTableEntryRequest.java |  47 ++
 .../protocol/AddMountTableEntryResponse.java|  42 ++
 .../protocol/GetMountTableEntriesRequest.java   |  49 ++
 .../protocol/GetMountTableEntriesResponse.java  |  53 ++
 .../protocol/RemoveMountTableEntryRequest.java  |  49 ++
 .../protocol/RemoveMountTableEntryResponse.java |  42 ++
 .../protocol/UpdateMountTableEntryRequest.java  |  51 ++
 .../protocol/UpdateMountTableEntryResponse.java |  43 ++
 .../pb/AddMountTableEntryRequestPBImpl.java |  84 +++
 .../pb/AddMountTableEntryResponsePBImpl.java|  76 +++
 .../pb/GetMountTableEntriesRequestPBImpl.java   |  76 +++
 .../pb/GetMountTableEntriesResponsePBImpl.java  | 104 
 .../pb/RemoveMountTableEntryRequestPBImpl.java  |  76 +++
 .../pb/RemoveMountTableEntryResponsePBImpl.java |  76 +++
 .../pb/UpdateMountTableEntryRequestPBImpl.java  |  96 
 .../pb/UpdateMountTableEntryResponsePBImpl.java |  76 +++
 .../federation/store/records/MountTable.java| 301 ++
 .../store/records/impl/pb/MountTablePBImpl.java | 213 
 .../src/main/proto/FederationProtocol.proto |  61 ++-
 .../hdfs/server/federation/MockResolver.java|   9 +-
 .../resolver/TestMountTableResolver.java| 396 ++
 .../store/FederationStateStoreTestUtils.java|  16 +
 .../store/TestStateStoreMountTable.java | 250 +
 .../store/driver/TestStateStoreDriverBase.java  |  12 +
 .../store/records/TestMountTable.java   | 176 ++
 36 files changed, 3437 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc7c12ff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 6d06bf2..f3bc592 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
@@ -1070,8 +1072,9 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   // HDFS Router State Store connection
   public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS =
   FEDERATION_ROUTER_PREFIX + "file.resolver.client.class";
-  public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT =
-  "org.apache.hadoop.hdfs.server.federation.MockResolver";
+  public static final Class<? extends FileSubclusterResolver>
+  FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT =
+  MountTableResolver.class;
  

[24/32] hadoop git commit: HDFS-12335. Federation Metrics. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/550ce086/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java
new file mode 100644
index 000..851538a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
+import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+
+/**
+ * This class is for maintaining the various Router activity statistics
+ * and publishing them through the metrics interfaces.
+ */
+@Metrics(name="RouterActivity", about="Router metrics", context="dfs")
+public class RouterMetrics {
+
+  private final MetricsRegistry registry = new MetricsRegistry("router");
+
+  @Metric("Duration in SafeMode at startup in msec")
+  private MutableGaugeInt safeModeTime;
+
+  private JvmMetrics jvmMetrics = null;
+
+  RouterMetrics(
+  String processName, String sessionId, final JvmMetrics jvmMetrics) {
+this.jvmMetrics = jvmMetrics;
+registry.tag(ProcessName, processName).tag(SessionId, sessionId);
+  }
+
+  public static RouterMetrics create(Configuration conf) {
+String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+String processName = "Router";
+MetricsSystem ms = DefaultMetricsSystem.instance();
+JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);
+
+return ms.register(new RouterMetrics(processName, sessionId, jm));
+  }
+
+  public JvmMetrics getJvmMetrics() {
+return jvmMetrics;
+  }
+
+  public void shutdown() {
+DefaultMetricsSystem.shutdown();
+  }
+
+  public void setSafeModeTime(long elapsed) {
+safeModeTime.set((int) elapsed);
+  }
+}

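A usage sketch assembled only from the methods shown above: create and register the metrics source, record the time spent in startup safe mode, and unregister on shutdown. Time.monotonicNow is org.apache.hadoop.util.Time; the surrounding startup flow is assumed.

    Configuration conf = new Configuration();
    RouterMetrics metrics = RouterMetrics.create(conf);
    long enterSafeMode = Time.monotonicNow();
    // ... router starts up and leaves safe mode ...
    metrics.setSafeModeTime(Time.monotonicNow() - enterSafeMode);
    metrics.shutdown();   // tears down the default metrics system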
http://git-wip-us.apache.org/repos/asf/hadoop/blob/550ce086/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java
new file mode 100644
index 000..f4debce
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */

[21/32] hadoop git commit: HDFS-12312. Rebasing HDFS-10467 (2). Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-12312. Rebasing HDFS-10467 (2). Contributed by Inigo Goiri.

(cherry picked from commit 90ba6843fb3ac5dc7576535e66a75a5e3433247b)
(cherry picked from commit 346c9fce43ebf6a90fc56e0dc7c403f97cc5391f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36c3515f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36c3515f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36c3515f

Branch: refs/heads/branch-2
Commit: 36c3515f76621006fa762b0cb7d2f5a5c985
Parents: 0b599cd
Author: Inigo Goiri 
Authored: Wed Aug 16 17:31:37 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:39:08 2017 -0700

--
 .../hadoop/hdfs/server/federation/router/RouterRpcServer.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36c3515f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index eaaab39..c77d255 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -1946,6 +1946,7 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
 }
 long inodeId = 0;
 return new HdfsFileStatus(0, true, 0, 0, modTime, accessTime, permission,
+EnumSet.noneOf(HdfsFileStatus.Flags.class),
 owner, group, new byte[0], DFSUtil.string2Bytes(name), inodeId,
 childrenNum, null, (byte) 0, null);
   }





[28/32] hadoop git commit: HDFS-12381. [Documentation] Adding configuration keys for the Router. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-12381. [Documentation] Adding configuration keys for the Router. 
Contributed by Inigo Goiri.

(cherry picked from commit c2d6aa79055ef72406fa598e1c743b0c994b5da8)
(cherry picked from commit ad41c8155940f4da0e51439c97c6cc9c808c28df)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8a71e2f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8a71e2f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8a71e2f

Branch: refs/heads/branch-2
Commit: f8a71e2f10f699a9e9f25bc1eadcb2af6b592921
Parents: 9ad1f90
Author: Inigo Goiri 
Authored: Fri Sep 22 13:06:10 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:39:17 2017 -0700

--
 .../src/main/resources/hdfs-default.xml |  11 +-
 .../src/site/markdown/HDFSRouterFederation.md   | 159 +--
 2 files changed, 156 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8a71e2f/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 550e5df..50ce6f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4366,7 +4366,8 @@
 <property>
   <name>dfs.federation.router.rpc.enable</name>
   <value>true</value>
   <description>
-  If the RPC service to handle client requests in the router is enabled.
+  If true, the RPC service to handle client requests in the router is
+  enabled.
   </description>
 </property>
 
@@ -4470,7 +4471,7 @@
 <property>
   <name>dfs.federation.router.admin.enable</name>
   <value>true</value>
   <description>
-  If the RPC admin service to handle client requests in the router is
+  If true, the RPC admin service to handle client requests in the router is
   enabled.
   </description>
 </property>
 
@@ -4524,7 +4525,7 @@
 <property>
   <name>dfs.federation.router.store.enable</name>
   <value>true</value>
   <description>
-  If the Router connects to the State Store.
+  If true, the Router connects to the State Store.
   </description>
 </property>
 
@@ -4572,7 +4573,7 @@
 <property>
   <name>dfs.federation.router.heartbeat.enable</name>
   <value>true</value>
   <description>
-  Enables the Router to heartbeat into the State Store.
+  If true, the Router heartbeats into the State Store.
   </description>
 </property>
 
@@ -4596,7 +4597,7 @@
 <property>
   <name>dfs.federation.router.monitor.localnamenode.enable</name>
   <value>true</value>
   <description>
-  If the Router should monitor the namenode in the local machine.
+  If true, the Router should monitor the namenode in the local machine.
   </description>
 </property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8a71e2f/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index f094238..1cea7f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -21,7 +21,7 @@ Introduction
 
 
 NameNodes have scalability limits because of the metadata overhead comprised 
of inodes (files and directories) and file blocks, the number of Datanode 
heartbeats, and the number of HDFS RPC client requests.
-The common solution is to split the filesystem into smaller subclusters [HDFS 
Federation](.Federation.html) and provide a federated view 
[ViewFs](.ViewFs.html).
+The common solution is to split the filesystem into smaller subclusters [HDFS 
Federation](./Federation.html) and provide a federated view 
[ViewFs](./ViewFs.html).
 The problem is how to maintain the split of the subclusters (e.g., namespace 
partition), which forces users to connect to multiple subclusters and manage 
the allocation of folders/files to them.
 
 
@@ -35,7 +35,7 @@ This layer must be scalable, highly available, and fault 
tolerant.
 
 This federation layer comprises multiple components.
 The _Router_ component that has the same interface as a NameNode, and forwards 
the client requests to the correct subcluster, based on ground-truth 
information from a State Store.
-The _State Store_ combines a remote _Mount Table_ (in the flavor of 
[ViewFs](.ViewFs.html), but shared between clients) and utilization 
(load/capacity) information about the subclusters.
+The _State Store_ combines a remote _Mount Table_ (in the flavor of 
[ViewFs](./ViewFs.html), but shared between clients) and utilization 
(load/capacity) information about the subclusters.
 This approach has the same architecture as [YARN 
federation](../hadoop-yarn/Federation.html).
 
 ![Router-based Federation 

[01/32] hadoop git commit: HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 99ce49c52 -> 12c81c67d


http://git-wip-us.apache.org/repos/asf/hadoop/blob/096e8c4f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
new file mode 100644
index 000..ee6f57d
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.NamenodePriorityComparator;
+import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
+import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.util.Time;
+
+/**
+ * In-memory cache/mock of a namenode and file resolver. Stores the most
+ * recently updated NN information for each nameservice and block pool. Also
+ * stores a virtual mount table for resolving global namespace paths to local 
NN
+ * paths.
+ */
+public class MockResolver
+implements ActiveNamenodeResolver, FileSubclusterResolver {
+
+  private Map<String, List<? extends FederationNamenodeContext>> resolver =
+  new HashMap<String, List<? extends FederationNamenodeContext>>();
+  private Map<String, List<RemoteLocation>> locations =
+  new HashMap<String, List<RemoteLocation>>();
+  private Set<FederationNamespaceInfo> namespaces =
+  new HashSet<FederationNamespaceInfo>();
+  private String defaultNamespace = null;
+
+  public MockResolver(Configuration conf, StateStoreService store) {
+this.cleanRegistrations();
+  }
+
+  public void addLocation(String mount, String nameservice, String location) {
+RemoteLocation remoteLocation = new RemoteLocation(nameservice, location);
+List<RemoteLocation> locationsList = locations.get(mount);
+if (locationsList == null) {
+  locationsList = new LinkedList<RemoteLocation>();
+  locations.put(mount, locationsList);
+}
+if (!locationsList.contains(remoteLocation)) {
+  locationsList.add(remoteLocation);
+}
+
+if (this.defaultNamespace == null) {
+  this.defaultNamespace = nameservice;
+}
+  }
+
+  public synchronized void cleanRegistrations() {
+this.resolver =
+new HashMap<String, List<? extends FederationNamenodeContext>>();
+this.namespaces = new HashSet<FederationNamespaceInfo>();
+  }
+
+  @Override
+  public void updateActiveNamenode(
+  String ns, InetSocketAddress successfulAddress) {
+
+String address = successfulAddress.getHostName() + ":" +
+successfulAddress.getPort();
+String key = ns;
+if (key != null) {
+  // Update the active entry
+  @SuppressWarnings("unchecked")
+  List<FederationNamenodeContext> iterator =
+  (List<FederationNamenodeContext>) resolver.get(key);
+  for (FederationNamenodeContext namenode : iterator) {
+if (namenode.getRpcAddress().equals(address)) {
+  MockNamenodeContext nn = (MockNamenodeContext) namenode;
+  nn.setState(FederationNamenodeServiceState.ACTIVE);
+  break;
+}
+  }
+  Collections.sort(iterator, new NamenodePriorityComparator());
+}
+  }
+
+  @Override
+  public List
+  

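A test-side sketch of the mock above, using only the constructor and addLocation shown; the null StateStoreService argument is an assumption made for a standalone example:

    MockResolver resolver = new MockResolver(new Configuration(), null);
    // Mount /data onto nameservice ns1; the first nameservice added also
    // becomes the default namespace.
    resolver.addLocation("/data", "ns1", "/data");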
[13/32] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0228ead4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java
new file mode 100644
index 000..1f0d556
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import 
org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+
+/**
+ * API response for overriding an existing namenode registration in the state
+ * store.
+ */
+public abstract class UpdateNamenodeRegistrationResponse {
+
+  public static UpdateNamenodeRegistrationResponse newInstance() {
+return StateStoreSerializer.newRecord(
+UpdateNamenodeRegistrationResponse.class);
+  }
+
+  public static UpdateNamenodeRegistrationResponse newInstance(boolean status)
+  throws IOException {
+UpdateNamenodeRegistrationResponse response = newInstance();
+response.setResult(status);
+return response;
+  }
+
+  @Private
+  @Unstable
+  public abstract boolean getResult();
+
+  @Private
+  @Unstable
+  public abstract void setResult(boolean result);
+}
\ No newline at end of file

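The abstract record above is instantiated through the serializer rather than a concrete constructor; a minimal sketch using only the factory methods shown:

    // newInstance(boolean) delegates to StateStoreSerializer.newRecord and
    // returns the configured (PB-backed) subclass; it throws IOException.
    UpdateNamenodeRegistrationResponse response =
        UpdateNamenodeRegistrationResponse.newInstance(true);
    boolean ok = response.getResult();   // true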
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0228ead4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
new file mode 100644
index 000..baad113
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+
+import org.apache.commons.codec.binary.Base64;
+
+import com.google.protobuf.GeneratedMessage;
+import com.google.protobuf.Message;
+import com.google.protobuf.Message.Builder;
+import com.google.protobuf.MessageOrBuilder;
+
+/**
+ * Helper class for setting/getting data elements in an object backed by a
+ * protobuf implementation.
+ */
+public class FederationProtocolPBTranslator<P extends GeneratedMessage,
+    B extends Builder, T extends MessageOrBuilder> {
+
+  /** Optional proto byte stream used to create this object. */
+  private P proto;
+  /** The class of the proto handler for this 

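A hedged sketch of how a record implementation typically parameterizes the translator above (filling the P/B/T type slots); the GetMountTableEntries proto names come from the commit's file listing, and the builder calls are illustrative:

    FederationProtocolPBTranslator<GetMountTableEntriesRequestProto,
        GetMountTableEntriesRequestProto.Builder,
        GetMountTableEntriesRequestProtoOrBuilder> translator =
            new FederationProtocolPBTranslator<>(
                GetMountTableEntriesRequestProto.class);
    translator.getBuilder().setSrcPath("/");   // mutate through the builder
    Message message = translator.build();      // assemble the proto message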
[07/32] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f95eac8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index ee6f57d..2875750 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.util.Time;
 
 /**
  * In-memory cache/mock of a namenode and file resolver. Stores the most
- * recently updated NN information for each nameservice and block pool. Also
+ * recently updated NN information for each nameservice and block pool. It also
  * stores a virtual mount table for resolving global namespace paths to local 
NN
  * paths.
  */
@@ -51,82 +51,93 @@ public class MockResolver
 implements ActiveNamenodeResolver, FileSubclusterResolver {
 
   private Map<String, List<? extends FederationNamenodeContext>> resolver =
-  new HashMap<String, List<? extends FederationNamenodeContext>>();
-  private Map<String, List<RemoteLocation>> locations =
-  new HashMap<String, List<RemoteLocation>>();
-  private Set<FederationNamespaceInfo> namespaces =
-  new HashSet<FederationNamespaceInfo>();
+  new HashMap<>();
+  private Map<String, List<RemoteLocation>> locations = new HashMap<>();
+  private Set<FederationNamespaceInfo> namespaces = new HashSet<>();
   private String defaultNamespace = null;
 
+
   public MockResolver(Configuration conf, StateStoreService store) {
 this.cleanRegistrations();
   }
 
-  public void addLocation(String mount, String nameservice, String location) {
-RemoteLocation remoteLocation = new RemoteLocation(nameservice, location);
-List<RemoteLocation> locationsList = locations.get(mount);
+  public void addLocation(String mount, String nsId, String location) {
+List<RemoteLocation> locationsList = this.locations.get(mount);
 if (locationsList == null) {
-  locationsList = new LinkedList<RemoteLocation>();
-  locations.put(mount, locationsList);
+  locationsList = new LinkedList<>();
+  this.locations.put(mount, locationsList);
 }
+
+final RemoteLocation remoteLocation = new RemoteLocation(nsId, location);
 if (!locationsList.contains(remoteLocation)) {
   locationsList.add(remoteLocation);
 }
 
 if (this.defaultNamespace == null) {
-  this.defaultNamespace = nameservice;
+  this.defaultNamespace = nsId;
 }
   }
 
   public synchronized void cleanRegistrations() {
-this.resolver =
-new HashMap<String, List<? extends FederationNamenodeContext>>();
-this.namespaces = new HashSet<FederationNamespaceInfo>();
+this.resolver = new HashMap<>();
+this.namespaces = new HashSet<>();
   }
 
   @Override
   public void updateActiveNamenode(
-  String ns, InetSocketAddress successfulAddress) {
+  String nsId, InetSocketAddress successfulAddress) {
 
 String address = successfulAddress.getHostName() + ":" +
 successfulAddress.getPort();
-String key = ns;
+String key = nsId;
 if (key != null) {
   // Update the active entry
   @SuppressWarnings("unchecked")
-  List<FederationNamenodeContext> iterator =
-  (List<FederationNamenodeContext>) resolver.get(key);
-  for (FederationNamenodeContext namenode : iterator) {
+  List<FederationNamenodeContext> namenodes =
+  (List<FederationNamenodeContext>) this.resolver.get(key);
+  for (FederationNamenodeContext namenode : namenodes) {
 if (namenode.getRpcAddress().equals(address)) {
   MockNamenodeContext nn = (MockNamenodeContext) namenode;
   nn.setState(FederationNamenodeServiceState.ACTIVE);
   break;
 }
   }
-  Collections.sort(iterator, new NamenodePriorityComparator());
+  // This operation modifies the list so we need to be careful
+  synchronized(namenodes) {
+Collections.sort(namenodes, new NamenodePriorityComparator());
+  }
 }
   }
 
   @Override
   public List<? extends FederationNamenodeContext>
   getNamenodesForNameserviceId(String nameserviceId) {
-return resolver.get(nameserviceId);
+// Return a copy of the list because it is updated periodically
+List<? extends FederationNamenodeContext> namenodes =
+this.resolver.get(nameserviceId);
+return Collections.unmodifiableList(new ArrayList<>(namenodes));
   }
 
   @Override
   public List<? extends FederationNamenodeContext> getNamenodesForBlockPoolId(
   String blockPoolId) {
-return resolver.get(blockPoolId);
+// Return a copy of the list because it is updated periodically
+List<? extends FederationNamenodeContext> namenodes =
+this.resolver.get(blockPoolId);
+return Collections.unmodifiableList(new ArrayList<>(namenodes));
   }
 
   private static class MockNamenodeContext
   implements FederationNamenodeContext {
+
+private String namenodeId;
+private String nameserviceId;
+
 private String webAddress;
 private String rpcAddress;
 private String serviceAddress;
 private String lifelineAddress;
-private String namenodeId;
-private String 

[27/32] hadoop git commit: HDFS-12450. Fixing TestNamenodeHeartbeat and support non-HA. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-12450. Fixing TestNamenodeHeartbeat and support non-HA. Contributed by 
Inigo Goiri.

(cherry picked from commit 928d1e87f9dbe64f89b858ccc1780723f3af58e7)
(cherry picked from commit 67785fe0063cfbfecab0e8deaae4a45d9c7c0073)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ad1f90b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ad1f90b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ad1f90b

Branch: refs/heads/branch-2
Commit: 9ad1f90b9ddfeb03b24222caa75153bb2a334129
Parents: ec84369
Author: Inigo Goiri 
Authored: Fri Sep 15 16:02:12 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:39:16 2017 -0700

--
 .../router/NamenodeHeartbeatService.java| 47 
 .../server/federation/RouterDFSCluster.java | 23 +-
 2 files changed, 50 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ad1f90b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
index fe4f939..38f63e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
@@ -94,8 +94,9 @@ public class NamenodeHeartbeatService extends PeriodicService 
{
*/
   public NamenodeHeartbeatService(
   ActiveNamenodeResolver resolver, String nsId, String nnId) {
-super(NamenodeHeartbeatService.class.getSimpleName() + " " + nsId + " " +
-nnId);
+super(NamenodeHeartbeatService.class.getSimpleName() +
+(nsId == null ? "" : " " + nsId) +
+(nnId == null ? "" : " " + nnId));
 
 this.resolver = resolver;
 
@@ -109,28 +110,28 @@ public class NamenodeHeartbeatService extends 
PeriodicService {
 
 this.conf = configuration;
 
+String nnDesc = nameserviceId;
 if (this.namenodeId != null && !this.namenodeId.isEmpty()) {
   this.localTarget = new NNHAServiceTarget(
   conf, nameserviceId, namenodeId);
+  nnDesc += "-" + namenodeId;
 } else {
   this.localTarget = null;
 }
 
 // Get the RPC address for the clients to connect
 this.rpcAddress = getRpcAddress(conf, nameserviceId, namenodeId);
-LOG.info("{}-{} RPC address: {}",
-nameserviceId, namenodeId, rpcAddress);
+LOG.info("{} RPC address: {}", nnDesc, rpcAddress);
 
 // Get the Service RPC address for monitoring
 this.serviceAddress =
 DFSUtil.getNamenodeServiceAddr(conf, nameserviceId, namenodeId);
 if (this.serviceAddress == null) {
-  LOG.error("Cannot locate RPC service address for NN {}-{}, " +
-  "using RPC address {}", nameserviceId, namenodeId, this.rpcAddress);
+  LOG.error("Cannot locate RPC service address for NN {}, " +
+  "using RPC address {}", nnDesc, this.rpcAddress);
   this.serviceAddress = this.rpcAddress;
 }
-LOG.info("{}-{} Service RPC address: {}",
-nameserviceId, namenodeId, serviceAddress);
+LOG.info("{} Service RPC address: {}", nnDesc, serviceAddress);
 
 // Get the Lifeline RPC address for faster monitoring
 this.lifelineAddress =
@@ -138,13 +139,12 @@ public class NamenodeHeartbeatService extends 
PeriodicService {
 if (this.lifelineAddress == null) {
   this.lifelineAddress = this.serviceAddress;
 }
-LOG.info("{}-{} Lifeline RPC address: {}",
-nameserviceId, namenodeId, lifelineAddress);
+LOG.info("{} Lifeline RPC address: {}", nnDesc, lifelineAddress);
 
 // Get the Web address for UI
 this.webAddress =
 DFSUtil.getNamenodeWebAddr(conf, nameserviceId, namenodeId);
-LOG.info("{}-{} Web address: {}", nameserviceId, namenodeId, webAddress);
+LOG.info("{} Web address: {}", nnDesc, webAddress);
 
 this.setIntervalMs(conf.getLong(
 DFS_ROUTER_HEARTBEAT_INTERVAL_MS,
@@ -173,7 +173,7 @@ public class NamenodeHeartbeatService extends 
PeriodicService {
 String confKey = DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 String ret = conf.get(confKey);
 
-if (nsId != null && nnId != null) {
+if (nsId != null || nnId != null) {
   // Get it for the proper nameservice and namenode
   confKey = DFSUtil.addKeySuffixes(confKey, nsId, nnId);
   ret = conf.get(confKey);
@@ -182,10 +182,16 @@ 

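The change from && to || above is the crux of the non-HA fix: a federated but non-HA NameNode has a nameservice id and no namenode id, so the suffixed configuration key must be built from whichever ids are present. A simplified stand-in for DFSUtil.addKeySuffixes (an illustration of the key-naming convention being relied on, not the real implementation):

/** Simplified stand-in for DFSUtil.addKeySuffixes (illustration only). */
final class ConfKeys {
  private ConfKeys() {
  }

  /** Append each non-null, non-empty suffix to the key, dot-separated. */
  static String addKeySuffixes(String key, String... suffixes) {
    StringBuilder sb = new StringBuilder(key);
    for (String suffix : suffixes) {
      if (suffix != null && !suffix.isEmpty()) {
        sb.append('.').append(suffix);
      }
    }
    return sb.toString();
  }
}

// ConfKeys.addKeySuffixes("dfs.namenode.rpc-address", "ns0", "nn1")
//   -> "dfs.namenode.rpc-address.ns0.nn1"   (federated, HA)
// ConfKeys.addKeySuffixes("dfs.namenode.rpc-address", "ns0", null)
//   -> "dfs.namenode.rpc-address.ns0"       (federated, non-HA)
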
[25/32] hadoop git commit: HDFS-12335. Federation Metrics. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-12335. Federation Metrics. Contributed by Inigo Goiri.

(cherry picked from commit 3b19e77752afce87936f5c0d1e6d272fba798d7b)
(cherry picked from commit bc9e588a19c0aaf518de8dab719362be4a8d6a54)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/550ce086
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/550ce086
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/550ce086

Branch: refs/heads/branch-2
Commit: 550ce086a147366e1ad0196fef75019d0bd176c2
Parents: 23753ff3
Author: Inigo Goiri 
Authored: Fri Sep 8 09:37:10 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:39:14 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  11 +
 .../federation/metrics/FederationMBean.java | 204 ++
 .../federation/metrics/FederationMetrics.java   | 673 +++
 .../federation/metrics/FederationRPCMBean.java  |  90 +++
 .../metrics/FederationRPCMetrics.java   | 239 +++
 .../FederationRPCPerformanceMonitor.java| 211 ++
 .../federation/metrics/NamenodeBeanMetrics.java | 624 +
 .../federation/metrics/StateStoreMBean.java |  45 ++
 .../federation/metrics/StateStoreMetrics.java   | 144 
 .../server/federation/metrics/package-info.java |  27 +
 .../federation/router/ConnectionManager.java|  23 +
 .../federation/router/ConnectionPool.java   |  23 +
 .../hdfs/server/federation/router/Router.java   |  62 ++
 .../server/federation/router/RouterMetrics.java |  73 ++
 .../federation/router/RouterMetricsService.java | 108 +++
 .../federation/router/RouterRpcClient.java  |  39 +-
 .../federation/router/RouterRpcMonitor.java |  95 +++
 .../federation/router/RouterRpcServer.java  |  63 +-
 .../federation/store/CachedRecordStore.java |   8 +
 .../federation/store/StateStoreService.java |  42 +-
 .../store/driver/StateStoreDriver.java  |  17 +-
 .../driver/impl/StateStoreSerializableImpl.java |   6 +-
 .../driver/impl/StateStoreZooKeeperImpl.java|  26 +
 .../store/records/MembershipState.java  |   2 +-
 .../federation/store/records/MountTable.java|  23 +
 .../records/impl/pb/MembershipStatePBImpl.java  |   5 +-
 .../src/main/resources/hdfs-default.xml |  19 +-
 .../server/federation/FederationTestUtils.java  |  13 +
 .../server/federation/RouterConfigBuilder.java  |  13 +
 .../metrics/TestFederationMetrics.java  | 237 +++
 .../federation/metrics/TestMetricsBase.java | 150 +
 .../server/federation/router/TestRouter.java|  23 +-
 .../store/driver/TestStateStoreDriverBase.java  |  69 ++
 33 files changed, 3383 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/550ce086/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b161bc0..3606e7a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -28,6 +28,8 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
+import org.apache.hadoop.hdfs.server.federation.router.RouterRpcMonitor;
+import 
org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformanceMonitor;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
@@ -1041,6 +1043,15 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   FEDERATION_ROUTER_PREFIX + "rpc.enable";
   public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true;
 
+  public static final String DFS_ROUTER_METRICS_ENABLE =
+  FEDERATION_ROUTER_PREFIX + "metrics.enable";
+  public static final boolean DFS_ROUTER_METRICS_ENABLE_DEFAULT = true;
+  public static final String DFS_ROUTER_METRICS_CLASS =
+  FEDERATION_ROUTER_PREFIX + "metrics.class";
+  public static final Class<? extends RouterRpcMonitor>
+  DFS_ROUTER_METRICS_CLASS_DEFAULT =
+  FederationRPCPerformanceMonitor.class;
+
   // HDFS Router heartbeat
   public static final String DFS_ROUTER_HEARTBEAT_ENABLE =

[29/32] hadoop git commit: HDFS-12580. Rebasing HDFS-10467 after HDFS-12447. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-12580. Rebasing HDFS-10467 after HDFS-12447. Contributed by Inigo Goiri.

(cherry picked from commit b12440d3479f19138bc66ea59baf41eb89061906)
(cherry picked from commit 6c69e23dcdf1cdbddd47bacdf2dace5c9f06e3ad)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d795989
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d795989
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d795989

Branch: refs/heads/branch-2
Commit: 7d795989e150eec1531155c1f9cfdb3b31fc3168
Parents: f8a71e2
Author: Inigo Goiri 
Authored: Mon Oct 2 18:45:06 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:39:18 2017 -0700

--
 .../hadoop/hdfs/server/federation/router/RouterRpcServer.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d795989/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 1fa1720..650c6ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -1857,8 +1857,8 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
   }
 
   @Override
-  public AddECPolicyResponse[] addErasureCodingPolicies(
-  ErasureCodingPolicy[] arg0) throws IOException {
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
+  ErasureCodingPolicy[] policies) throws IOException {
 checkOperation(OperationCategory.WRITE, false);
 return null;
   }





[14/32] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-10687. Federation Membership State Store internal API. Contributed by 
Jason Kace and Inigo Goiri.

(cherry picked from commit 95cae08849d23cf12c5d280c29f90908e6a90d40)
(cherry picked from commit 55da7fd7ebe2f3fa1c1c828dda727fddc75a1b81)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0228ead4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0228ead4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0228ead4

Branch: refs/heads/branch-2
Commit: 0228ead4d1883613ba013f6e768ead42a3f6ead6
Parents: a09d721
Author: Inigo Goiri 
Authored: Mon Jul 31 10:55:21 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:38:30 2017 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   3 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   1 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  17 +-
 .../resolver/MembershipNamenodeResolver.java| 290 
 .../federation/router/FederationUtil.java   |  42 +-
 .../federation/store/CachedRecordStore.java | 237 ++
 .../federation/store/MembershipStore.java   | 126 +
 .../federation/store/StateStoreCache.java   |  36 ++
 .../store/StateStoreCacheUpdateService.java |  67 +++
 .../federation/store/StateStoreService.java | 202 +++-
 .../store/impl/MembershipStoreImpl.java | 311 +
 .../federation/store/impl/package-info.java |  31 ++
 .../GetNamenodeRegistrationsRequest.java|  52 +++
 .../GetNamenodeRegistrationsResponse.java   |  55 +++
 .../store/protocol/GetNamespaceInfoRequest.java |  30 ++
 .../protocol/GetNamespaceInfoResponse.java  |  52 +++
 .../protocol/NamenodeHeartbeatRequest.java  |  52 +++
 .../protocol/NamenodeHeartbeatResponse.java |  49 ++
 .../UpdateNamenodeRegistrationRequest.java  |  72 +++
 .../UpdateNamenodeRegistrationResponse.java |  51 ++
 .../impl/pb/FederationProtocolPBTranslator.java | 145 ++
 .../GetNamenodeRegistrationsRequestPBImpl.java  |  87 
 .../GetNamenodeRegistrationsResponsePBImpl.java |  99 
 .../impl/pb/GetNamespaceInfoRequestPBImpl.java  |  60 +++
 .../impl/pb/GetNamespaceInfoResponsePBImpl.java |  95 
 .../impl/pb/NamenodeHeartbeatRequestPBImpl.java |  93 
 .../pb/NamenodeHeartbeatResponsePBImpl.java |  71 +++
 ...UpdateNamenodeRegistrationRequestPBImpl.java |  95 
 ...pdateNamenodeRegistrationResponsePBImpl.java |  73 +++
 .../store/protocol/impl/pb/package-info.java|  29 ++
 .../store/records/MembershipState.java  | 329 +
 .../store/records/MembershipStats.java  | 126 +
 .../records/impl/pb/MembershipStatePBImpl.java  | 334 +
 .../records/impl/pb/MembershipStatsPBImpl.java  | 191 
 .../src/main/proto/FederationProtocol.proto | 107 +
 .../src/main/resources/hdfs-default.xml |  18 +-
 .../resolver/TestNamenodeResolver.java  | 284 
 .../store/FederationStateStoreTestUtils.java|  23 +-
 .../federation/store/TestStateStoreBase.java|  81 
 .../store/TestStateStoreMembershipState.java| 463 +++
 .../store/driver/TestStateStoreDriverBase.java  |  69 ++-
 .../store/records/TestMembershipState.java  | 129 ++
 42 files changed, 4745 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0228ead4/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 53897ac..ce96062 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -15,6 +15,9 @@

  
  
+   
+ 
+ 

  
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0228ead4/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 87d1942..4f9b782 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -348,6 +348,7 @@
                   <include>QJournalProtocol.proto</include>
                   <include>editlog.proto</include>
                   <include>fsimage.proto</include>
+                  <include>FederationProtocol.proto</include>
                 </includes>
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0228ead4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

[26/32] hadoop git commit: HDFS-12430. Rebasing HDFS-10467 After HDFS-12269 and HDFS-12218. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-12430. Rebasing HDFS-10467 After HDFS-12269 and HDFS-12218. Contributed by 
Inigo Goiri.

(cherry picked from commit 3302e792d469b7e8f3bfa10151e4e1c546589734)
(cherry picked from commit 1f06b81ecb14044964176dd16fafaa0ee96bfe3d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec843695
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec843695
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec843695

Branch: refs/heads/branch-2
Commit: ec84369538618bb56cfdf98a7d910683287cb529
Parents: 550ce08
Author: Inigo Goiri 
Authored: Wed Sep 13 09:15:13 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:39:15 2017 -0700

--
 .../hdfs/server/federation/router/RouterRpcServer.java| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec843695/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 6aee1ee..1fa1720 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -66,7 +66,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
@@ -76,7 +75,7 @@ import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -89,6 +88,7 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -1879,19 +1879,19 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
   }
 
   @Override
-  public ECBlockGroupsStats getECBlockGroupsStats() throws IOException {
+  public ECBlockGroupStats getECBlockGroupStats() throws IOException {
 checkOperation(OperationCategory.READ, false);
 return null;
   }
 
   @Override
-  public HashMap<String, String> getErasureCodingCodecs() throws IOException {
+  public Map<String, String> getErasureCodingCodecs() throws IOException {
 checkOperation(OperationCategory.READ, false);
 return null;
   }
 
   @Override
-  public BlocksStats getBlocksStats() throws IOException {
+  public ReplicatedBlockStats getReplicatedBlockStats() throws IOException {
 checkOperation(OperationCategory.READ, false);
 return null;
   }





[18/32] hadoop git commit: HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78abcb8f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
new file mode 100644
index 000..170247f
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.synchronizeRecords;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.util.Time;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * The administrator interface of the {@link Router} implemented by
+ * {@link RouterAdminServer}.
+ */
+public class TestRouterAdmin {
+
+  private static StateStoreDFSCluster cluster;
+  private static RouterContext routerContext;
+  public static final String RPC_BEAN =
+  "Hadoop:service=Router,name=FederationRPC";
+  private static List mockMountTable;
+  private static StateStoreService stateStore;
+
+  @BeforeClass
+  public static void globalSetUp() throws Exception {
+cluster = new StateStoreDFSCluster(false, 1);
+// Build and start a router with State Store + admin + RPC
+Configuration conf = new RouterConfigBuilder()
+.stateStore()
+.admin()
+.rpc()
+.build();
+cluster.addRouterOverrides(conf);
+cluster.startRouters();
+routerContext = cluster.getRandomRouter();
+mockMountTable = cluster.generateMockMountTable();
+Router router = routerContext.getRouter();
+stateStore = router.getStateStore();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+cluster.stopRouter(routerContext);
+  }
+
+  @Before
+  public void testSetup() throws Exception {
+assertTrue(
+synchronizeRecords(stateStore, mockMountTable, MountTable.class));
+  }
+
+  @Test
+  public void testAddMountTable() throws IOException {
+MountTable newEntry = MountTable.newInstance(
+"/testpath", Collections.singletonMap("ns0", "/testdir"),
+Time.now(), Time.now());
+
+RouterClient client = routerContext.getAdminClient();
+MountTableManager mountTable = client.getMountTableManager();
+
+// Existing mount table size
+List records = 

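The test body is truncated here, but based only on the classes it imports, a plausible continuation can be sketched. This is a hedged sketch, not the verbatim test: it submits the new entry through the mount table manager and checks the response status, reusing the routerContext field set up above.

  @Test
  public void testAddMountTableEntrySketch() throws IOException {
    // Hypothetical sketch using the request/response classes imported above.
    MountTable newEntry = MountTable.newInstance(
        "/sketchpath", Collections.singletonMap("ns0", "/sketchdir"),
        Time.now(), Time.now());
    MountTableManager mountTable =
        routerContext.getAdminClient().getMountTableManager();

    AddMountTableEntryRequest request =
        AddMountTableEntryRequest.newInstance(newEntry);
    AddMountTableEntryResponse response =
        mountTable.addMountTableEntry(request);
    assertTrue("Adding the entry should succeed", response.getStatus());
  }
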
[23/32] hadoop git commit: HDFS-12384. Fixing compilation issue with BanDuplicateClasses. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-12384. Fixing compilation issue with BanDuplicateClasses. Contributed by 
Inigo Goiri.

(cherry picked from commit fabe02c8fafa807198054da0c02b2ebaafda76aa)
(cherry picked from commit cc58e7a983d8f1351089462f531993f7b4f0a9c5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23753ff3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23753ff3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23753ff3

Branch: refs/heads/branch-2
Commit: 23753ff3c55a3b7f17d20c71c54a4a8561665ab3
Parents: a100f63
Author: Inigo Goiri 
Authored: Thu Sep 7 13:53:08 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:39:12 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  |  4 
 .../server/federation/router/RouterRpcServer.java| 15 +++
 2 files changed, 15 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23753ff3/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 64ad6fb..8ae3db8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -222,10 +222,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.curator</groupId>
-      <artifactId>curator-framework</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.curator</groupId>
       <artifactId>curator-test</artifactId>
       <scope>test</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23753ff3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index c77d255..f9b4a5d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -81,6 +81,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -91,6 +92,7 @@ import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import 
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
@@ -1607,6 +1609,19 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
   }
 
   @Override // ClientProtocol
+  public void reencryptEncryptionZone(String zone, ReencryptAction action)
+  throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override // ClientProtocol
+  public BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(
+  long prevId) throws IOException {
+checkOperation(OperationCategory.READ, false);
+return null;
+  }
+
+  @Override // ClientProtocol
  public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
   throws IOException {
 checkOperation(OperationCategory.WRITE);

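Both new methods use the two-argument checkOperation(..., false) form that appears throughout RouterRpcServer's unimplemented ClientProtocol stubs. A hedged sketch of what that convention amounts to (an assumption about the helper's shape, not the actual Router code):

/**
 * Hedged sketch of the checkOperation(op, supported) convention used by
 * the stubs above; an assumption, not the actual RouterRpcServer code.
 */
final class OperationCheck {
  enum OperationCategory { READ, WRITE, UNCHECKED }

  static void checkOperation(OperationCategory op, boolean supported) {
    // The real server would first verify that the Router is running and
    // that this operation category (READ/WRITE) is currently allowed.
    if (!supported) {
      // Unsupported stubs keep the server compilable against an evolving
      // ClientProtocol while failing loudly if a client ever calls them.
      throw new UnsupportedOperationException(
          "Operation " + op + " is not supported by the Router");
    }
  }
}

Under this reading, the return null after each check is never reached; it only satisfies the compiler.
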




[32/32] hadoop git commit: HDFS-12620. Backporting HDFS-10467 to branch-2. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-12620. Backporting HDFS-10467 to branch-2. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12c81c67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12c81c67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12c81c67

Branch: refs/heads/branch-2
Commit: 12c81c67d7906f613e33796042f0544aa50c3d27
Parents: 629b88b
Author: Inigo Goiri 
Authored: Thu Oct 19 17:40:42 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:40:42 2017 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   7 +
 .../federation/metrics/FederationMetrics.java   | 139 +--
 .../federation/metrics/NamenodeBeanMetrics.java |  61 
 .../federation/resolver/MountTableResolver.java |  16 ++-
 .../federation/router/ConnectionManager.java|   2 +-
 .../federation/router/ConnectionPool.java   |   2 +-
 .../federation/router/RouterRpcServer.java  | 115 +--
 .../hdfs/server/federation/MockResolver.java|   5 +-
 .../server/federation/router/TestRouterRpc.java |   2 +-
 9 files changed, 161 insertions(+), 188 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12c81c67/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index fbfbaf2..0b96ec2 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -47,6 +47,8 @@ function print_usage(){
   echo "  datanode run a DFS datanode"
   echo "  debugrun a Debug Admin to execute HDFS debug 
commands"
   echo "  dfsadmin run a DFS admin client"
+  echo "  dfsrouterrun the DFS router"
+  echo "  dfsrouteradmin   manage Router-based federation"
   echo "  haadmin  run a DFS HA admin client"
   echo "  fsck run a DFS filesystem checking utility"
   echo "  balancer run a cluster balancing utility"
@@ -157,6 +159,11 @@ elif [ "$COMMAND" = "dfs" ] ; then
 elif [ "$COMMAND" = "dfsadmin" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
   HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "dfsrouter" ] ; then
+  CLASS='org.apache.hadoop.hdfs.server.federation.router.DFSRouter'
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_ROUTER_OPTS"
+elif [ "$COMMAND" = "dfsrouteradmin" ] ; then
+  CLASS='org.apache.hadoop.hdfs.tools.federation.RouterAdmin'
 elif [ "$COMMAND" = "haadmin" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
   CLASSPATH=${CLASSPATH}:${TOOL_PATH}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12c81c67/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
index 7844a2e..685c585 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
@@ -31,6 +31,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
 import java.util.LinkedList;
@@ -38,10 +39,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
-import java.util.function.ToIntFunction;
-import java.util.function.ToLongFunction;
-import java.util.stream.Collectors;
 
 import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
@@ -72,7 +69,7 @@ import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
 import org.codehaus.jettison.json.JSONObject;
-import org.eclipse.jetty.util.ajax.JSON;
+import org.mortbay.util.ajax.JSON;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -263,12 +260,12 @@ public class FederationMetrics implements FederationMBean 
{
 
   @Override
   public long getTotalCapacity() {
-return getNameserviceAggregatedLong(MembershipStats::getTotalSpace);
+return getNameserviceAggregatedLong("getTotalSpace");
   }
 
   @Override
   public long 

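The change from MembershipStats::getTotalSpace to the string "getTotalSpace", together with the removal of the java.util.function imports above, is the Java 7 compatibility rewrite the branch-2 backport needs: method references are replaced by reflective lookup by method name. A hedged sketch of what such a reflection-based aggregator can look like (an illustration, not the actual FederationMetrics implementation):

import java.lang.reflect.Method;
import java.util.List;

/** Illustration of aggregating a long-valued getter by name on Java 7. */
final class LongAggregator {
  private LongAggregator() {
  }

  static long aggregate(List<?> records, String getterName) {
    long total = 0;
    for (Object record : records) {
      try {
        // Reflective lookup replaces the Java 8 method reference
        // (e.g., MembershipStats::getTotalSpace) used on trunk.
        Method getter = record.getClass().getMethod(getterName);
        total += ((Number) getter.invoke(record)).longValue();
      } catch (ReflectiveOperationException e) {
        // Real code would log the failure and skip the record.
      }
    }
    return total;
  }
}
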
[31/32] hadoop git commit: HDFS-12577. Rename Router tooling. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-12577. Rename Router tooling. Contributed by Inigo Goiri.

(cherry picked from commit 53e8d0d030525e4c7f3875e23807c6dbe778890f)
(cherry picked from commit 5d63a388d1c3ec8a658cb2fd9b34c240bddf15a0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/629b88b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/629b88b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/629b88b4

Branch: refs/heads/branch-2
Commit: 629b88b4dd355002d5b6d0cce7525c1f20b8592f
Parents: 27295ee
Author: Inigo Goiri 
Authored: Fri Oct 6 17:31:53 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:39:27 2017 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs.cmd   | 11 +--
 .../server/federation/router/DFSRouter.java | 76 
 .../hdfs/server/federation/router/Router.java   | 39 --
 .../src/site/markdown/HDFSRouterFederation.md   | 12 ++--
 4 files changed, 88 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/629b88b4/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index 53bdf70..a9a7852 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
 )
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto router federation debug
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto dfsrouter dfsrouteradmin debug
   for %%i in ( %hdfscommands% ) do (
 if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -179,12 +179,12 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
   goto :eof
 
-:router
-  set CLASS=org.apache.hadoop.hdfs.server.federation.router.Router
+:dfsrouter
+  set CLASS=org.apache.hadoop.hdfs.server.federation.router.DFSRouter
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
   goto :eof
 
-:federation
+:dfsrouteradmin
   set CLASS=org.apache.hadoop.hdfs.tools.federation.RouterAdmin
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
   goto :eof
@@ -229,7 +229,8 @@ goto :eof
   @echo   secondarynamenode    run the DFS secondary namenode
   @echo   namenode             run the DFS namenode
   @echo   journalnode          run the DFS journalnode
-  @echo   router               run the DFS router
+  @echo   dfsrouter            run the DFS router
+  @echo   dfsrouteradmin       manage Router-based federation
   @echo   zkfc                 run the ZK Failover Controller daemon
   @echo   datanode             run a DFS datanode
   @echo   dfsadmin             run a DFS admin client

http://git-wip-us.apache.org/repos/asf/hadoop/blob/629b88b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/DFSRouter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/DFSRouter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/DFSRouter.java
new file mode 100644
index 000..a2ac258
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/DFSRouter.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
+import 

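The new DFSRouter.java is cut off above; the one import that survives, ExitUtil.terminate, signals the usual Hadoop daemon entry-point shape. A hypothetical sketch of such a main class (the body below is an assumption for illustration; only the package and the terminate import come from the diff):

package org.apache.hadoop.hdfs.server.federation.router;

import static org.apache.hadoop.util.ExitUtil.terminate;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

/** Hypothetical sketch of a daemon entry point; not the actual DFSRouter. */
public final class DFSRouterSketch {
  private DFSRouterSketch() {
  }

  public static void main(String[] argv) {
    try {
      // Build the HDFS configuration and run the Router as a service.
      Configuration conf = new HdfsConfiguration();
      Router router = new Router();
      router.init(conf);
      router.start();
    } catch (Throwable e) {
      // Hadoop daemons conventionally log and exit through ExitUtil.
      terminate(1, e);
    }
  }
}
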
[22/32] hadoop git commit: HDFS-10631. Federation State Store ZooKeeper implementation. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-10631. Federation State Store ZooKeeper implementation. Contributed by 
Jason Kace and Inigo Goiri.

(cherry picked from commit 23c4ddee11ab1300325a6361124ee8ad6f68d7a4)
(cherry picked from commit 7cb6bdf09ed361e067ebf234230babd1391a7d4b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a100f63a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a100f63a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a100f63a

Branch: refs/heads/branch-2
Commit: a100f63a16204dc586cdd1b00c84df85d76fa319
Parents: 36c3515
Author: Inigo Goiri 
Authored: Mon Aug 21 11:40:41 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:39:11 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   9 +
 .../driver/impl/StateStoreSerializableImpl.java |  19 ++
 .../driver/impl/StateStoreZooKeeperImpl.java| 298 +++
 .../store/driver/TestStateStoreDriverBase.java  |   2 +-
 .../store/driver/TestStateStoreZK.java  | 105 +++
 5 files changed, 432 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a100f63a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 453c919..64ad6fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -220,6 +220,15 @@
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-databind</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-framework</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-test</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a100f63a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
index e9b3fdf..e2038fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
@@ -30,6 +30,11 @@ import 
org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
  */
 public abstract class StateStoreSerializableImpl extends StateStoreBaseImpl {
 
+  /** Mark for slashes in path names. */
+  protected static final String SLASH_MARK = "0SLASH0";
+  /** Mark for colon in path names. */
+  protected static final String COLON_MARK = "_";
+
   /** Default serializer for this driver. */
   private StateStoreSerializer serializer;
 
@@ -74,4 +79,18 @@ public abstract class StateStoreSerializableImpl extends 
StateStoreBaseImpl {
  String data, Class<T> clazz, boolean includeDates) throws IOException {
 return serializer.deserialize(data, clazz);
   }
+
+  /**
+   * Get the primary key for a record. If we don't want to store in folders, we
+   * need to remove / from the name.
+   *
+   * @param record Record to get the primary key for.
+   * @return Primary key for the record.
+   */
+  protected static String getPrimaryKey(BaseRecord record) {
+String primaryKey = record.getPrimaryKey();
+primaryKey = primaryKey.replaceAll("/", SLASH_MARK);
+primaryKey = primaryKey.replaceAll(":", COLON_MARK);
+return primaryKey;
+  }
 }
\ No newline at end of file

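Why escape at all: drivers that store each record under a flat name (a file per record, or a znode per record, as in the ZooKeeper implementation that follows) cannot tolerate "/" or ":" in the key. A quick runnable illustration of the exact transformation getPrimaryKey applies, using the SLASH_MARK and COLON_MARK values shown above:

/** Demonstrates the key escaping performed by getPrimaryKey() above. */
public class PrimaryKeyEscapeDemo {
  public static void main(String[] args) {
    String primaryKey = "hdfs://ns0/data";
    primaryKey = primaryKey.replaceAll("/", "0SLASH0"); // escape slashes
    primaryKey = primaryKey.replaceAll(":", "_");       // escape colons
    // Prints: hdfs_0SLASH00SLASH0ns00SLASH0data
    System.out.println(primaryKey);
  }
}
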
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a100f63a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java
new file mode 100644
index 000..ddcd537
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java
@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information

[02/32] hadoop git commit: HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.

(cherry picked from commit 6821e801724ac38e9737538b2164c9ae88792282)
(cherry picked from commit 2761bbc91a7b0a36c42b1b6569c5ecd4f236281b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/096e8c4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/096e8c4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/096e8c4f

Branch: refs/heads/branch-2
Commit: 096e8c4f35cea18d0f540500d05dcc59ee2b868e
Parents: 99ce49c
Author: Inigo 
Authored: Tue Mar 28 14:30:59 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:38:21 2017 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs.cmd   |   8 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  17 +
 .../resolver/ActiveNamenodeResolver.java| 117 +++
 .../resolver/FederationNamenodeContext.java |  87 +++
 .../FederationNamenodeServiceState.java |  46 ++
 .../resolver/FederationNamespaceInfo.java   |  99 +++
 .../resolver/FileSubclusterResolver.java|  75 ++
 .../resolver/NamenodePriorityComparator.java|  63 ++
 .../resolver/NamenodeStatusReport.java  | 195 +
 .../federation/resolver/PathLocation.java   | 122 +++
 .../federation/resolver/RemoteLocation.java |  74 ++
 .../federation/resolver/package-info.java   |  41 +
 .../federation/router/FederationUtil.java   | 117 +++
 .../router/RemoteLocationContext.java   |  38 +
 .../hdfs/server/federation/router/Router.java   | 263 +++
 .../federation/router/RouterRpcServer.java  | 102 +++
 .../server/federation/router/package-info.java  |  31 +
 .../federation/store/StateStoreService.java |  77 ++
 .../server/federation/store/package-info.java   |  62 ++
 .../src/main/resources/hdfs-default.xml |  16 +
 .../server/federation/FederationTestUtils.java  | 233 ++
 .../hdfs/server/federation/MockResolver.java| 290 +++
 .../server/federation/RouterConfigBuilder.java  |  40 +
 .../server/federation/RouterDFSCluster.java | 767 +++
 .../server/federation/router/TestRouter.java|  96 +++
 25 files changed, 3075 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/096e8c4f/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index 2181e47..b9853d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
 )
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto debug
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto router debug
   for %%i in ( %hdfscommands% ) do (
 if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -179,6 +179,11 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
   goto :eof
 
+:router
+  set CLASS=org.apache.hadoop.hdfs.server.federation.router.Router
+  set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
+  goto :eof
+
 :debug
   set CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin
   goto :eof
@@ -219,6 +224,7 @@ goto :eof
   @echo   secondarynamenode    run the DFS secondary namenode
   @echo   namenode             run the DFS namenode
   @echo   journalnode          run the DFS journalnode
+  @echo   router               run the DFS router
   @echo   zkfc                 run the ZK Failover Controller daemon
   @echo   datanode             run a DFS datanode
   @echo   dfsadmin             run a DFS admin client

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096e8c4f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e4c02c2..912307f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1001,6 +1001,23 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   

[20/32] hadoop git commit: HDFS-11554. [Documentation] Router-based federation documentation. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-11554. [Documentation] Router-based federation documentation. Contributed 
by Inigo Goiri.

(cherry picked from commit ee3260211d94aed223dd6f2386a166eb2c7d67af)
(cherry picked from commit 67d10087aff9d4ab2748aefc1b97522495c148f8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b599cdc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b599cdc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b599cdc

Branch: refs/heads/branch-2
Commit: 0b599cdc874f4521eeba105ca635e01daa060322
Parents: 78abcb8
Author: Inigo Goiri 
Authored: Wed Aug 16 17:23:29 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:38:57 2017 -0700

--
 .../src/site/markdown/HDFSRouterFederation.md   | 170 +++
 .../site/resources/images/routerfederation.png  | Bin 0 -> 24961 bytes
 2 files changed, 170 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b599cdc/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
new file mode 100644
index 000..f094238
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -0,0 +1,170 @@
+
+
+HDFS Router-based Federation
+============================
+
+
+
+Introduction
+------------
+
+NameNodes have scalability limits because of the metadata overhead comprised 
of inodes (files and directories) and file blocks, the number of Datanode 
heartbeats, and the number of HDFS RPC client requests.
+The common solution is to split the filesystem into smaller subclusters [HDFS 
Federation](.Federation.html) and provide a federated view 
[ViewFs](.ViewFs.html).
+The problem is how to maintain the split of the subclusters (e.g., namespace 
partition), which forces users to connect to multiple subclusters and manage 
the allocation of folders/files to them.
+
+
+Architecture
+------------
+
+A natural extension to this partitioned federation is to add a layer of 
software responsible for federating the namespaces.
+This extra layer allows users to access any subcluster transparently, lets 
subclusters manage their own block pools independently, and supports 
rebalancing of data across subclusters.
+To accomplish these goals, the federation layer directs block accesses to the 
proper subcluster, maintains the state of the namespaces, and provides 
mechanisms for data rebalancing.
+This layer must be scalable, highly available, and fault tolerant.
+
+This federation layer comprises multiple components.
+The _Router_ component has the same interface as a NameNode, and forwards 
the client requests to the correct subcluster, based on ground-truth 
information from a State Store.
+The _State Store_ combines a remote _Mount Table_ (in the flavor of 
[ViewFs](./ViewFs.html), but shared between clients) and utilization 
(load/capacity) information about the subclusters.
+This approach has the same architecture as [YARN 
federation](../hadoop-yarn/Federation.html).
+
+![Router-based Federation Sequence Diagram | 
width=800](./images/routerfederation.png)
+
+
+### Example flow
+The simplest configuration deploys a Router on each NameNode machine.
+The Router monitors the local NameNode and heartbeats the state to the State 
Store.
+When a regular DFS client contacts any of the Routers to access a file in the 
federated filesystem, the Router checks the Mount Table in the State Store 
(i.e., the local cache) to find out which subcluster contains the file.
+Then it checks the Membership table in the State Store (i.e., the local cache) 
for the NameNode responsible for the subcluster.
+After it has identified the correct NameNode, the Router proxies the request.
+The client accesses Datanodes directly.
+
+
+### Router
+There can be multiple Routers in the system with soft state.
+Each Router has two roles:
+
+* Federated interface: expose a single, global NameNode interface to the 
clients and forward the requests to the active NameNode in the correct 
subcluster
+* NameNode heartbeat: maintain the information about a NameNode in the State 
Store
+
+#### Federated interface
+The Router receives a client request, checks the State Store for the correct 
subcluster, and forwards the request to the active NameNode of that subcluster.
+The reply from the NameNode then flows in the opposite direction.
+The Routers are stateless and can be behind a load balancer.
+For performance, the Router also caches remote mount table entries and the 
state of the subclusters.
+To make sure that changes have been propagated to all Routers, 

[05/32] hadoop git commit: HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri.
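The "Example flow" section above describes a two-step lookup: the mount table decides which subcluster owns a path, and the membership table names the active NameNode for that subcluster. The first step is essentially longest-prefix matching; a hypothetical, self-contained sketch of it (the entries and class below are illustrative, not the Router's actual resolver):

import java.util.TreeMap;

/**
 * Hypothetical illustration of the mount-table lookup in the flow above:
 * the longest matching mount point wins, in the style of ViewFs.
 */
class MountTableSketch {
  // mount point -> owning subcluster nameservice (illustrative entries)
  private final TreeMap<String, String> mounts =
      new TreeMap<String, String>();

  MountTableSketch() {
    mounts.put("/", "ns0");
    mounts.put("/data", "ns1");
    mounts.put("/data/logs", "ns2");
  }

  /** Returns the nameservice owning the longest matching mount point. */
  String resolve(String path) {
    String best = "/";
    for (String mount : mounts.keySet()) {
      // Simplified prefix check; real resolvers match path components.
      if (path.startsWith(mount) && mount.length() > best.length()) {
        best = mount;
      }
    }
    return mounts.get(best);
  }
}

// new MountTableSketch().resolve("/data/logs/app1") -> "ns2"
// new MountTableSketch().resolve("/user/alice")     -> "ns0"

The real resolvers layer caching and periodic State Store refresh on top of this lookup, which is why the flow above stresses that the Router consults its local cache.
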

2017-10-19 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bd91fa2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
new file mode 100644
index 000..7f0b36a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
@@ -0,0 +1,483 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.driver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.conf.Configuration;
+import 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
+import org.apache.hadoop.hdfs.server.federation.store.records.Query;
+import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult;
+import org.junit.AfterClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base tests for the driver. The particular implementations will use this to
+ * test their functionality.
+ */
+public class TestStateStoreDriverBase {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestStateStoreDriverBase.class);
+
+  private static StateStoreService stateStore;
+  private static Configuration conf;
+
+
+  /**
+   * Get the State Store driver.
+   * @return State Store driver.
+   */
+  protected StateStoreDriver getStateStoreDriver() {
+return stateStore.getDriver();
+  }
+
+  @AfterClass
+  public static void tearDownCluster() {
+if (stateStore != null) {
+  stateStore.stop();
+}
+  }
+
+  /**
+   * Get a new State Store using this configuration.
+   *
+   * @param config Configuration for the State Store.
+   * @throws Exception If we cannot get the State Store.
+   */
+  public static void getStateStore(Configuration config) throws Exception {
+conf = config;
+stateStore = FederationStateStoreTestUtils.getStateStore(conf);
+  }
+
+  private <T extends BaseRecord> T generateFakeRecord(Class<T> recordClass)
+  throws IllegalArgumentException, IllegalAccessException, IOException {
+
+// TODO add record
+return null;
+  }
+
+  /**
+   * Validate if a record is the same.
+   *
+   * @param original
+   * @param committed
+   * @param assertEquals Assert if the records are equal or just return.
+   * @return
+   * @throws IllegalArgumentException
+   * @throws IllegalAccessException
+   */
+  private boolean validateRecord(
+  BaseRecord original, BaseRecord committed, boolean assertEquals)
+  throws IllegalArgumentException, IllegalAccessException {
+
+boolean ret = true;
+
+Map<String, Class<?>> fields = getFields(original);
+for (String key : fields.keySet()) {
+  if (key.equals("dateModified") ||
+  key.equals("dateCreated") ||
+  key.equals("proto")) {
+// Fields are updated/set on commit and fetch and may not match
+// the fields that are initialized in a non-committed object.
+continue;
+  }
+  Object data1 = getField(original, key);
+  Object data2 = getField(committed, key);
+  if (assertEquals) {
+assertEquals("Field " + key + " does not match", data1, data2);
+  } else if (!data1.equals(data2)) {
+ret = false;
+  }
+}
+
+long now = 

[12/32] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0228ead4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
new file mode 100644
index 000..2d74505
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.resolver;
+
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES;
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMESERVICES;
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.ROUTERS;
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport;
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyException;
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.clearRecords;
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.newStateStore;
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.waitStateStore;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreUnavailableException;
+import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test the basic {@link ActiveNamenodeResolver} functionality.
+ */
+public class TestNamenodeResolver {
+
+  private static StateStoreService stateStore;
+  private static ActiveNamenodeResolver namenodeResolver;
+
+  @BeforeClass
+  public static void create() throws Exception {
+
+Configuration conf = getStateStoreConfiguration();
+
+// Reduce expirations to 5 seconds
+conf.setLong(
+DFSConfigKeys.FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS,
+TimeUnit.SECONDS.toMillis(5));
+
+stateStore = newStateStore(conf);
+assertNotNull(stateStore);
+
+namenodeResolver = new MembershipNamenodeResolver(conf, stateStore);
+namenodeResolver.setRouterId(ROUTERS[0]);
+  }
+
+  @AfterClass
+  public static void destroy() throws Exception {
+stateStore.stop();
+stateStore.close();
+  }
+
+  @Before
+  public void setup() throws IOException, InterruptedException {
+// Wait for state store to connect
+stateStore.loadDriver();
+waitStateStore(stateStore, 1);
+
+// Clear NN registrations
+boolean cleared = clearRecords(stateStore, MembershipState.class);
+assertTrue(cleared);
+  }
+
+  @Test
+  public void testStateStoreDisconnected() throws Exception {
+
+// Add an entry to the store
+NamenodeStatusReport report = createNamenodeReport(
+NAMESERVICES[0], NAMENODES[0], HAServiceState.ACTIVE);
+assertTrue(namenodeResolver.registerNamenode(report));
+
+// Close the data store driver
+stateStore.closeDriver();
+assertFalse(stateStore.isDriverReady());
+
+// Flush the caches
+stateStore.refreshCaches(true);
+
+// Verify commands 

[11/32] hadoop git commit: HDFS-12223. Rebasing HDFS-10467. Contributed by Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-12223. Rebasing HDFS-10467. Contributed by Inigo Goiri.

(cherry picked from commit 47db6e9d8e2c264671c89fdd6cb11a7c762d2cce)
(cherry picked from commit 0ec82b8cdfaaa5f23d1a0f7f7fb8c9187c5e309b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a09d7219
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a09d7219
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a09d7219

Branch: refs/heads/branch-2
Commit: a09d7219c9d90307064196519af111552ac603f9
Parents: 2f95eac
Author: Inigo Goiri 
Authored: Fri Jul 28 15:55:10 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:38:29 2017 -0700

--
 .../federation/router/RouterRpcServer.java  | 59 +---
 1 file changed, 51 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a09d7219/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 4bae71e..eaaab39 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -64,8 +64,9 @@ import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
-import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
@@ -75,6 +76,7 @@ import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -85,6 +87,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -1736,13 +1739,6 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
   }
 
   @Override // ClientProtocol
-  public AddingECPolicyResponse[] addErasureCodingPolicies(
-  ErasureCodingPolicy[] policies) throws IOException {
-checkOperation(OperationCategory.WRITE, false);
-return null;
-  }
-
-  @Override // ClientProtocol
   public void unsetErasureCodingPolicy(String src) throws IOException {
 checkOperation(OperationCategory.WRITE, false);
   }
@@ -1808,6 +1804,53 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
 return null;
   }
 
+  @Override
+  public AddECPolicyResponse[] addErasureCodingPolicies(
+  ErasureCodingPolicy[] arg0) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+return null;
+  }
+
+  @Override
+  public void removeErasureCodingPolicy(String arg0) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public void disableErasureCodingPolicy(String arg0) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public void enableErasureCodingPolicy(String arg0) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public ECBlockGroupsStats getECBlockGroupsStats() throws IOException {
+checkOperation(OperationCategory.READ, false);
+return null;
+  }
+
+  @Override
+  public HashMap<String, String> getErasureCodingCodecs() throws IOException {
+checkOperation(OperationCategory.READ, false);
+return null;
+  }
+
+  @Override
+  public BlocksStats 

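The message cuts off mid-declaration above. All the stubs in this hunk share one pattern, so the truncated method presumably follows the sketch below; the method name is an assumption inferred from the BlocksStats return type, not confirmed by the archive:

  // Stub pattern used throughout this hunk: verify the Router may serve
  // this operation category, then return a placeholder until federation
  // support for the call is implemented.
  @Override
  public BlocksStats getBlocksStats() throws IOException {
    checkOperation(OperationCategory.READ, false);
    return null;
  }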
[03/32] hadoop git commit: HDFS-10881. Federation State Store Driver API. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-10881. Federation State Store Driver API. Contributed by Jason Kace and 
Inigo Goiri.

(cherry picked from commit 0f88e049156dce173afc0dbda864e29190dd2210)
(cherry picked from commit 533b986633e0a9076cf3918fba3e3b591c6f65f2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3b8b5e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3b8b5e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3b8b5e4

Branch: refs/heads/branch-2
Commit: c3b8b5e49ffccb01f173e19c9daceedd06875714
Parents: 096e8c4
Author: Inigo 
Authored: Wed Mar 29 19:35:06 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:38:24 2017 -0700

--
 .../store/StateStoreUnavailableException.java   |  33 
 .../federation/store/StateStoreUtils.java   |  72 +++
 .../store/driver/StateStoreDriver.java  | 172 +
 .../driver/StateStoreRecordOperations.java  | 164 
 .../store/driver/impl/StateStoreBaseImpl.java   |  69 +++
 .../store/driver/impl/package-info.java |  39 
 .../federation/store/driver/package-info.java   |  37 
 .../federation/store/protocol/package-info.java |  31 +++
 .../federation/store/records/BaseRecord.java| 189 +++
 .../federation/store/records/QueryResult.java   |  56 ++
 .../federation/store/records/package-info.java  |  36 
 11 files changed, 898 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3b8b5e4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java
new file mode 100644
index 000..4e6f8c8
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store;
+
+import java.io.IOException;
+
+/**
+ * Thrown when the State Store is not reachable or available. Cached APIs and
+ * queries may still succeed. Clients should retry later.
+ */
+public class StateStoreUnavailableException extends IOException {
+
+  private static final long serialVersionUID = 1L;
+
+  public StateStoreUnavailableException(String msg) {
+super(msg);
+  }
+}
\ No newline at end of file

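Because this exception marks a transient condition, callers are expected to fall back to cached data and retry. A minimal handling sketch, where the record fetch is hypothetical and only the exception type comes from this patch:

  try {
    // Hypothetical read; any State Store access may surface this
    // exception while the backing store is down.
    QueryResult<MembershipState> result =
        stateStore.getDriver().get(MembershipState.class);
  } catch (StateStoreUnavailableException e) {
    // Transient: serve cached results if available, retry later.
    LOG.warn("State Store unavailable, retrying later", e);
  }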
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3b8b5e4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
new file mode 100644
index 000..8c681df
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" 

[08/32] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f95eac8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 24792bb..4bae71e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -17,16 +17,109 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_KEY;
+
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
+import org.apache.hadoop.fs.CacheFlag;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.AddBlockFlag;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.inotify.EventBatchList;
+import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import 

[04/32] hadoop git commit: HDFS-10882. Federation State Store Interface API. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-10882. Federation State Store Interface API. Contributed by Jason Kace and 
Inigo Goiri.

(cherry picked from commit 6d94c90ece1c1d23d4c97e72c54e9991f5dbc481)
(cherry picked from commit 2c740a684a23663962119726bf0e7ecef173f6f1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f6d18dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f6d18dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f6d18dd

Branch: refs/heads/branch-2
Commit: 3f6d18ddf657e417cfc3f921d8fba39559ac48ea
Parents: c3b8b5e
Author: Inigo 
Authored: Thu Apr 6 19:18:52 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:38:25 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  11 ++
 .../server/federation/store/RecordStore.java| 100 
 .../store/driver/StateStoreSerializer.java  | 119 +++
 .../driver/impl/StateStoreSerializerPBImpl.java | 115 ++
 .../store/records/impl/pb/PBRecord.java |  47 
 .../store/records/impl/pb/package-info.java |  29 +
 .../src/main/resources/hdfs-default.xml |   8 ++
 7 files changed, 429 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f6d18dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 912307f..b645347 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -1018,6 +1019,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT =
   "org.apache.hadoop.hdfs.server.federation.MockResolver";
 
+  // HDFS Router-based federation State Store
+  public static final String FEDERATION_STORE_PREFIX =
+  FEDERATION_ROUTER_PREFIX + "store.";
+
+  public static final String FEDERATION_STORE_SERIALIZER_CLASS =
+  DFSConfigKeys.FEDERATION_STORE_PREFIX + "serializer";
+  public static final Class<? extends StateStoreSerializer>
+  FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT =
+  StateStoreSerializerPBImpl.class;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

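The serializer is swapped through the standard Configuration API. A short usage sketch, assuming only the key and default from the hunk above (setClass is the stock org.apache.hadoop.conf.Configuration method):

  Configuration conf = new Configuration();
  // Keep the protobuf serializer explicitly; any StateStoreSerializer
  // implementation could be supplied here instead.
  conf.setClass(DFSConfigKeys.FEDERATION_STORE_SERIALIZER_CLASS,
      StateStoreSerializerPBImpl.class, StateStoreSerializer.class);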
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f6d18dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
new file mode 100644
index 000..524f432
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store;
+
+import java.lang.reflect.Constructor;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import 

[06/32] hadoop git commit: HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri.

2017-10-19 Thread inigoiri
HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace 
and Inigo Goiri.

(cherry picked from commit c6e0bd640cdaf83a660fa050809cad6f1d4c6f4d)
(cherry picked from commit 4bf877b03f0e01c4bcedc689c66689701e62b560)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7bd91fa2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7bd91fa2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7bd91fa2

Branch: refs/heads/branch-2
Commit: 7bd91fa2edcb2deac10653d0140a2cd5e462f477
Parents: 3f6d18d
Author: Inigo Goiri 
Authored: Tue May 2 15:49:53 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 19 17:38:27 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +
 .../federation/router/PeriodicService.java  | 198 
 .../StateStoreConnectionMonitorService.java |  67 +++
 .../federation/store/StateStoreService.java | 152 +-
 .../federation/store/StateStoreUtils.java   |  51 +-
 .../store/driver/StateStoreDriver.java  |  31 +-
 .../driver/StateStoreRecordOperations.java  |  17 +-
 .../store/driver/impl/StateStoreBaseImpl.java   |  31 +-
 .../driver/impl/StateStoreFileBaseImpl.java | 429 
 .../store/driver/impl/StateStoreFileImpl.java   | 161 +++
 .../driver/impl/StateStoreFileSystemImpl.java   | 178 +++
 .../driver/impl/StateStoreSerializableImpl.java |  77 +++
 .../federation/store/records/BaseRecord.java|  20 +-
 .../server/federation/store/records/Query.java  |  66 +++
 .../src/main/resources/hdfs-default.xml |  16 +
 .../store/FederationStateStoreTestUtils.java| 232 +
 .../store/driver/TestStateStoreDriverBase.java  | 483 +++
 .../store/driver/TestStateStoreFile.java|  64 +++
 .../store/driver/TestStateStoreFileSystem.java  |  88 
 19 files changed, 2329 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bd91fa2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b645347..1b66ead 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -18,12 +18,16 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
+import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
 import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
 import org.apache.hadoop.http.HttpConfig;
 
@@ -1029,6 +1033,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT =
   StateStoreSerializerPBImpl.class;
 
+  public static final String FEDERATION_STORE_DRIVER_CLASS =
+  FEDERATION_STORE_PREFIX + "driver.class";
+  public static final Class<? extends StateStoreDriver>
+  FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreFileImpl.class;
+
+  public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
+  FEDERATION_STORE_PREFIX + "connection.test";
+  public static final long FEDERATION_STORE_CONNECTION_TEST_MS_DEFAULT =
+  TimeUnit.MINUTES.toMillis(1);
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

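Both the driver implementation and the connectivity probe interval are configurable. A sketch using only the keys introduced above; the filesystem-backed driver class is listed in this commit's file summary, and the 30-second interval is illustrative:

  Configuration conf = new Configuration();
  // Swap in the filesystem-backed driver and probe connectivity every 30s.
  conf.setClass(DFSConfigKeys.FEDERATION_STORE_DRIVER_CLASS,
      StateStoreFileSystemImpl.class, StateStoreDriver.class);
  conf.setLong(DFSConfigKeys.FEDERATION_STORE_CONNECTION_TEST_MS,
      TimeUnit.SECONDS.toMillis(30));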
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bd91fa2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java
new file mode 100644
index 000..5e1
--- /dev/null
+++ 

hadoop git commit: YARN-7359. TestAppManager.testQueueSubmitWithNoPermission() should be scheduler agnostic. (Contributed by Haibo Chen)

2017-10-19 Thread yufei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 612350eb9 -> b2f516118


YARN-7359. TestAppManager.testQueueSubmitWithNoPermission() should be scheduler 
agnostic. (Contributed by Haibo Chen)

(cherry picked from commit 7b4b0187806601e33f5a88d48991e7c12ee4419f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2f51611
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2f51611
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2f51611

Branch: refs/heads/branch-3.0
Commit: b2f516118ceecf3c135eb8515006d40daeffaf75
Parents: 612350e
Author: Yufei Gu 
Authored: Thu Oct 19 16:51:29 2017 -0700
Committer: Yufei Gu 
Committed: Thu Oct 19 16:53:10 2017 -0700

--
 .../apache/hadoop/yarn/server/resourcemanager/TestAppManager.java  | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2f51611/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index db26a87..91406cc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -301,8 +301,6 @@ public class TestAppManager{
   @Test
   public void testQueueSubmitWithNoPermission() throws IOException {
 YarnConfiguration conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
 conf.set(PREFIX + "root.acl_submit_applications", " ");
 conf.set(PREFIX + "root.acl_administer_queue", " ");
 
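With the CapacityScheduler pinning removed, the test exercises whichever scheduler the build configures. A sketch of how the effective scheduler can be inspected with stock Configuration APIs (the resolved class depends on the distribution's defaults):

  YarnConfiguration conf = new YarnConfiguration();
  // Resolve the scheduler the RM would instantiate; with no explicit
  // override, FairScheduler-based builds now run this test too.
  Class<?> scheduler = conf.getClass(YarnConfiguration.RM_SCHEDULER, null);
  System.out.println("Scheduler under test: " + scheduler);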





hadoop git commit: YARN-7359. TestAppManager.testQueueSubmitWithNoPermission() should be scheduler agnostic. (Contributed by Haibo Chen)

2017-10-19 Thread yufei
Repository: hadoop
Updated Branches:
  refs/heads/trunk ca8ddc6aa -> 7b4b01878


YARN-7359. TestAppManager.testQueueSubmitWithNoPermission() should be scheduler 
agnostic. (Contributed by Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b4b0187
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b4b0187
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b4b0187

Branch: refs/heads/trunk
Commit: 7b4b0187806601e33f5a88d48991e7c12ee4419f
Parents: ca8ddc6
Author: Yufei Gu 
Authored: Thu Oct 19 16:51:29 2017 -0700
Committer: Yufei Gu 
Committed: Thu Oct 19 16:51:47 2017 -0700

--
 .../apache/hadoop/yarn/server/resourcemanager/TestAppManager.java  | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b4b0187/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index b24a309..8179321 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -305,8 +305,6 @@ public class TestAppManager{
   @Test
   public void testQueueSubmitWithNoPermission() throws IOException {
 YarnConfiguration conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
 conf.set(PREFIX + "root.acl_submit_applications", " ");
 conf.set(PREFIX + "root.acl_administer_queue", " ");
 





hadoop git commit: YARN-7359. TestAppManager.testQueueSubmitWithNoPermission() should be scheduler agnostic. (Contributed by Haibo Chen)

2017-10-19 Thread yufei
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3828b33b8 -> 99ce49c52


YARN-7359. TestAppManager.testQueueSubmitWithNoPermission() should be scheduler 
agnostic. (Contributed by Haibo Chen)

(cherry picked from commit 7b4b0187806601e33f5a88d48991e7c12ee4419f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99ce49c5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99ce49c5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99ce49c5

Branch: refs/heads/branch-2
Commit: 99ce49c521258650b31b77171f32bf7dbc8ff6e9
Parents: 3828b33
Author: Yufei Gu 
Authored: Thu Oct 19 16:51:29 2017 -0700
Committer: Yufei Gu 
Committed: Thu Oct 19 16:52:32 2017 -0700

--
 .../apache/hadoop/yarn/server/resourcemanager/TestAppManager.java  | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99ce49c5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index 009eb2c..8a5c730 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -301,8 +301,6 @@ public class TestAppManager{
   @Test
   public void testQueueSubmitWithNoPermission() throws IOException {
 YarnConfiguration conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
 conf.set(PREFIX + "root.acl_submit_applications", " ");
 conf.set(PREFIX + "root.acl_administer_queue", " ");
 





hadoop git commit: HADOOP-14816. Update Dockerfile to use Xenial. Contributed by Allen Wittenauer

2017-10-19 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 288d2fc30 -> 612350eb9


HADOOP-14816. Update Dockerfile to use Xenial. Contributed by Allen Wittenauer

(cherry picked from commit ca8ddc6aa413de347866ad9a0a3407356a280a1f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/612350eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/612350eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/612350eb

Branch: refs/heads/branch-3.0
Commit: 612350eb95dc6af409a6fb0b3d8f9e096cf9f326
Parents: 288d2fc
Author: Chris Douglas 
Authored: Thu Oct 19 16:33:47 2017 -0700
Committer: Chris Douglas 
Committed: Thu Oct 19 16:47:19 2017 -0700

--
 dev-support/docker/Dockerfile   | 161 +++
 dev-support/docker/hadoop_env_checks.sh |  15 ++-
 2 files changed, 98 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/612350eb/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 31ac611..8af002d 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -18,21 +18,28 @@
 # Dockerfile for installing the necessary dependencies for building Hadoop.
 # See BUILDING.txt.
 
-
-FROM ubuntu:trusty
+FROM ubuntu:xenial
 
 WORKDIR /root
 
+#
+# Disable suggests/recommends
+#
+RUN echo APT::Install-Recommends "0"\; > /etc/apt/apt.conf.d/10disableextras
+RUN echo APT::Install-Suggests "0"\; >>  /etc/apt/apt.conf.d/10disableextras
+
 ENV DEBIAN_FRONTEND noninteractive
 ENV DEBCONF_TERSE true
 
 ##
-# Install common dependencies from packages
+# Install common dependencies from packages. Versions here are either
+# sufficient or irrelevant.
 #
 # WARNING: DO NOT PUT JAVA APPS HERE! Otherwise they will install default
 # Ubuntu Java.  See Java section below!
 ##
-RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
+RUN apt-get -q update && apt-get -q install -y \
+apt-utils \
 build-essential \
 bzip2 \
 curl \
@@ -42,7 +49,6 @@ RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
 gcc \
 git \
 gnupg-agent \
-make \
 libbz2-dev \
 libcurl4-openssl-dev \
 libfuse-dev \
@@ -51,106 +57,110 @@ RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
 libsnappy-dev \
 libssl-dev \
 libtool \
+locales \
+make \
 pinentry-curses \
 pkg-config \
-protobuf-compiler \
-protobuf-c-compiler \
 python \
 python2.7 \
-python2.7-dev \
 python-pip \
+python-pkg-resources \
+python-setuptools \
+python-wheel \
 rsync \
+software-properties-common \
 snappy \
+sudo \
 zlib1g-dev
 
 ###
-# Oracle Java
+# OpenJDK 8
 ###
+RUN apt-get -q install -y openjdk-8-jdk
 
-RUN echo "dot_style = mega" > "/root/.wgetrc"
-RUN echo "quiet = on" >> "/root/.wgetrc"
-
-RUN apt-get -q install --no-install-recommends -y software-properties-common
-RUN add-apt-repository -y ppa:webupd8team/java
-RUN apt-get -q update
-
-# Auto-accept the Oracle JDK license
-RUN echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
-RUN apt-get -q install --no-install-recommends -y oracle-java8-installer
-
-
-# Apps that require Java
-###
-RUN apt-get -q update && apt-get -q install --no-install-recommends -y ant
+###
+# OpenJDK 9
+# w/workaround for
+# https://bugs.launchpad.net/ubuntu/+source/openjdk-9/+bug/1593191
+###
+RUN apt-get -o Dpkg::Options::="--force-overwrite" \
+-q install -y \
+openjdk-9-jdk-headless
 
-##
-# Install Apache Maven
-##
-RUN mkdir -p /opt/maven && \
-curl -L -s -S \
- https://www-us.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz \
- -o /opt/maven.tar.gz && \
-tar xzf /opt/maven.tar.gz --strip-components 1 -C /opt/maven
-ENV MAVEN_HOME /opt/maven
-ENV PATH "${PATH}:/opt/maven/bin"
+###
+# Set default Java
+###
+#
+# By default, OpenJDK sets the default Java to the highest version.
+# We want the opposite, so
+#
+RUN update-java-alternatives --set java-1.8.0-openjdk-amd64
+RUN update-alternatives --get-selections | grep -i jdk | \
+while read line; do \
+  alternative=$(echo $line | awk '{print $1}'); \
+  path=$(echo $line | awk '{print $3}'); \
+  newpath=$(echo $path | sed -e 's/java-9/java-8/'); \
+  update-alternatives --set $alternative $newpath; \
+done
 
 ##
-# Install cmake
+# Install cmake 3.1.0 (3.5.1 ships with Xenial)
 ##
 RUN mkdir -p /opt/cmake && \
 curl -L -s -S \
- 

hadoop git commit: HADOOP-14816. Update Dockerfile to use Xenial. Contributed by Allen Wittenauer

2017-10-19 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/trunk cbd2b73ef -> ca8ddc6aa


HADOOP-14816. Update Dockerfile to use Xenial. Contributed by Allen Wittenauer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca8ddc6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca8ddc6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca8ddc6a

Branch: refs/heads/trunk
Commit: ca8ddc6aa413de347866ad9a0a3407356a280a1f
Parents: cbd2b73
Author: Chris Douglas 
Authored: Thu Oct 19 16:33:47 2017 -0700
Committer: Chris Douglas 
Committed: Thu Oct 19 16:45:18 2017 -0700

--
 dev-support/docker/Dockerfile   | 161 +++
 dev-support/docker/hadoop_env_checks.sh |  15 ++-
 2 files changed, 98 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca8ddc6a/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 31ac611..8af002d 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -18,21 +18,28 @@
 # Dockerfile for installing the necessary dependencies for building Hadoop.
 # See BUILDING.txt.
 
-
-FROM ubuntu:trusty
+FROM ubuntu:xenial
 
 WORKDIR /root
 
+#
+# Disable suggests/recommends
+#
+RUN echo APT::Install-Recommends "0"\; > /etc/apt/apt.conf.d/10disableextras
+RUN echo APT::Install-Suggests "0"\; >>  /etc/apt/apt.conf.d/10disableextras
+
 ENV DEBIAN_FRONTEND noninteractive
 ENV DEBCONF_TERSE true
 
 ##
-# Install common dependencies from packages
+# Install common dependencies from packages. Versions here are either
+# sufficient or irrelevant.
 #
 # WARNING: DO NOT PUT JAVA APPS HERE! Otherwise they will install default
 # Ubuntu Java.  See Java section below!
 ##
-RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
+RUN apt-get -q update && apt-get -q install -y \
+apt-utils \
 build-essential \
 bzip2 \
 curl \
@@ -42,7 +49,6 @@ RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
 gcc \
 git \
 gnupg-agent \
-make \
 libbz2-dev \
 libcurl4-openssl-dev \
 libfuse-dev \
@@ -51,106 +57,110 @@ RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
 libsnappy-dev \
 libssl-dev \
 libtool \
+locales \
+make \
 pinentry-curses \
 pkg-config \
-protobuf-compiler \
-protobuf-c-compiler \
 python \
 python2.7 \
-python2.7-dev \
 python-pip \
+python-pkg-resources \
+python-setuptools \
+python-wheel \
 rsync \
+software-properties-common \
 snappy \
+sudo \
 zlib1g-dev
 
 ###
-# Oracle Java
+# OpenJDK 8
 ###
+RUN apt-get -q install -y openjdk-8-jdk
 
-RUN echo "dot_style = mega" > "/root/.wgetrc"
-RUN echo "quiet = on" >> "/root/.wgetrc"
-
-RUN apt-get -q install --no-install-recommends -y software-properties-common
-RUN add-apt-repository -y ppa:webupd8team/java
-RUN apt-get -q update
-
-# Auto-accept the Oracle JDK license
-RUN echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
-RUN apt-get -q install --no-install-recommends -y oracle-java8-installer
-
-
-# Apps that require Java
-###
-RUN apt-get -q update && apt-get -q install --no-install-recommends -y ant
+###
+# OpenJDK 9
+# w/workaround for
+# https://bugs.launchpad.net/ubuntu/+source/openjdk-9/+bug/1593191
+###
+RUN apt-get -o Dpkg::Options::="--force-overwrite" \
+-q install -y \
+openjdk-9-jdk-headless
 
-##
-# Install Apache Maven
-##
-RUN mkdir -p /opt/maven && \
-curl -L -s -S \
- https://www-us.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz \
- -o /opt/maven.tar.gz && \
-tar xzf /opt/maven.tar.gz --strip-components 1 -C /opt/maven
-ENV MAVEN_HOME /opt/maven
-ENV PATH "${PATH}:/opt/maven/bin"
+###
+# Set default Java
+###
+#
+# By default, OpenJDK sets the default Java to the highest version.
+# We want the opposite, so
+#
+RUN update-java-alternatives --set java-1.8.0-openjdk-amd64
+RUN update-alternatives --get-selections | grep -i jdk | \
+while read line; do \
+  alternative=$(echo $line | awk '{print $1}'); \
+  path=$(echo $line | awk '{print $3}'); \
+  newpath=$(echo $path | sed -e 's/java-9/java-8/'); \
+  update-alternatives --set $alternative $newpath; \
+done
 
 ##
-# Install cmake
+# Install cmake 3.1.0 (3.5.1 ships with Xenial)
 ##
 RUN mkdir -p /opt/cmake && \
 curl -L -s -S \
- https://cmake.org/files/v3.1/cmake-3.1.0-Linux-x86_64.tar.gz \
- -o 

hadoop git commit: YARN-7294. TestSignalContainer#testSignalRequestDeliveryToNM fails intermittently with Fair Scheduler. (Contributed by Miklos Szegedi)

2017-10-19 Thread yufei
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2e3b7130c -> 3828b33b8


YARN-7294. TestSignalContainer#testSignalRequestDeliveryToNM fails 
intermittently with Fair Scheduler. (Contributed by Miklos Szegedi)

(cherry picked from commit cbd2b73ef81a7e275c5d4f842cac5b81ff2f8c84)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3828b33b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3828b33b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3828b33b

Branch: refs/heads/branch-2
Commit: 3828b33b8b3b6f1863178212dd21242532c063f8
Parents: 2e3b713
Author: Yufei Gu 
Authored: Thu Oct 19 16:39:25 2017 -0700
Committer: Yufei Gu 
Committed: Thu Oct 19 16:41:25 2017 -0700

--
 .../yarn/server/resourcemanager/TestSignalContainer.java | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3828b33b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
index 2688987..fac0b96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager;
 import java.util.ArrayList;
 import java.util.List;
 
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.junit.Assert;
 
 import org.apache.commons.logging.Log;
@@ -50,6 +51,10 @@ public class TestSignalContainer {
 Logger rootLogger = LogManager.getRootLogger();
 rootLogger.setLevel(Level.DEBUG);
 MockRM rm = new MockRM();
+FairScheduler fs = null;
+if (rm.getResourceScheduler().getClass() == FairScheduler.class) {
+  fs = (FairScheduler)rm.getResourceScheduler();
+}
 rm.start();
 
 MockNM nm1 = rm.registerNode("h1:1234", 5000);
@@ -78,6 +83,9 @@ public class TestSignalContainer {
   List<Container> allocation = am.allocate(new ArrayList<ResourceRequest>(),
   new ArrayList<ContainerId>()).getAllocatedContainers();
   conts.addAll(allocation);
+  if (fs != null) {
+nm1.nodeHeartbeat(true);
+  }
 }
 Assert.assertEquals(request, conts.size());
 
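The added heartbeat matters because the Fair Scheduler assigns containers when a node heartbeats in (unless continuous scheduling is enabled), not when the AM calls allocate(). A compressed sketch of the resulting loop, with variable names as in the test above and the pacing purely illustrative:

  while (conts.size() < request) {
    conts.addAll(am.allocate(new ArrayList<ResourceRequest>(),
        new ArrayList<ContainerId>()).getAllocatedContainers());
    if (fs != null) {
      nm1.nodeHeartbeat(true); // drives FairScheduler's per-heartbeat assignment
    }
    Thread.sleep(100);
  }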





hadoop git commit: YARN-7294. TestSignalContainer#testSignalRequestDeliveryToNM fails intermittently with Fair Scheduler. (Contributed by Miklos Szegedi)

2017-10-19 Thread yufei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 73cbc373e -> 288d2fc30


YARN-7294. TestSignalContainer#testSignalRequestDeliveryToNM fails 
intermittently with Fair Scheduler. (Contributed by Miklos Szegedi)

(cherry picked from commit cbd2b73ef81a7e275c5d4f842cac5b81ff2f8c84)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/288d2fc3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/288d2fc3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/288d2fc3

Branch: refs/heads/branch-3.0
Commit: 288d2fc3094f36174fcea3b1c378b65360b9c302
Parents: 73cbc37
Author: Yufei Gu 
Authored: Thu Oct 19 16:39:25 2017 -0700
Committer: Yufei Gu 
Committed: Thu Oct 19 16:40:36 2017 -0700

--
 .../yarn/server/resourcemanager/TestSignalContainer.java | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/288d2fc3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
index 2688987..fac0b96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager;
 import java.util.ArrayList;
 import java.util.List;
 
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.junit.Assert;
 
 import org.apache.commons.logging.Log;
@@ -50,6 +51,10 @@ public class TestSignalContainer {
 Logger rootLogger = LogManager.getRootLogger();
 rootLogger.setLevel(Level.DEBUG);
 MockRM rm = new MockRM();
+FairScheduler fs = null;
+if (rm.getResourceScheduler().getClass() == FairScheduler.class) {
+  fs = (FairScheduler)rm.getResourceScheduler();
+}
 rm.start();
 
 MockNM nm1 = rm.registerNode("h1:1234", 5000);
@@ -78,6 +83,9 @@ public class TestSignalContainer {
   List<Container> allocation = am.allocate(new ArrayList<ResourceRequest>(),
   new ArrayList<ContainerId>()).getAllocatedContainers();
   conts.addAll(allocation);
+  if (fs != null) {
+nm1.nodeHeartbeat(true);
+  }
 }
 Assert.assertEquals(request, conts.size());
 





hadoop git commit: YARN-7294. TestSignalContainer#testSignalRequestDeliveryToNM fails intermittently with Fair Scheduler. (Contributed by Miklos Szegedi)

2017-10-19 Thread yufei
Repository: hadoop
Updated Branches:
  refs/heads/trunk c1b08ba72 -> cbd2b73ef


YARN-7294. TestSignalContainer#testSignalRequestDeliveryToNM fails 
intermittently with Fair Scheduler. (Contributed by Miklos Szegedi)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbd2b73e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbd2b73e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbd2b73e

Branch: refs/heads/trunk
Commit: cbd2b73ef81a7e275c5d4f842cac5b81ff2f8c84
Parents: c1b08ba
Author: Yufei Gu 
Authored: Thu Oct 19 16:39:25 2017 -0700
Committer: Yufei Gu 
Committed: Thu Oct 19 16:39:25 2017 -0700

--
 .../yarn/server/resourcemanager/TestSignalContainer.java | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbd2b73e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
index 2688987..fac0b96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager;
 import java.util.ArrayList;
 import java.util.List;
 
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.junit.Assert;
 
 import org.apache.commons.logging.Log;
@@ -50,6 +51,10 @@ public class TestSignalContainer {
 Logger rootLogger = LogManager.getRootLogger();
 rootLogger.setLevel(Level.DEBUG);
 MockRM rm = new MockRM();
+FairScheduler fs = null;
+if (rm.getResourceScheduler().getClass() == FairScheduler.class) {
+  fs = (FairScheduler)rm.getResourceScheduler();
+}
 rm.start();
 
 MockNM nm1 = rm.registerNode("h1:1234", 5000);
@@ -78,6 +83,9 @@ public class TestSignalContainer {
   List<Container> allocation = am.allocate(new ArrayList<ResourceRequest>(),
   new ArrayList<ContainerId>()).getAllocatedContainers();
   conts.addAll(allocation);
+  if (fs != null) {
+nm1.nodeHeartbeat(true);
+  }
 }
 Assert.assertEquals(request, conts.size());
 




