hadoop git commit: HDFS-12037. Ozone: Improve REST API output format for better readability. Contributed by Weiwei Yang.

2017-07-07 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 5fd38a6dc -> 4e3fbc869


HDFS-12037. Ozone: Improve REST API output format for better readability. 
Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e3fbc86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e3fbc86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e3fbc86

Branch: refs/heads/HDFS-7240
Commit: 4e3fbc86977b6c925a117ad7a23dd41415d51fa6
Parents: 5fd38a6
Author: Weiwei Yang 
Authored: Sat Jul 8 10:06:58 2017 +0800
Committer: Weiwei Yang 
Committed: Sat Jul 8 10:06:58 2017 +0800

--
 .../java/org/apache/hadoop/ozone/web/response/BucketInfo.java | 3 ++-
 .../main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java   | 3 ++-
 .../java/org/apache/hadoop/ozone/web/response/ListBuckets.java| 3 ++-
 .../main/java/org/apache/hadoop/ozone/web/response/ListKeys.java  | 3 ++-
 .../java/org/apache/hadoop/ozone/web/response/ListVolumes.java| 3 ++-
 .../java/org/apache/hadoop/ozone/web/response/VolumeInfo.java | 3 ++-
 6 files changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e3fbc86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
index 1e47c16..53c7119 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
@@ -56,7 +56,8 @@ public class BucketInfo implements Comparable<BucketInfo> {
 mapper.setVisibility(PropertyAccessor.FIELD, 
JsonAutoDetect.Visibility.ANY);
 mapper.addMixIn(Object.class, MixIn.class);
 
-WRITER = mapper.writer(filters);
+mapper.setFilterProvider(filters);
+WRITER = mapper.writerWithDefaultPrettyPrinter();
   }
 
   private String volumeName;
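
The same two-line change is applied to all six Ozone response classes: the
filter provider moves onto the mapper, which frees the writer call to request
the default pretty printer. A minimal standalone sketch of the effect,
assuming Jackson 2.x (the Ozone mix-in and filter setup is omitted so the
snippet runs on its own):

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;

public class PrettyJsonDemo {
  static class Volume {
    public String volumeName = "vol-1";
    public String createdBy = "hdfs";
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();

    // Before: mapper.writer(filters) produced a compact, single-line writer.
    // After: filters live on the mapper, and the writer is created with the
    // default pretty printer, so REST responses come back indented.
    ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter();

    System.out.println(writer.writeValueAsString(new Volume()));
    // {
    //   "volumeName" : "vol-1",
    //   "createdBy" : "hdfs"
    // }
  }
}

Note that writerWithDefaultPrettyPrinter() only changes formatting; which
fields are serialized is still governed by whatever FilterProvider has been
registered on the mapper.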

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e3fbc86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
index 69be5b9..e5cfd21 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
@@ -54,7 +54,8 @@ public class KeyInfo implements Comparable<KeyInfo> {
 JsonAutoDetect.Visibility.ANY);
 mapper.addMixIn(Object.class, MixIn.class);
 
-WRITER = mapper.writer(filters);
+mapper.setFilterProvider(filters);
+WRITER = mapper.writerWithDefaultPrettyPrinter();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e3fbc86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
index 3b0d32e..bc4e65b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
@@ -55,7 +55,8 @@ public class ListBuckets {
 JsonAutoDetect.Visibility.ANY);
 mapper.addMixIn(Object.class, MixIn.class);
 
-WRITER = mapper.writer(filters);
+mapper.setFilterProvider(filters);
+WRITER = mapper.writerWithDefaultPrettyPrinter();
   }
 
   private List<BucketInfo> buckets;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e3fbc86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
index fd76e4a..9dc77d2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java

hadoop git commit: MAPREDUCE-6246. DBOutputFormat.java appending extra semicolon to query which is incompatible with DB2. Contributed by ramtin and Gergely Novák.

2017-07-07 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.2 131f1f74c -> e64966935


MAPREDUCE-6246. DBOutputFormat.java appending extra semicolon to query which is 
incompatible with DB2. Contributed by ramtin and Gergely Novák.

(cherry picked from commit f484a6ff602d48413556a1d046670e2003c71c2e)
(cherry picked from commit f823f9fd784fa4944178247a82df24fba03c2051)
(cherry picked from commit 5ad710bc7aec08b19ce1900c089ae6384fe42d6a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6496693
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6496693
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6496693

Branch: refs/heads/branch-2.8.2
Commit: e6496693532a44087041269f0235d11a214abd85
Parents: 131f1f7
Author: Junping Du 
Authored: Fri Jul 7 13:23:43 2017 -0700
Committer: Junping Du 
Committed: Fri Jul 7 14:20:21 2017 -0700

--
 .../hadoop/mapreduce/lib/db/DBOutputFormat.java | 15 ++-
 .../mapreduce/lib/db/TestDBOutputFormat.java| 45 
 2 files changed, 58 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6496693/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
index 2e3a9d8..c222bf5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.lib.db;
 
 import java.io.IOException;
 import java.sql.Connection;
+import java.sql.DatabaseMetaData;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 
@@ -51,6 +52,8 @@ public class DBOutputFormat<K extends DBWritable, V>
 extends OutputFormat<K, V> {
 
   private static final Log LOG = LogFactory.getLog(DBOutputFormat.class);
+  public String dbProductName = "DEFAULT";
+
   public void checkOutputSpecs(JobContext context) 
   throws IOException, InterruptedException {}
 
@@ -158,7 +161,12 @@ extends OutputFormat<K, V> {
 query.append(",");
   }
 }
-query.append(");");
+
+if (dbProductName.startsWith("DB2") || dbProductName.startsWith("ORACLE")) {
+  query.append(")");
+} else {
+  query.append(");");
+}
 
 return query.toString();
   }
@@ -177,7 +185,10 @@ extends OutputFormat<K, V> {
 try {
   Connection connection = dbConf.getConnection();
   PreparedStatement statement = null;
-  
+
+  DatabaseMetaData dbMeta = connection.getMetaData();
+  this.dbProductName = dbMeta.getDatabaseProductName().toUpperCase();
+
   statement = connection.prepareStatement(
 constructQuery(tableName, fieldNames));
   return new DBRecordWriter(connection, statement);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6496693/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
index 014855f..e547c8a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.mapreduce.lib.db;
 
 import java.io.IOException;
+import java.lang.reflect.Field;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
@@ -26,6 +28,7 @@ import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
 
 

hadoop git commit: MAPREDUCE-6246. DBOutputFormat.java appending extra semicolon to query which is incompatible with DB2. Contributed by ramtin and Gergely Novák.

2017-07-07 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 d657c0517 -> 5ad710bc7


MAPREDUCE-6246. DBOutputFormat.java appending extra semicolon to query which is 
incompatible with DB2. Contributed by ramtin and Gergely Novák.

(cherry picked from commit f484a6ff602d48413556a1d046670e2003c71c2e)
(cherry picked from commit f823f9fd784fa4944178247a82df24fba03c2051)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ad710bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ad710bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ad710bc

Branch: refs/heads/branch-2.8
Commit: 5ad710bc7aec08b19ce1900c089ae6384fe42d6a
Parents: d657c05
Author: Junping Du 
Authored: Fri Jul 7 13:23:43 2017 -0700
Committer: Junping Du 
Committed: Fri Jul 7 13:32:21 2017 -0700

--
 .../hadoop/mapreduce/lib/db/DBOutputFormat.java | 15 ++-
 .../mapreduce/lib/db/TestDBOutputFormat.java| 45 
 2 files changed, 58 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ad710bc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
index 2e3a9d8..c222bf5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.lib.db;
 
 import java.io.IOException;
 import java.sql.Connection;
+import java.sql.DatabaseMetaData;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 
@@ -51,6 +52,8 @@ public class DBOutputFormat<K extends DBWritable, V>
 extends OutputFormat<K, V> {
 
   private static final Log LOG = LogFactory.getLog(DBOutputFormat.class);
+  public String dbProductName = "DEFAULT";
+
   public void checkOutputSpecs(JobContext context) 
   throws IOException, InterruptedException {}
 
@@ -158,7 +161,12 @@ extends OutputFormat<K, V> {
 query.append(",");
   }
 }
-query.append(");");
+
+if (dbProductName.startsWith("DB2") || dbProductName.startsWith("ORACLE")) {
+  query.append(")");
+} else {
+  query.append(");");
+}
 
 return query.toString();
   }
@@ -177,7 +185,10 @@ extends OutputFormat<K, V> {
 try {
   Connection connection = dbConf.getConnection();
   PreparedStatement statement = null;
-  
+
+  DatabaseMetaData dbMeta = connection.getMetaData();
+  this.dbProductName = dbMeta.getDatabaseProductName().toUpperCase();
+
   statement = connection.prepareStatement(
 constructQuery(tableName, fieldNames));
   return new DBRecordWriter(connection, statement);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ad710bc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
index 014855f..e547c8a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.mapreduce.lib.db;
 
 import java.io.IOException;
+import java.lang.reflect.Field;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
@@ -26,6 +28,7 @@ import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
 
 public class TestDBOutputFormat {
   private String[] fieldNames = new 

[35/50] [abbrv] hadoop git commit: YARN-6708. Nodemanager container crash after ext3 folder limit. Contributed by Bibin A Chundatt

2017-07-07 Thread xgong
YARN-6708. Nodemanager container crash after ext3 folder limit. Contributed by 
Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7576a688
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7576a688
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7576a688

Branch: refs/heads/YARN-5734
Commit: 7576a688ea84aed7206321b1f03594e43a5f216e
Parents: 946dd25
Author: Jason Lowe 
Authored: Thu Jul 6 09:40:09 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 6 09:40:09 2017 -0500

--
 .../localizer/ContainerLocalizer.java   | 37 +
 .../localizer/TestContainerLocalizer.java   | 43 +++-
 2 files changed, 71 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7576a688/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
index 6e79857..8a46491 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
@@ -31,6 +31,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.Stack;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CancellationException;
 import java.util.concurrent.CompletionService;
@@ -60,6 +61,7 @@ import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
 import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.api.records.SerializedException;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -95,6 +97,8 @@ public class ContainerLocalizer {
   private static final String USERCACHE_CTXT_FMT = "%s.user.cache.dirs";
   private static final FsPermission FILECACHE_PERMS =
   new FsPermission((short)0710);
+  private static final FsPermission USERCACHE_FOLDER_PERMS =
+  new FsPermission((short) 0755);
 
   private final String user;
   private final String appId;
@@ -237,10 +241,29 @@ public class ContainerLocalizer {
 
   }
 
-  Callable<Path> download(Path path, LocalResource rsrc,
+  Callable<Path> download(Path destDirPath, LocalResource rsrc,
   UserGroupInformation ugi) throws IOException {
-diskValidator.checkStatus(new File(path.toUri().getRawPath()));
-return new FSDownloadWrapper(lfs, ugi, conf, path, rsrc);
+// For private localization FsDownload creates folder in destDirPath.
+// Parent directories till user filecache folder is created here.
+if (rsrc.getVisibility() == LocalResourceVisibility.PRIVATE) {
+  createParentDirs(destDirPath);
+}
+diskValidator.checkStatus(new File(destDirPath.toUri().getRawPath()));
+return new FSDownloadWrapper(lfs, ugi, conf, destDirPath, rsrc);
+  }
+
+  private void createParentDirs(Path destDirPath) throws IOException {
+Path parent = destDirPath.getParent();
+Path cacheRoot = LocalCacheDirectoryManager.getCacheDirectoryRoot(parent);
+Stack<Path> dirs = new Stack<Path>();
+while (!parent.equals(cacheRoot)) {
+  dirs.push(parent);
+  parent = parent.getParent();
+}
+// Create directories with user cache permission
+while (!dirs.isEmpty()) {
+  createDir(lfs, dirs.pop(), USERCACHE_FOLDER_PERMS);
+}
   }
 
   static long getEstimatedSize(LocalResource rsrc) {
@@ -455,21 +478,21 @@ public class ContainerLocalizer {
   // $x/usercache/$user/filecache
   Path userFileCacheDir = new Path(base, FILECACHE);
   usersFileCacheDirs[i] = userFileCacheDir.toString();
-  createDir(lfs, userFileCacheDir, FILECACHE_PERMS, false);
+  createDir(lfs, userFileCacheDir, FILECACHE_PERMS);
   // $x/usercache/$user/appcache/$appId
   Path appBase = new Path(base, new Path(APPCACHE, 
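
The core of the fix: FSDownload only creates the leaf directory for a PRIVATE
resource, so the intermediate usercache directories must be pre-created with
0755 before the disk validator runs; otherwise localization can crash once
ext3's per-directory entry limit is reached. A sketch of the same
walk-up-then-create pattern using java.nio.file in place of Hadoop's
Path/FileContext types (illustrative only; assumes a POSIX filesystem):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.ArrayDeque;
import java.util.Deque;

public class CreateParentDirsDemo {
  // Push missing parents until we reach the cache root, then pop and create
  // them top-down so each new directory gets 0755 explicitly.
  static void createParentDirs(Path destDir, Path cacheRoot)
      throws IOException {
    Deque<Path> dirs = new ArrayDeque<>();
    Path parent = destDir.getParent();
    while (parent != null && !parent.equals(cacheRoot)) {
      dirs.push(parent);
      parent = parent.getParent();
    }
    while (!dirs.isEmpty()) {
      Path dir = dirs.pop();
      if (!Files.exists(dir)) {
        Files.createDirectory(dir,
            PosixFilePermissions.asFileAttribute(
                PosixFilePermissions.fromString("rwxr-xr-x")));
      }
    }
  }

  public static void main(String[] args) throws IOException {
    Path root = Files.createTempDirectory("usercache");
    createParentDirs(root.resolve("filecache/0/11/42"), root);
    System.out.println(Files.exists(root.resolve("filecache/0/11"))); // true
  }
}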

[39/50] [abbrv] hadoop git commit: HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. Contributed by Wenxin He.

2017-07-07 Thread xgong
HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. 
Contributed by Wenxin He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7cd09527
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7cd09527
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7cd09527

Branch: refs/heads/YARN-5734
Commit: 7cd095272caa724d11802690544b38d0baaf247d
Parents: 8fc5dcc
Author: Akira Ajisaka 
Authored: Sat Jul 8 02:54:24 2017 +0900
Committer: Akira Ajisaka 
Committed: Sat Jul 8 02:54:24 2017 +0900

--
 .../fs/FileContextCreateMkdirBaseTest.java  |  2 +-
 .../hadoop/fs/FileContextPermissionBase.java|  5 ++--
 .../apache/hadoop/fs/FileContextUtilBase.java   |  5 ++--
 .../org/apache/hadoop/fs/TestListFiles.java |  4 +--
 .../fs/TestLocalFileSystemPermission.java   |  2 +-
 .../ha/TestActiveStandbyElectorRealZK.java  |  7 ++---
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |  6 ++--
 .../hadoop/ha/TestZKFailoverController.java |  5 ++--
 .../io/serializer/TestSerializationFactory.java |  6 ++--
 .../org/apache/hadoop/ipc/MiniRPCBenchmark.java | 13 -
 .../java/org/apache/hadoop/ipc/TestIPC.java |  5 ++--
 .../apache/hadoop/ipc/TestMiniRPCBenchmark.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 16 +--
 .../hadoop/security/TestGroupFallback.java  | 12 
 .../hadoop/security/TestUGIWithMiniKdc.java |  2 +-
 .../security/TestUserGroupInformation.java  |  2 +-
 .../hadoop/security/ssl/TestSSLFactory.java |  2 +-
 .../delegation/web/TestWebDelegationToken.java  |  2 +-
 .../apache/hadoop/test/GenericTestUtils.java| 30 
 .../hadoop/test/TestGenericTestUtils.java   | 10 +++
 .../hadoop/util/Crc32PerformanceTest.java   | 11 +++
 hadoop-common-project/hadoop-nfs/pom.xml|  6 
 .../apache/hadoop/oncrpc/TestFrameDecoder.java  |  6 ++--
 24 files changed, 104 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
index c1de27a..fbd598c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -30,6 +29,7 @@ import static 
org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
 
 import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.event.Level;
 
 /**
  * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
index dff89f9..240989e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.StringTokenizer;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -32,6 +33,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
@@ -61,8 +63,7 @@ public abstract class FileContextPermissionBase {
   
   {
 try {
-  ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
-  .setLevel(org.apache.log4j.Level.DEBUG);
+  GenericTestUtils.setLogLevel(FileSystem.LOG, Level.DEBUG);
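
Every hunk in this patch is the same mechanical substitution: a cast to
Log4JLogger plus org.apache.log4j.Level is replaced by
GenericTestUtils.setLogLevel and the backend-neutral org.slf4j.event.Level.
A hedged sketch of how such a helper can bridge the two level types
internally — an illustration of the adapter idea, not the exact Hadoop
implementation:

import org.apache.log4j.LogManager;
import org.slf4j.Logger;
import org.slf4j.event.Level;

public final class LogLevelUtil {
  private LogLevelUtil() {}

  // Adapt an slf4j Level to the log4j backend actually doing the logging.
  // Tests can then say: LogLevelUtil.setLogLevel(LOG, Level.DEBUG);
  public static void setLogLevel(Logger logger, Level level) {
    org.apache.log4j.Logger target = LogManager.getLogger(logger.getName());
    target.setLevel(org.apache.log4j.Level.toLevel(level.toString()));
  }
}

Centralizing the cast keeps test code independent of the concrete logging
backend, which is the point of the cleanup.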
   

[21/50] [abbrv] hadoop git commit: HDFS-12042. Lazy initialize AbstractINodeDiffList#diffs for snapshots to reduce memory consumption. Contributed by Misha Dmitriev.

2017-07-07 Thread xgong
HDFS-12042. Lazy initialize AbstractINodeDiffList#diffs for snapshots to reduce 
memory consumption. Contributed by Misha Dmitriev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcba844d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcba844d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcba844d

Branch: refs/heads/YARN-5734
Commit: bcba844d1144cc334e2babbc34c9d42eac1c203a
Parents: 6a9dc5f
Author: Wei-Chiu Chuang 
Authored: Fri Jun 30 10:28:01 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Jun 30 10:28:01 2017 -0700

--
 .../hdfs/server/namenode/INodeDirectory.java|  7 ++-
 .../snapshot/AbstractINodeDiffList.java | 53 +++-
 .../namenode/TestTruncateQuotaUpdate.java   |  1 +
 3 files changed, 46 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcba844d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index a29a118..4012783 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -65,8 +65,11 @@ public class INodeDirectory extends INodeWithAdditionalFields
 return inode.asDirectory(); 
   }
 
-  protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
-  final static byte[] ROOT_NAME = DFSUtil.string2Bytes("");
+  // Profiling shows that most of the file lists are between 1 and 4 elements.
+  // Thus allocate the corresponding ArrayLists with a small initial capacity.
+  public static final int DEFAULT_FILES_PER_DIRECTORY = 2;
+
+  static final byte[] ROOT_NAME = DFSUtil.string2Bytes("");
 
   private List children = null;
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcba844d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index 64825f1..98d8c53 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -24,6 +24,7 @@ import java.util.List;
 
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 
 /**
  * A list of snapshot diffs for storing snapshot data.
@@ -35,17 +36,19 @@ abstract class AbstractINodeDiffList<N extends INode,
 A extends INodeAttributes, D extends AbstractINodeDiff<N, A, D>>
 implements Iterable<D> {
-  /** Diff list sorted by snapshot IDs, i.e. in chronological order. */
-  private final List<D> diffs = new ArrayList<D>();
+  /** Diff list sorted by snapshot IDs, i.e. in chronological order.
+   * Created lazily to avoid wasting memory by empty lists. */
+  private List<D> diffs;
 
   /** @return this list as a unmodifiable {@link List}. */
   public final List<D> asList() {
-return Collections.unmodifiableList(diffs);
+return diffs != null ?
+Collections.unmodifiableList(diffs) : Collections.emptyList();
   }
   
-  /** Get the size of the list and then clear it. */
+  /** Clear the list. */
   public void clear() {
-diffs.clear();
+diffs = null;
   }
 
   /** @return an {@link AbstractINodeDiff}. */
@@ -66,6 +69,9 @@ abstract class AbstractINodeDiffList
@@ -103,6 +112,7 @@ abstract class AbstractINodeDiffList
+  diffs = new ArrayList<>(INodeDirectory.DEFAULT_FILES_PER_DIRECTORY);
+}
   }
 
   /** @return the id of the last snapshot. */
@@ -139,10 +159,14 @@ abstract class AbstractINodeDiffList
@@ -275,11 +302,11 @@ abstract class AbstractINodeDiffList
   public Iterator<D> iterator() {
-return diffs.iterator();
+return diffs != null ? diffs.iterator() : Collections.emptyIterator();
   }
 
   @Override
   public String toString() {
-return getClass().getSimpleName() + ": " + diffs;
+return getClass().getSimpleName() + ": " + (diffs != null ? diffs : "[]");
   }
 }
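
The whole change is one discipline applied consistently: leave the field null
until the first write, and map null to an immutable empty view on every read
path (asList, iterator, toString). A compact sketch of the pattern in
isolation, with illustrative names:

import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

// Lazily-allocated list: pays zero heap for the (very common) empty case.
class LazyDiffList<D> implements Iterable<D> {
  private List<D> diffs; // null until the first addition

  void add(D diff) {
    if (diffs == null) {
      diffs = new ArrayList<>(2); // profiled: most lists hold 1-4 entries
    }
    diffs.add(diff);
  }

  List<D> asList() {
    return diffs != null
        ? Collections.unmodifiableList(diffs)
        : Collections.emptyList();
  }

  @Override
  public Iterator<D> iterator() {
    return diffs != null ? diffs.iterator() : Collections.emptyIterator();
  }
}

With millions of snapshot diff lists in a NameNode heap, never allocating the
backing ArrayList for empty lists is where the memory saving comes from.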


[48/50] [abbrv] hadoop git commit: YARN-5949. Add pluggable configuration ACL policy interface and implementation. (Jonathan Hung via wangda)

2017-07-07 Thread xgong
YARN-5949. Add pluggable configuration ACL policy interface and implementation. 
(Jonathan Hung via wangda)

Change-Id: Ib98e82ff753bede21fcab2e6ca9ec1e7a5a2008f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee373da6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee373da6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee373da6

Branch: refs/heads/YARN-5734
Commit: ee373da6a32a2b73a2ad605407798ef00f3e8b0e
Parents: fa52bab
Author: Wangda Tan 
Authored: Mon May 22 13:38:31 2017 -0700
Committer: Xuan 
Committed: Fri Jul 7 14:12:48 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   3 +
 .../src/main/resources/yarn-default.xml |  11 ++
 .../ConfigurationMutationACLPolicy.java |  47 ++
 .../ConfigurationMutationACLPolicyFactory.java  |  49 ++
 .../DefaultConfigurationMutationACLPolicy.java  |  45 ++
 .../scheduler/MutableConfScheduler.java |  19 ++-
 .../scheduler/MutableConfigurationProvider.java |   8 +-
 .../scheduler/capacity/CapacityScheduler.java   |   6 +-
 .../conf/MutableCSConfigurationProvider.java| 151 +-
 ...ueueAdminConfigurationMutationACLPolicy.java |  96 
 .../resourcemanager/webapp/RMWebServices.java   | 132 +---
 .../TestConfigurationMutationACLPolicies.java   | 154 +++
 .../TestMutableCSConfigurationProvider.java |  40 +++--
 13 files changed, 610 insertions(+), 151 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee373da6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f21c1f0..6ab4c7f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -618,6 +618,9 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_CONFIGURATION_STORE =
   MEMORY_CONFIGURATION_STORE;
 
+  public static final String RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS =
+  YARN_PREFIX + "scheduler.configuration.mutation.acl-policy.class";
+
   public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
   + "authorization-provider";
   private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee373da6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 412248e..a66fd44 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3128,4 +3128,15 @@
     <value>memory</value>
   </property>
 
+  <property>
+    <description>
+      The class to use for configuration mutation ACL policy if using a mutable
+      configuration provider. Controls whether a mutation request is allowed.
+      The DefaultConfigurationMutationACLPolicy checks if the requestor is a
+      YARN admin.
+    </description>
+    <name>yarn.scheduler.configuration.mutation.acl-policy.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.DefaultConfigurationMutationACLPolicy</value>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee373da6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
new file mode 100644
index 000..724487b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
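
The extension point itself is small: the RM instantiates the class named by
yarn.scheduler.configuration.mutation.acl-policy.class and consults it before
applying a scheduler configuration mutation. A sketch of a custom policy
against a deliberately simplified version of the interface — the method names
and parameter list here are illustrative, not the exact YARN signatures:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

// Simplified stand-in for YARN's ConfigurationMutationACLPolicy contract.
interface MutationACLPolicy {
  void init(Configuration conf);
  boolean isMutationAllowed(UserGroupInformation user);
}

// Example policy: only members of a configured admin group may mutate.
public class GroupBasedMutationACLPolicy implements MutationACLPolicy {
  private String adminGroup;

  @Override
  public void init(Configuration conf) {
    // "example.scheduler.mutation.admin-group" is a hypothetical key.
    adminGroup = conf.get("example.scheduler.mutation.admin-group", "hadoop");
  }

  @Override
  public boolean isMutationAllowed(UserGroupInformation user) {
    for (String group : user.getGroupNames()) {
      if (group.equals(adminGroup)) {
        return true;
      }
    }
    return false;
  }
}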

[27/50] [abbrv] hadoop git commit: HDFS-12079. Description of dfs.block.invalidate.limit is incorrect in hdfs-default.xml. Contributed by Weiwei Yang.

2017-07-07 Thread xgong
HDFS-12079. Description of dfs.block.invalidate.limit is incorrect in 
hdfs-default.xml. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0560e06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0560e06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0560e06

Branch: refs/heads/YARN-5734
Commit: b0560e0624756e2b3ce7b6bc741eee3c18d2a873
Parents: bf1f599
Author: Akira Ajisaka 
Authored: Tue Jul 4 14:02:14 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Jul 4 14:02:14 2017 +0900

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0560e06/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index be345af..96c04f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -3375,7 +3375,10 @@
   <name>dfs.block.invalidate.limit</name>
   <value>1000</value>
   <description>
-    Limit on the list of invalidated block list kept by the Namenode.
+    The maximum number of invalidate blocks sent by namenode to a datanode
+    per heartbeat deletion command. This property works with
+    "dfs.namenode.invalidate.work.pct.per.iteration" to throttle block
+    deletions.
   </description>
 </property>
 





[40/50] [abbrv] hadoop git commit: Add release notes, changes, jdiff for 3.0.0-alpha4

2017-07-07 Thread xgong
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f10864a8/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md
new file mode 100644
index 000..3ad6cc6
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md
@@ -0,0 +1,492 @@
+
+
+# "Apache Hadoop"  3.0.0-alpha4 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-13956](https://issues.apache.org/jira/browse/HADOOP-13956) | 
*Critical* | **Read ADLS credentials from Credential Provider**
+
+The hadoop-azure-datalake file system now supports configuration of the Azure 
Data Lake Store account credentials using the standard Hadoop Credential 
Provider API. For details, please refer to the documentation on 
hadoop-azure-datalake and the Credential Provider API.
+
+
+---
+
+* [MAPREDUCE-6404](https://issues.apache.org/jira/browse/MAPREDUCE-6404) | 
*Major* | **Allow AM to specify a port range for starting its webapp**
+
+Add a new configuration - "yarn.app.mapreduce.am.webapp.port-range" to specify 
port-range for webapp launched by AM.
+
+
+---
+
+* [HDFS-10860](https://issues.apache.org/jira/browse/HDFS-10860) | *Blocker* | 
**Switch HttpFS from Tomcat to Jetty**
+
+
+
+The following environment variables are deprecated. Set the corresponding
+configuration properties instead.
+
+Environment Variable        | Configuration Property       | Configuration File
+----------------------------|------------------------------|--------------------
+HTTPFS_TEMP | hadoop.http.temp.dir | httpfs-site.xml
+HTTPFS_HTTP_PORT| hadoop.httpfs.http.port  | httpfs-site.xml
+HTTPFS_MAX_HTTP_HEADER_SIZE | hadoop.http.max.request.header.size and 
hadoop.http.max.response.header.size | httpfs-site.xml
+HTTPFS_MAX_THREADS  | hadoop.http.max.threads  | httpfs-site.xml
+HTTPFS_SSL_ENABLED  | hadoop.httpfs.ssl.enabled| httpfs-site.xml
+HTTPFS_SSL_KEYSTORE_FILE| ssl.server.keystore.location | ssl-server.xml
+HTTPFS_SSL_KEYSTORE_PASS| ssl.server.keystore.password | ssl-server.xml
+
+These default HTTP Services have been added.
+
+Name               | Description
+-------------------|----------------------------------
+/conf  | Display configuration properties
+/jmx   | Java JMX management interface
+/logLevel  | Get or set log level per class
+/logs  | Display log files
+/stacks| Display JVM stacks
+/static/index.html | The static home page
+
+Script httpfs.sh has been deprecated, use `hdfs httpfs` instead. The new 
scripts are based on the Hadoop shell scripting framework. `hadoop daemonlog` 
is supported. SSL configurations are read from ssl-server.xml.
+
+
+---
+
+* [HDFS-11210](https://issues.apache.org/jira/browse/HDFS-11210) | *Major* | 
**Enhance key rolling to guarantee new KeyVersion is returned from 
generateEncryptedKeys after a key is rolled**
+
+ 
+
+An `invalidateCache` command has been added to the KMS.
+The `rollNewVersion` semantics of the KMS has been improved so that after a 
key's version is rolled, `generateEncryptedKey` of that key guarantees to 
return the `EncryptedKeyVersion` based on the new key version.
+
+
+---
+
+* [HADOOP-13075](https://issues.apache.org/jira/browse/HADOOP-13075) | *Major* 
| **Add support for SSE-KMS and SSE-C in s3a filesystem**
+
+The new encryption options SSE-KMS and especially SSE-C must be considered 
experimental at present. If you are using SSE-C, problems may arise if the 
bucket mixes encrypted and unencrypted files. For SSE-KMS, there may be extra 
throttling of IO, especially with the fadvise=random option. You may wish to 
request an increase in your KMS IOPs limits.
+
+
+---
+
+* [HDFS-11026](https://issues.apache.org/jira/browse/HDFS-11026) | *Major* | 
**Convert BlockTokenIdentifier to use Protobuf**
+
+Changed the serialized format of BlockTokenIdentifier to protocol buffers. 
Includes logic to decode both the old Writable format and the new PB format to 
support existing clients. Client implementations in other languages will 
require similar functionality.
+
+
+---
+
+* [HADOOP-13929](https://issues.apache.org/jira/browse/HADOOP-13929) | *Major* 
| **ADLS connector should not check in contract-test-options.xml**
+
+To run live unit tests, create src/test/resources/auth-keys.xml with the same 
properties as in the deprecated contract-test-options.xml.
+
+
+---
+
+* [HDFS-11100](https://issues.apache.org/jira/browse/HDFS-11100) | *Critical* 
| **Recursively deleting file 

[10/50] [abbrv] hadoop git commit: HADOOP-14602. allow custom release notes/changelog during create-release

2017-07-07 Thread xgong
HADOOP-14602. allow custom release notes/changelog during create-release

Signed-off-by: Chris Douglas 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c52da7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c52da7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c52da7d

Branch: refs/heads/YARN-5734
Commit: 0c52da7d3e381ca59cd0ff72d143066a5c28d826
Parents: 16c8dbd
Author: Allen Wittenauer 
Authored: Wed Jun 28 07:37:09 2017 -0700
Committer: Allen Wittenauer 
Committed: Thu Jun 29 08:03:16 2017 -0700

--
 dev-support/bin/create-release | 36 +++-
 1 file changed, 31 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c52da7d/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 94351d3..b22e90b 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -50,6 +50,7 @@ function hadoop_abs
   declare obj=$1
   declare dir
   declare fn
+  declare ret
 
   if [[ ! -e ${obj} ]]; then
 return 1
@@ -62,7 +63,8 @@ function hadoop_abs
   fi
 
   dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P)
-  if [[ $? = 0 ]]; then
+  ret=$?
+  if [[ ${ret} = 0 ]]; then
 echo "${dir}${fn}"
 return 0
   fi
@@ -287,6 +289,7 @@ function usage
   echo "--mvncache=[path]   Path to the maven cache to use"
   echo "--nativeAlso build the native components"
   echo "--rc-label=[label]  Add this label to the builds"
+  echo "--security  Emergency security release"
   echo "--sign  Use .gnupg dir to sign the artifacts and jars"
   echo "--version=[version] Use an alternative version string"
 }
@@ -330,6 +333,9 @@ function option_parse
   --rc-label=*)
 RC_LABEL=${i#*=}
   ;;
+  --security)
+SECURITYRELEASE=true
+  ;;
   --sign)
 SIGN=true
   ;;
@@ -397,6 +403,14 @@ function option_parse
   MVN_ARGS=("-Dmaven.repo.local=${MVNCACHE}")
 fi
   fi
+
+  if [[ "${SECURITYRELEASE}" = true ]]; then
+if [[ ! -d 
"${BASEDIR}/hadoop-common-project/hadoop-common/src/site/markdown/release/${HADOOP_VERSION}"
 ]]; then
+  hadoop_error "ERROR: 
${BASEDIR}/hadoop-common-project/hadoop-common/src/site/markdown/release/${HADOOP_VERSION}
 does not exist."
+  hadoop_error "ERROR: This directory and its contents are required to be 
manually created for a security release."
+  exit 1
+fi
+  fi
 }
 
 function dockermode
@@ -523,7 +537,7 @@ function makearelease
   big_console_header "Maven Build and Install"
 
   if [[ "${SIGN}" = true ]]; then
-signflags=("-Psign" "-Dgpg.useagent=true" -Dgpg.executable="${GPG}")
+signflags=("-Psign" "-Dgpg.useagent=true" "-Dgpg.executable=${GPG}")
   fi
 
   # Create SRC and BIN tarballs for release,
@@ -534,6 +548,14 @@ function makearelease
   "${signflags[@]}" \
   -DskipTests -Dtar $(hadoop_native_flags)
 
+  if [[ "${SECURITYRELEASE}" = true ]]; then
+DOCFLAGS="-Pdocs"
+hadoop_error "WARNING: Skipping automatic changelog and release notes 
generation due to --security"
+  else
+DOCFLAGS="-Preleasedocs,docs"
+  fi
+
+
   # Create site for release
   # we need to do install again so that jdiff and
   # a few other things get registered in the maven
@@ -542,7 +564,8 @@ function makearelease
 "${MVN}" "${MVN_ARGS[@]}" install \
   site site:stage \
   -DskipTests \
-  -Pdist,src,releasedocs,docs
+  -Pdist,src \
+  "${DOCFLAGS}"
 
   big_console_header "Staging the release"
 
@@ -586,6 +609,7 @@ function makearelease
 function signartifacts
 {
   declare i
+  declare ret
 
   if [[ "${SIGN}" = false ]]; then
 for i in ${ARTIFACTS_DIR}/*; do
@@ -612,7 +636,8 @@ function signartifacts
 ${GPG} --verify --trustdb "${BASEDIR}/target/testkeysdb" \
   "${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz.asc" \
 "${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz"
-if [[ $? != 0 ]]; then
+ret=$?
+if [[ ${ret} != 0 ]]; then
   hadoop_error "ERROR: GPG key is not present in ${PUBKEYFILE}."
   hadoop_error "ERROR: This MUST be fixed. Exiting."
   exit 1
@@ -641,6 +666,7 @@ if [[ "${INDOCKER}" = true || "${DOCKERRAN}" = false ]]; 
then
   startgpgagent
 
   makearelease
+  releaseret=$?
 
   signartifacts
 
@@ -651,7 +677,7 @@ if [[ "${INDOCKER}" = true ]]; then
   exit $?
 fi
 
-if [[ $? == 0 ]]; then
+if [[ ${releaseret} == 0 ]]; then
   echo
   echo "Congratulations, you have successfully built the release"
   echo "artifacts for Apache Hadoop ${HADOOP_VERSION}${RC_LABEL}"



[25/50] [abbrv] hadoop git commit: YARN-6681. Eliminate double-copy of child queues in canAssignToThisQueue. Contributed by Daryn Sharp.

2017-07-07 Thread xgong
YARN-6681. Eliminate double-copy of child queues in canAssignToThisQueue. 
Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa1aaee8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa1aaee8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa1aaee8

Branch: refs/heads/YARN-5734
Commit: fa1aaee87b0141a0255b5f8e5fd8e8f49d7efe86
Parents: 147df30
Author: Naganarasimha 
Authored: Sat Jul 1 12:29:39 2017 +0530
Committer: Naganarasimha 
Committed: Sat Jul 1 12:29:39 2017 +0530

--
 .../resourcemanager/scheduler/capacity/AbstractCSQueue.java  | 8 +++-
 .../resourcemanager/scheduler/capacity/ParentQueue.java  | 7 ++-
 2 files changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa1aaee8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index b69ec96..5fbdead 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -615,6 +616,11 @@ public abstract class AbstractCSQueue implements CSQueue {
 minimumAllocation);
   }
 
+  public boolean hasChildQueues() {
+List<CSQueue> childQueues = getChildQueues();
+return childQueues != null && !childQueues.isEmpty();
+  }
+
   boolean canAssignToThisQueue(Resource clusterResource,
   String nodePartition, ResourceLimits currentResourceLimits,
   Resource resourceCouldBeUnreserved, SchedulingMode schedulingMode) {
@@ -640,7 +646,7 @@ public abstract class AbstractCSQueue implements CSQueue {
   // When queue is a parent queue: Headroom = limit - used + killable
   // When queue is a leaf queue: Headroom = limit - used (leaf queue 
cannot preempt itself)
   Resource usedExceptKillable = nowTotalUsed;
-  if (null != getChildQueues() && !getChildQueues().isEmpty()) {
+  if (hasChildQueues()) {
 usedExceptKillable = Resources.subtract(nowTotalUsed,
 getTotalKillableResource(nodePartition));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa1aaee8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 91fedbc..f6ada4f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -845,7 +845,12 @@ public class ParentQueue extends AbstractCSQueue {
   writeLock.unlock();
 }
   }
-  
+
+  @Override
+  public boolean hasChildQueues() {
+return true;
+  }
+
   @Override
   public List<CSQueue> getChildQueues() {
 try {
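
The optimization is the classic call-once pattern: getChildQueues() hands
back a defensive copy (under the queue's read lock), so calling it twice per
allocation attempt doubled the copy cost on a hot path. hasChildQueues()
performs at most one copy, and ParentQueue short-circuits to true. The
before/after shape, sketched without the locking:

import java.util.ArrayList;
import java.util.List;

class Queue {
  private final List<Queue> children = new ArrayList<>();

  // Each call copies the child list (in YARN, under a read lock).
  List<Queue> getChildQueues() {
    return new ArrayList<>(children);
  }

  // Before: two copies on the hot path.
  boolean hasChildrenSlow() {
    return getChildQueues() != null && !getChildQueues().isEmpty();
  }

  // After: one copy, reused for both checks.
  boolean hasChildren() {
    List<Queue> childQueues = getChildQueues();
    return childQueues != null && !childQueues.isEmpty();
  }
}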





[37/50] [abbrv] hadoop git commit: HADOOP-14563. LoadBalancingKMSClientProvider#warmUpEncryptedKeys swallows IOException. Contributed by Rushabh S Shah.

2017-07-07 Thread xgong
HADOOP-14563. LoadBalancingKMSClientProvider#warmUpEncryptedKeys swallows 
IOException. Contributed by Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8153fe2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8153fe2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8153fe2b

Branch: refs/heads/YARN-5734
Commit: 8153fe2bd35fb4df0b64f93ac0046e34d4807ac3
Parents: 82cb2a6
Author: Wei-Chiu Chuang 
Authored: Fri Jul 7 06:13:10 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Jul 7 06:13:10 2017 -0700

--
 .../key/kms/LoadBalancingKMSClientProvider.java | 12 +++-
 .../kms/TestLoadBalancingKMSClientProvider.java | 63 
 2 files changed, 74 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8153fe2b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index aa24993..de9c988 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -39,6 +39,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 
 /**
  * A simple LoadBalancing KMSClientProvider that round-robins requests
@@ -159,15 +160,24 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
   // This request is sent to all providers in the load-balancing group
   @Override
   public void warmUpEncryptedKeys(String... keyNames) throws IOException {
+Preconditions.checkArgument(providers.length > 0,
+"No providers are configured");
+boolean success = false;
+IOException e = null;
 for (KMSClientProvider provider : providers) {
   try {
 provider.warmUpEncryptedKeys(keyNames);
+success = true;
   } catch (IOException ioe) {
+e = ioe;
 LOG.error(
 "Error warming up keys for provider with url"
-+ "[" + provider.getKMSUrl() + "]");
++ "[" + provider.getKMSUrl() + "]", ioe);
   }
 }
+if (!success && e != null) {
+  throw e;
+}
   }
 
   // This request is sent to all providers in the load-balancing group

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8153fe2b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
index 499b991..d14dd59 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider.Options;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -257,4 +258,66 @@ public class TestLoadBalancingKMSClientProvider {
   "AuthenticationException"));
 }
   }
+
+  /**
+   * tests {@link 
LoadBalancingKMSClientProvider#warmUpEncryptedKeys(String...)}
+   * error handling in case when all the providers throws {@link IOException}.
+   * @throws Exception
+   */
+  @Test
+  public void testWarmUpEncryptedKeysWhenAllProvidersFail() throws Exception {
+Configuration conf = new Configuration();
+KMSClientProvider p1 = mock(KMSClientProvider.class);
+String keyName = "key1";
+Mockito.doThrow(new IOException(new AuthorizationException("p1"))).when(p1)
+.warmUpEncryptedKeys(Mockito.anyString());
+KMSClientProvider p2 = mock(KMSClientProvider.class);
+Mockito.doThrow(new IOException(new 
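
Previously a failing provider was only logged, so warmUpEncryptedKeys
reported success even when every KMS in the load-balancing group was
unreachable. The fix is the try-all-remember-last pattern: succeed if any
provider succeeds, rethrow the last IOException only when all fail. A generic
sketch of that pattern with illustrative names:

import java.io.IOException;
import java.util.List;

public final class TryAll {
  interface IOAction<T> {
    void accept(T target) throws IOException;
  }

  // Run the action against every target; swallow per-target failures, but
  // rethrow the last one if no target succeeded.
  static <T> void tryAll(List<T> targets, IOAction<T> action)
      throws IOException {
    if (targets.isEmpty()) {
      throw new IllegalArgumentException("No providers are configured");
    }
    boolean success = false;
    IOException last = null;
    for (T target : targets) {
      try {
        action.accept(target);
        success = true;
      } catch (IOException e) {
        last = e; // the real code also logs the provider URL here
      }
    }
    if (!success && last != null) {
      throw last;
    }
  }
}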

[22/50] [abbrv] hadoop git commit: HADOOP-14443. Azure: Support retry and client side failover for authorization, SASKey and delegation token generation. Contributed by Santhosh G Nayak

2017-07-07 Thread xgong
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
index a0276cb5..fbd7f62 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
@@ -46,7 +46,7 @@ public class TestNativeAzureFileSystemAuthorization
   protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
 Configuration conf = new Configuration();
 conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
-conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URL, 
"http://localhost/");
+conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, 
"http://localhost/");
 return AzureBlobStorageTestAccount.create(conf);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
index 77be1b8..f459b24 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
@@ -21,34 +21,48 @@ package org.apache.hadoop.fs.azure;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.http.*;
+import org.apache.hadoop.io.retry.RetryUtils;
+import org.apache.http.Header;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpStatus;
+import org.apache.http.StatusLine;
+import org.apache.http.ProtocolVersion;
+import org.apache.http.ParseException;
+import org.apache.http.HeaderElement;
 import org.apache.http.client.HttpClient;
 import org.apache.http.client.methods.HttpGet;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
 
 import java.io.ByteArrayInputStream;
-import java.io.UnsupportedEncodingException;
-import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
 
 import static 
org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.times;
 
 /**
  * Test class to hold all WasbRemoteCallHelper tests
  */
 public class TestWasbRemoteCallHelper
 extends AbstractWasbTestBase {
+  public static final String EMPTY_STRING = "";
+  private static final int INVALID_HTTP_STATUS_CODE_999 = 999;
 
   @Override
   protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
 Configuration conf = new Configuration();
 conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
-conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URL, 
"http://localhost/");
+conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, 
"http://localhost1/,http://localhost2/");
 return AzureBlobStorageTestAccount.create(conf);
   }
 
@@ -80,7 +94,7 @@ public class TestWasbRemoteCallHelper
 HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
 HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
 
Mockito.when(mockHttpClient.execute(Mockito.any())).thenReturn(mockHttpResponse);
-
Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(999));
+
Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(INVALID_HTTP_STATUS_CODE_999));
 // finished setting up mocks
 
 performop(mockHttpClient);
@@ -99,7 +113,7 @@ public class TestWasbRemoteCallHelper
 HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
 HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
 
Mockito.when(mockHttpClient.execute(Mockito.any())).thenReturn(mockHttpResponse);
-
Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(200));
+
Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK));
   

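For illustration only: the move from KEY_REMOTE_AUTH_SERVICE_URL to
KEY_REMOTE_AUTH_SERVICE_URLS above lets the remote-call helper accept a
comma-separated list of endpoints. A minimal failover sketch under that
assumption; the class and method names below are hypothetical, not the
WasbRemoteCallHelper API:

  import java.io.IOException;
  import java.util.Arrays;

  class UrlFailoverSketch {
    // Hypothetical: split the comma-separated endpoint list and try each in turn.
    static String fetchFromAnyEndpoint(String commaSeparatedUrls) throws IOException {
      IOException last = null;
      for (String url : Arrays.asList(commaSeparatedUrls.split(","))) {
        try {
          return call(url.trim());   // hypothetical HTTP GET against one endpoint
        } catch (IOException e) {
          last = e;                  // remember the failure, try the next one
        }
      }
      throw last != null ? last : new IOException("no endpoints configured");
    }

    static String call(String url) throws IOException {
      // Placeholder for a real HTTP call to the authorization service.
      throw new IOException("unreachable: " + url);
    }
  }
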
[36/50] [abbrv] hadoop git commit: HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. Contributed by Wenxin He.

2017-07-07 Thread xgong
HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. 
Contributed by Wenxin He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82cb2a64
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82cb2a64
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82cb2a64

Branch: refs/heads/YARN-5734
Commit: 82cb2a6497caa7c5e693aa41ad18e92f1c7eb16a
Parents: 7576a68
Author: Akira Ajisaka 
Authored: Fri Jul 7 14:55:46 2017 +0900
Committer: Akira Ajisaka 
Committed: Fri Jul 7 15:00:47 2017 +0900

--
 .../fs/FileContextCreateMkdirBaseTest.java  |  2 +-
 .../hadoop/fs/FileContextPermissionBase.java|  5 ++--
 .../apache/hadoop/fs/FileContextUtilBase.java   |  5 ++--
 .../org/apache/hadoop/fs/TestListFiles.java |  4 +--
 .../fs/TestLocalFileSystemPermission.java   |  2 +-
 .../ha/TestActiveStandbyElectorRealZK.java  |  7 ++---
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |  6 ++--
 .../hadoop/ha/TestZKFailoverController.java |  5 ++--
 .../io/serializer/TestSerializationFactory.java |  6 ++--
 .../org/apache/hadoop/ipc/MiniRPCBenchmark.java | 13 -
 .../java/org/apache/hadoop/ipc/TestIPC.java |  5 ++--
 .../apache/hadoop/ipc/TestMiniRPCBenchmark.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 16 +--
 .../hadoop/security/TestGroupFallback.java  | 12 
 .../hadoop/security/TestUGIWithMiniKdc.java |  2 +-
 .../security/TestUserGroupInformation.java  |  2 +-
 .../hadoop/security/ssl/TestSSLFactory.java |  2 +-
 .../delegation/web/TestWebDelegationToken.java  |  2 +-
 .../apache/hadoop/test/GenericTestUtils.java| 30 
 .../hadoop/test/TestGenericTestUtils.java   | 10 +++
 .../hadoop/util/Crc32PerformanceTest.java   | 11 +++
 hadoop-common-project/hadoop-nfs/pom.xml|  6 
 .../apache/hadoop/oncrpc/TestFrameDecoder.java  |  6 ++--
 .../TestDFSStripedOutputStreamWithFailure.java  | 28 ++
 25 files changed, 132 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
index c1de27a..fbd598c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -30,6 +29,7 @@ import static 
org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
 
 import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.event.Level;
 
 /**
  * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
index dff89f9..240989e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.StringTokenizer;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -32,6 +33,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
@@ -61,8 +63,7 @@ public abstract class FileContextPermissionBase {
   
   {
 try {
-  ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
-  

[46/50] [abbrv] hadoop git commit: YARN-5948. Implement MutableConfigurationManager for handling storage into configuration store

2017-07-07 Thread xgong
YARN-5948. Implement MutableConfigurationManager for handling storage into 
configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46bde8ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46bde8ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46bde8ec

Branch: refs/heads/YARN-5734
Commit: 46bde8ec582edd036cda984b4ca889f3b8addcfb
Parents: aea6084
Author: Jonathan Hung 
Authored: Wed Mar 1 16:03:01 2017 -0800
Committer: Xuan 
Committed: Fri Jul 7 14:12:46 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  6 ++
 .../src/main/resources/yarn-default.xml | 12 +++
 .../scheduler/MutableConfigurationProvider.java | 35 
 .../scheduler/capacity/CapacityScheduler.java   | 14 ++-
 .../CapacitySchedulerConfiguration.java |  3 +
 .../capacity/conf/CSConfigurationProvider.java  |  3 +-
 .../conf/MutableCSConfigurationProvider.java| 94 
 .../conf/YarnConfigurationStoreFactory.java | 46 ++
 .../TestMutableCSConfigurationProvider.java | 83 +
 9 files changed, 291 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46bde8ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a6d3360..f21c1f0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -612,6 +612,12 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
   "org.apache.hadoop.yarn.LocalConfigurationProvider";
 
+  public static final String SCHEDULER_CONFIGURATION_STORE_CLASS =
+  YARN_PREFIX + "scheduler.configuration.store.class";
+  public static final String MEMORY_CONFIGURATION_STORE = "memory";
+  public static final String DEFAULT_CONFIGURATION_STORE =
+  MEMORY_CONFIGURATION_STORE;
+
   public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
   + "authorization-provider";
   private static final List RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46bde8ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 81c9cb2..412248e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3116,4 +3116,16 @@
     <value>false</value>
   </property>
 
+  <property>
+    <description>
+      The type of configuration store to use for storing scheduler
+      configurations, if using a mutable configuration provider.
+      Keywords such as "memory" map to certain configuration store
+      implementations. If keyword is not found, try to load this
+      value as a class.
+    </description>
+    <name>yarn.scheduler.configuration.store.class</name>
+    <value>memory</value>
+  </property>
+
 </configuration>

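For illustration only: a minimal sketch of selecting the in-memory store
through the new key, using only org.apache.hadoop.conf.Configuration and the
property shown in the yarn-default.xml hunk above (the surrounding RM wiring
is omitted):

  import org.apache.hadoop.conf.Configuration;

  public class StoreSelectionSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // "memory" is the keyword form; per the description above, a value that
      // is not a known keyword is loaded as a class name instead.
      conf.set("yarn.scheduler.configuration.store.class", "memory");
      System.out.println(conf.get("yarn.scheduler.configuration.store.class"));
    }
  }
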
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46bde8ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
new file mode 100644
index 000..da30a2b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with 

[12/50] [abbrv] hadoop git commit: HADOOP-14611. NetworkTopology.DEFAULT_HOST_LEVEL is unused (Contributed by Chen Liang via Daniel Templeton)

2017-07-07 Thread xgong
HADOOP-14611. NetworkTopology.DEFAULT_HOST_LEVEL is unused
(Contributed by Chen Liang via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a75f738
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a75f738
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a75f738

Branch: refs/heads/YARN-5734
Commit: 5a75f73893567151f525950cc1a15b3f1bfeac26
Parents: b08cc97
Author: Daniel Templeton 
Authored: Thu Jun 29 12:28:43 2017 -0700
Committer: Daniel Templeton 
Committed: Thu Jun 29 12:28:43 2017 -0700

--
 .../src/main/java/org/apache/hadoop/net/NetworkTopology.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a75f738/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 1018d58..278bf72 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -45,7 +45,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 @InterfaceStability.Unstable
 public class NetworkTopology {
   public final static String DEFAULT_RACK = "/default-rack";
-  public final static int DEFAULT_HOST_LEVEL = 2;
   public static final Logger LOG =
   LoggerFactory.getLogger(NetworkTopology.class);
 





[42/50] [abbrv] hadoop git commit: Add release notes, changes, jdiff for 3.0.0-alpha4

2017-07-07 Thread xgong
Add release notes, changes, jdiff for 3.0.0-alpha4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f10864a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f10864a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f10864a8

Branch: refs/heads/YARN-5734
Commit: f10864a820c5104d748378aa1c2c408e4aad8a6c
Parents: 7cd0952
Author: Andrew Wang 
Authored: Fri Jul 7 11:01:59 2017 -0700
Committer: Andrew Wang 
Committed: Fri Jul 7 11:01:59 2017 -0700

--
 .../3.0.0-alpha4/CHANGES.3.0.0-alpha4.md| 880 +++
 .../3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md   | 492 +++
 .../jdiff/Apache_Hadoop_HDFS_3.0.0-alpha4.xml   | 322 +++
 3 files changed, 1694 insertions(+)
--






[43/50] [abbrv] hadoop git commit: MAPREDUCE-6246. DBOutputFormat.java appending extra semicolon to query which is incompatible with DB2. Contributed by ramtin and Gergely Novák.

2017-07-07 Thread xgong
MAPREDUCE-6246. DBOutputFormat.java appending extra semicolon to query which is 
incompatible with DB2. Contributed by ramtin and Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f484a6ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f484a6ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f484a6ff

Branch: refs/heads/YARN-5734
Commit: f484a6ff602d48413556a1d046670e2003c71c2e
Parents: f10864a
Author: Junping Du 
Authored: Fri Jul 7 13:23:43 2017 -0700
Committer: Junping Du 
Committed: Fri Jul 7 13:26:16 2017 -0700

--
 .../hadoop/mapreduce/lib/db/DBOutputFormat.java | 15 ++-
 .../mapreduce/lib/db/TestDBOutputFormat.java| 45 
 2 files changed, 58 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f484a6ff/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
index 2e3a9d8..c222bf5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.lib.db;
 
 import java.io.IOException;
 import java.sql.Connection;
+import java.sql.DatabaseMetaData;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 
@@ -51,6 +52,8 @@ public class DBOutputFormat
 extends OutputFormat {
 
   private static final Log LOG = LogFactory.getLog(DBOutputFormat.class);
+  public String dbProductName = "DEFAULT";
+
   public void checkOutputSpecs(JobContext context) 
   throws IOException, InterruptedException {}
 
@@ -158,7 +161,12 @@ extends OutputFormat {
 query.append(",");
   }
 }
-query.append(");");
+
+if (dbProductName.startsWith("DB2") || dbProductName.startsWith("ORACLE")) 
{
+  query.append(")");
+} else {
+  query.append(");");
+}
 
 return query.toString();
   }
@@ -177,7 +185,10 @@ extends OutputFormat {
 try {
   Connection connection = dbConf.getConnection();
   PreparedStatement statement = null;
-  
+
+  DatabaseMetaData dbMeta = connection.getMetaData();
+  this.dbProductName = dbMeta.getDatabaseProductName().toUpperCase();
+
   statement = connection.prepareStatement(
 constructQuery(tableName, fieldNames));
   return new DBRecordWriter(connection, statement);

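For illustration only: the effect of the constructQuery change above on the
generated SQL, assuming a hypothetical three-column table (the literal
strings below are examples, not taken from the patch):

  public class ConstructQuerySketch {
    public static void main(String[] args) {
      String base = "INSERT INTO hadoop_output (id,name,value) VALUES (?,?,?)";
      // DB2 and Oracle reject a trailing semicolon in PreparedStatement text,
      // so the patch omits it for those products and keeps it for the rest.
      String forDb2OrOracle = base;        // ...VALUES (?,?,?)
      String forOtherDbs = base + ";";     // ...VALUES (?,?,?);
      System.out.println(forDb2OrOracle);
      System.out.println(forOtherDbs);
    }
  }
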
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f484a6ff/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
index 014855f..e547c8a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.mapreduce.lib.db;
 
 import java.io.IOException;
+import java.lang.reflect.Field;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
@@ -26,6 +28,7 @@ import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
 
 public class TestDBOutputFormat {
   private String[] fieldNames = new String[] { "id", "name", "value" };
@@ -47,6 +50,48 @@ public class TestDBOutputFormat {
   }
 
   @Test
+  public void testDB2ConstructQuery() {
+String db2expected = StringUtils.removeEnd(expected, ";");
+String 

[38/50] [abbrv] hadoop git commit: Revert "HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. Contributed by Wenxin He."

2017-07-07 Thread xgong
Revert "HADOOP-14587. Use GenericTestUtils.setLogLevel when available in 
hadoop-common. Contributed by Wenxin He."

This reverts commit 82cb2a6497caa7c5e693aa41ad18e92f1c7eb16a.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fc5dcc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fc5dcc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fc5dcc2

Branch: refs/heads/YARN-5734
Commit: 8fc5dcc2a199c6b202e55c4cfdf5ae4eb09ef003
Parents: 8153fe2
Author: Akira Ajisaka 
Authored: Sat Jul 8 02:53:18 2017 +0900
Committer: Akira Ajisaka 
Committed: Sat Jul 8 02:53:18 2017 +0900

--
 .../fs/FileContextCreateMkdirBaseTest.java  |  2 +-
 .../hadoop/fs/FileContextPermissionBase.java|  5 ++--
 .../apache/hadoop/fs/FileContextUtilBase.java   |  5 ++--
 .../org/apache/hadoop/fs/TestListFiles.java |  4 +--
 .../fs/TestLocalFileSystemPermission.java   |  2 +-
 .../ha/TestActiveStandbyElectorRealZK.java  |  7 +++--
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |  6 ++--
 .../hadoop/ha/TestZKFailoverController.java |  5 ++--
 .../io/serializer/TestSerializationFactory.java |  6 ++--
 .../org/apache/hadoop/ipc/MiniRPCBenchmark.java | 13 +
 .../java/org/apache/hadoop/ipc/TestIPC.java |  5 ++--
 .../apache/hadoop/ipc/TestMiniRPCBenchmark.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 16 +--
 .../hadoop/security/TestGroupFallback.java  | 12 
 .../hadoop/security/TestUGIWithMiniKdc.java |  2 +-
 .../security/TestUserGroupInformation.java  |  2 +-
 .../hadoop/security/ssl/TestSSLFactory.java |  2 +-
 .../delegation/web/TestWebDelegationToken.java  |  2 +-
 .../apache/hadoop/test/GenericTestUtils.java| 30 
 .../hadoop/test/TestGenericTestUtils.java   | 10 ---
 .../hadoop/util/Crc32PerformanceTest.java   | 11 ---
 hadoop-common-project/hadoop-nfs/pom.xml|  6 
 .../apache/hadoop/oncrpc/TestFrameDecoder.java  |  6 ++--
 .../TestDFSStripedOutputStreamWithFailure.java  | 28 --
 25 files changed, 59 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
index fbd598c..c1de27a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -29,7 +30,6 @@ import static 
org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
 
 import org.apache.hadoop.test.GenericTestUtils;
-import org.slf4j.event.Level;
 
 /**
  * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
index 240989e..dff89f9 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
@@ -23,7 +23,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.StringTokenizer;
 
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -33,7 +32,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.event.Level;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
@@ -63,7 +61,8 @@ public abstract class FileContextPermissionBase {
   
   {
 try {
-  GenericTestUtils.setLogLevel(FileSystem.LOG, 

[31/50] [abbrv] hadoop git commit: HADOOP-14617. Add ReflectionUtils.logThreadInfo that accept slf4j logger API. Contributed by Wenxin He.

2017-07-07 Thread xgong
HADOOP-14617. Add ReflectionUtils.logThreadInfo that accept slf4j logger API.
Contributed by Wenxin He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b17e655b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b17e655b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b17e655b

Branch: refs/heads/YARN-5734
Commit: b17e655b7069a6e9dab28d9f5fc34bc95a27e5d5
Parents: f2aba1d
Author: Steve Loughran 
Authored: Tue Jul 4 10:52:59 2017 +0100
Committer: Steve Loughran 
Committed: Tue Jul 4 11:41:07 2017 +0100

--
 .../org/apache/hadoop/util/ReflectionUtils.java | 30 
 .../apache/hadoop/util/TestReflectionUtils.java | 19 -
 2 files changed, 48 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17e655b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
index da14979..f1294e7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.serializer.Deserializer;
 import org.apache.hadoop.io.serializer.SerializationFactory;
 import org.apache.hadoop.io.serializer.Serializer;
+import org.slf4j.Logger;
 
 /**
  * General reflection utils
@@ -229,6 +230,35 @@ public class ReflectionUtils {
   }
 
   /**
+   * Log the current thread stacks at INFO level.
+   * @param log the logger that logs the stack trace
+   * @param title a descriptive title for the call stacks
+   * @param minInterval the minimum time from the last
+   */
+  public static void logThreadInfo(Logger log,
+   String title,
+   long minInterval) {
+boolean dumpStack = false;
+if (log.isInfoEnabled()) {
+  synchronized (ReflectionUtils.class) {
+long now = Time.now();
+if (now - previousLogTime >= minInterval * 1000) {
+  previousLogTime = now;
+  dumpStack = true;
+}
+  }
+  if (dumpStack) {
+try {
+  ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+  printThreadInfo(new PrintStream(buffer, false, "UTF-8"), title);
+  log.info(buffer.toString(Charset.defaultCharset().name()));
+} catch (UnsupportedEncodingException ignored) {
+}
+  }
+}
+  }
+
+  /**
* Return the correctly-typed {@link Class} of the given object.
*  
* @param o object whose correctly-typed Class is to be obtained

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17e655b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
index 56e86ef..62cd625 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
@@ -25,9 +25,14 @@ import java.net.URLClassLoader;
 import java.util.HashMap;
 import java.util.List;
 
+import static org.hamcrest.CoreMatchers.containsString;
 import static org.junit.Assert.*;
+
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestReflectionUtils {
 
@@ -150,7 +155,19 @@ public class TestReflectionUtils {
 assertTrue("Missing parent method", containsParentMethod);
 assertTrue("Missing child method", containsChildMethod);
   }
-  
+
+  @Test
+  public void testLogThreadInfo() throws Exception {
+Logger logger = LoggerFactory.getLogger(TestReflectionUtils.class);
+LogCapturer logCapturer = LogCapturer.captureLogs(logger);
+
+final String title = "title";
+ReflectionUtils.logThreadInfo(logger, title, 0L);
+
+assertThat(logCapturer.getOutput(),
+containsString("Process Thread Dump: " + title));
+  }
+
   // Used for testGetDeclaredFieldsIncludingInherited
   private class Parent {
 private int parentField;


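For illustration only: a minimal caller of the new slf4j-based
ReflectionUtils.logThreadInfo shown above; the logger name and title are
arbitrary:

  import org.apache.hadoop.util.ReflectionUtils;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class ThreadDumpSketch {
    private static final Logger LOG =
        LoggerFactory.getLogger(ThreadDumpSketch.class);

    public static void main(String[] args) {
      // minInterval is in seconds (the method compares against
      // minInterval * 1000 ms), so repeated calls within a minute
      // produce at most one dump.
      ReflectionUtils.logThreadInfo(LOG, "startup diagnostics", 60L);
    }
  }
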

[14/50] [abbrv] hadoop git commit: YARN-6751. Display reserved resources in web UI per queue (Contributed by Abdullah Yousufi via Daniel Templeton)

2017-07-07 Thread xgong
YARN-6751. Display reserved resources in web UI per queue
(Contributed by Abdullah Yousufi via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec975197
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec975197
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec975197

Branch: refs/heads/YARN-5734
Commit: ec975197799417a1d5727dedc395fe6c15c30eb2
Parents: 441378e
Author: Daniel Templeton 
Authored: Thu Jun 29 16:52:46 2017 -0700
Committer: Daniel Templeton 
Committed: Thu Jun 29 16:53:50 2017 -0700

--
 .../yarn/server/resourcemanager/scheduler/fair/FSQueue.java   | 7 +++
 .../yarn/server/resourcemanager/webapp/FairSchedulerPage.java | 6 --
 .../resourcemanager/webapp/dao/FairSchedulerQueueInfo.java| 6 ++
 3 files changed, 17 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec975197/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index 12b1b83..1016823 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -57,6 +57,7 @@ public abstract class FSQueue implements Queue, Schedulable {
 
   private Resource fairShare = Resources.createResource(0, 0);
   private Resource steadyFairShare = Resources.createResource(0, 0);
+  private Resource reservedResource = Resources.createResource(0, 0);
   private final String name;
   protected final FairScheduler scheduler;
   private final YarnAuthorizationProvider authorizer;
@@ -161,6 +162,12 @@ public abstract class FSQueue implements Queue, 
Schedulable {
 this.maxShare = maxShare;
   }
 
+  public Resource getReservedResource() {
+reservedResource.setMemorySize(metrics.getReservedMB());
+reservedResource.setVirtualCores(metrics.getReservedVirtualCores());
+return reservedResource;
+  }
+
   @Override
   public Resource getMaxShare() {
 return maxShare;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec975197/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
index 544275e..5f46841 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
@@ -75,7 +75,8 @@ public class FairSchedulerPage extends RmView {
   _("Num Active Applications:", qinfo.getNumActiveApplications()).
   _("Num Pending Applications:", qinfo.getNumPendingApplications()).
   _("Min Resources:", qinfo.getMinResources().toString()).
-  _("Max Resources:", qinfo.getMaxResources().toString());
+  _("Max Resources:", qinfo.getMaxResources().toString()).
+  _("Reserved Resources:", qinfo.getReservedResources().toString());
   int maxApps = qinfo.getMaxApplications();
   if (maxApps < Integer.MAX_VALUE) {
   ri._("Max Running Applications:", qinfo.getMaxApplications());
@@ -103,7 +104,8 @@ public class FairSchedulerPage extends RmView {
   ResponseInfo ri = info("\'" + qinfo.getQueueName() + "\' Queue Status").
   _("Used Resources:", qinfo.getUsedResources().toString()).
   _("Min Resources:", qinfo.getMinResources().toString()).
-  

[26/50] [abbrv] hadoop git commit: MAPREDUCE-6905. HADOOP_JOB_HISTORY_OPTS should be HADOOP_JOB_HISTORYSERVER_OPTS in mapred-config.sh. Contributed by LiXin Ge.

2017-07-07 Thread xgong
MAPREDUCE-6905. HADOOP_JOB_HISTORY_OPTS should be HADOOP_JOB_HISTORYSERVER_OPTS 
in mapred-config.sh. Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf1f5993
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf1f5993
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf1f5993

Branch: refs/heads/YARN-5734
Commit: bf1f59937dd5d860c9ed7fefce203d6a9f645182
Parents: fa1aaee
Author: Naganarasimha 
Authored: Sun Jul 2 15:56:27 2017 +0530
Committer: Naganarasimha 
Committed: Sun Jul 2 15:56:27 2017 +0530

--
 .../src/test/java/org/apache/hadoop/fs/TestDFSIO.java | 14 --
 1 file changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf1f5993/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
index 34eac83..12fbdad 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
@@ -294,8 +294,17 @@ public class TestDFSIO implements Tool {
   int nrFiles
 ) throws IOException {
 LOG.info("creating control file: "+nrBytes+" bytes, "+nrFiles+" files");
-
+final int maxDirItems = config.getInt(
+DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
+DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);
 Path controlDir = getControlDir(config);
+
+if (nrFiles > maxDirItems) {
+  final String message = "The directory item limit of " + controlDir +
+  " is exceeded: limit=" + maxDirItems + " items=" + nrFiles;
+  throw new IOException(message);
+}
+
 fs.delete(controlDir, true);
 
 for(int i=0; i < nrFiles; i++) {
@@ -310,8 +319,9 @@ public class TestDFSIO implements Tool {
   } catch(Exception e) {
 throw new IOException(e.getLocalizedMessage());
   } finally {
-if (writer != null)
+if (writer != null) {
   writer.close();
+}
 writer = null;
   }
 }





[17/50] [abbrv] hadoop git commit: Updating version for 3.0.0-beta1 development

2017-07-07 Thread xgong
Updating version for 3.0.0-beta1 development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af2773f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af2773f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af2773f6

Branch: refs/heads/YARN-5734
Commit: af2773f609ba930825bab5d30767757c0e59aac7
Parents: 900221f
Author: Andrew Wang 
Authored: Thu Jun 29 17:57:40 2017 -0700
Committer: Andrew Wang 
Committed: Thu Jun 29 17:57:40 2017 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop-sls/pom.xml  | 4 ++--
 hadoop-tools/hadoop-streaming/pom.xml| 4 ++--
 

[47/50] [abbrv] hadoop git commit: YARN-5952. Create REST API for changing YARN scheduler configurations. (Jonathan Hung via wangda)

2017-07-07 Thread xgong
YARN-5952. Create REST API for changing YARN scheduler configurations. 
(Jonathan Hung via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa52bab1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa52bab1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa52bab1

Branch: refs/heads/YARN-5734
Commit: fa52bab197ea9b278987a7350ff9c9e2a44a9577
Parents: 46bde8e
Author: Wangda Tan 
Authored: Mon Apr 3 10:12:01 2017 -0700
Committer: Xuan 
Committed: Fri Jul 7 14:12:47 2017 -0700

--
 .../scheduler/MutableConfScheduler.java |  40 ++
 .../scheduler/MutableConfigurationProvider.java |   5 +-
 .../scheduler/capacity/CapacityScheduler.java   |  16 +-
 .../conf/InMemoryConfigurationStore.java|   6 +-
 .../conf/MutableCSConfigurationProvider.java|  24 +-
 .../resourcemanager/webapp/RMWebServices.java   | 169 +++
 .../webapp/dao/QueueConfigInfo.java |  57 +++
 .../webapp/dao/QueueConfigsUpdateInfo.java  |  60 +++
 .../TestMutableCSConfigurationProvider.java |   6 +-
 .../TestRMWebServicesConfigurationMutation.java | 477 +++
 10 files changed, 849 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa52bab1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
new file mode 100644
index 000..35e36e1
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.security.UserGroupInformation;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Interface for a scheduler that supports changing configuration at runtime.
+ *
+ */
+public interface MutableConfScheduler extends ResourceScheduler {
+
+  /**
+   * Update the scheduler's configuration.
+   * @param user Caller of this update
+   * @param confUpdate key-value map of the configuration update
+   * @throws IOException if update is invalid
+   */
+  void updateConfiguration(UserGroupInformation user,
+  Map confUpdate) throws IOException;
+
+}

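For illustration only: how a caller might push a change through the new
interface, assuming the Map<String, String> signature implied by the javadoc
above (the queue property and value are hypothetical):

  import java.io.IOException;
  import java.util.HashMap;
  import java.util.Map;

  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
  import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;

  public class UpdateConfSketch {
    static void push(ResourceScheduler scheduler) throws IOException {
      if (scheduler instanceof MutableConfScheduler) {
        Map<String, String> update = new HashMap<>();
        update.put("yarn.scheduler.capacity.root.default.capacity", "50");
        ((MutableConfScheduler) scheduler).updateConfiguration(
            UserGroupInformation.getCurrentUser(), update);
      }
    }
  }
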
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa52bab1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index da30a2b..889c3bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ 

[44/50] [abbrv] hadoop git commit: YARN-5951. Changes to allow CapacityScheduler to use configuration store

2017-07-07 Thread xgong
YARN-5951. Changes to allow CapacityScheduler to use configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5275488
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5275488
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5275488

Branch: refs/heads/YARN-5734
Commit: b5275488fadd5e4b314686ce5b1d4688b69dbd72
Parents: f484a6f
Author: Jonathan Hung 
Authored: Mon Jan 30 19:03:48 2017 -0800
Committer: Xuan 
Committed: Fri Jul 7 14:12:44 2017 -0700

--
 .../scheduler/capacity/CapacityScheduler.java   | 36 +--
 .../CapacitySchedulerConfiguration.java | 10 +++
 .../capacity/conf/CSConfigurationProvider.java  | 46 ++
 .../conf/FileBasedCSConfigurationProvider.java  | 67 
 .../scheduler/capacity/conf/package-info.java   | 29 +
 .../capacity/TestCapacityScheduler.java |  4 +-
 6 files changed, 170 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5275488/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index d3186da..a925904 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -104,6 +103,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityDiagnosticConstant;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.FileBasedCSConfigurationProvider;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
@@ -162,6 +163,7 @@ public class CapacityScheduler extends
 
   private int offswitchPerHeartbeatLimit;
 
+  private CSConfigurationProvider csConfProvider;
 
   @Override
   public void setConf(Configuration conf) {
@@ -285,7 +287,18 @@ public class CapacityScheduler extends
   IOException {
 try {
   writeLock.lock();
-  this.conf = loadCapacitySchedulerConfiguration(configuration);
+  String confProviderStr = configuration.get(
+  CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
+  CapacitySchedulerConfiguration.DEFAULT_CS_CONF_PROVIDER);
+  if (confProviderStr.equals(
+  CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER)) {
+this.csConfProvider = new FileBasedCSConfigurationProvider(rmContext);
+  } else {
+throw new IOException("Invalid CS configuration provider: " +
+confProviderStr);
+  }
+  this.csConfProvider.init(configuration);
+  this.conf = this.csConfProvider.loadConfiguration(configuration);
   validateConf(this.conf);
   this.minimumAllocation = this.conf.getMinimumAllocation();
   initMaximumResourceCapability(this.conf.getMaximumAllocation());
@@ -392,7 +405,7 @@ public class CapacityScheduler extends
   writeLock.lock();
   Configuration configuration = new Configuration(newConf);
   CapacitySchedulerConfiguration oldConf = this.conf;
-  this.conf = loadCapacitySchedulerConfiguration(configuration);
+  this.conf = csConfProvider.loadConfiguration(configuration);
  

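For illustration only: a sketch of selecting the provider explicitly via the
constants referenced in the hunk above; any other value makes initScheduler
throw the "Invalid CS configuration provider" IOException:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

  public class ProviderSelectionSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // FILE_CS_CONF_PROVIDER is the only provider accepted by this patch.
      conf.set(CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
          CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER);
    }
  }
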
[34/50] [abbrv] hadoop git commit: HADOOP-14608. KMS JMX servlet path not backwards compatible. Contributed by John Zhuge.

2017-07-07 Thread xgong
HADOOP-14608. KMS JMX servlet path not backwards compatible. Contributed by 
John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/946dd256
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/946dd256
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/946dd256

Branch: refs/heads/YARN-5734
Commit: 946dd256755109ca57d9cfa0912eef8402450181
Parents: 6436768
Author: John Zhuge 
Authored: Fri Jun 30 11:12:29 2017 -0700
Committer: John Zhuge 
Committed: Wed Jul 5 11:16:56 2017 -0700

--
 .../main/resources/webapps/kms/WEB-INF/web.xml  | 10 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 79 
 2 files changed, 89 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/946dd256/hadoop-common-project/hadoop-kms/src/main/resources/webapps/kms/WEB-INF/web.xml
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/resources/webapps/kms/WEB-INF/web.xml
 
b/hadoop-common-project/hadoop-kms/src/main/resources/webapps/kms/WEB-INF/web.xml
index 1c14d28..737236c 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/resources/webapps/kms/WEB-INF/web.xml
+++ 
b/hadoop-common-project/hadoop-kms/src/main/resources/webapps/kms/WEB-INF/web.xml
@@ -40,11 +40,21 @@
     <load-on-startup>1</load-on-startup>
   </servlet>
 
+  <servlet>
+    <servlet-name>jmx-servlet</servlet-name>
+    <servlet-class>org.apache.hadoop.jmx.JMXJsonServlet</servlet-class>
+  </servlet>
+
   <servlet-mapping>
     <servlet-name>webservices-driver</servlet-name>
     <url-pattern>/kms/*</url-pattern>
   </servlet-mapping>
 
+  <servlet-mapping>
+    <servlet-name>jmx-servlet</servlet-name>
+    <url-pattern>/kms/jmx</url-pattern>
+  </servlet-mapping>
+
   <filter>
     <filter-name>authFilter</filter-name>
     <filter-class>org.apache.hadoop.crypto.key.kms.server.KMSAuthenticationFilter</filter-class>

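For illustration only: reading the restored /kms/jmx endpoint from Java; the
host and port below are hypothetical and depend on the KMS deployment:

  import java.io.InputStream;
  import java.net.URL;
  import java.nio.charset.StandardCharsets;

  public class KmsJmxSketch {
    public static void main(String[] args) throws Exception {
      // Hypothetical local KMS address; the servlet-mapping above exposes /kms/jmx.
      URL jmx = new URL("http://localhost:9600/kms/jmx");
      try (InputStream in = jmx.openStream()) {
        byte[] body = in.readAllBytes();  // java.io.InputStream, Java 9+
        System.out.println(new String(body, StandardCharsets.UTF_8));
      }
    }
  }
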
http://git-wip-us.apache.org/repos/asf/hadoop/blob/946dd256/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index dc5f83f..a45906a 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -40,10 +40,12 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.http.client.utils.URIBuilder;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -55,6 +57,7 @@ import org.mockito.internal.util.reflection.Whitebox;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.net.ssl.HttpsURLConnection;
 import javax.security.auth.login.AppConfigurationEntry;
 
 import java.io.ByteArrayInputStream;
@@ -62,6 +65,7 @@ import java.io.DataInputStream;
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.Writer;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -69,6 +73,8 @@ import java.net.ServerSocket;
 import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URL;
+import java.net.URLConnection;
+import java.security.GeneralSecurityException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -83,6 +89,8 @@ import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.LinkedBlockingQueue;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -97,6 +105,8 @@ public class TestKMS {
   private static final String SSL_RELOADER_THREAD_NAME =
   "Truststore reloader thread";
 
+  private SSLFactory sslFactory;
+
   @Rule
   public final Timeout testTimeout = new Timeout(18);
 
@@ -317,6 +327,57 @@ public class TestKMS {
 }
   }
 
+  /**
+   * Read in the content from an URL connection.
+   * @param conn URLConnection To read
+   * @return the text from the output
+   * @throws IOException if something went wrong
+   */
+  private static String readOutput(URLConnection conn) throws IOException {
+StringBuilder out = new StringBuilder();
+InputStream in = conn.getInputStream();
+byte[] buffer = new byte[64 * 1024];

[30/50] [abbrv] hadoop git commit: HADOOP-14571. Deprecate public APIs relate to log4j1

2017-07-07 Thread xgong
HADOOP-14571. Deprecate public APIs relate to log4j1

This closes #244

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2aba1da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2aba1da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2aba1da

Branch: refs/heads/YARN-5734
Commit: f2aba1da30aae482a2d65696493b609948f9b904
Parents: 1aaa7f1
Author: Wenxin He 
Authored: Tue Jun 27 11:51:34 2017 +0800
Committer: Akira Ajisaka 
Committed: Tue Jul 4 18:55:20 2017 +0900

--
 .../main/java/org/apache/hadoop/io/IOUtils.java |  3 ++
 .../java/org/apache/hadoop/util/LogAdapter.java |  4 +++
 .../apache/hadoop/test/GenericTestUtils.java| 33 
 3 files changed, 40 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2aba1da/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index e24f196..a3bccef 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -246,7 +246,10 @@ public class IOUtils {
*
* @param log the log to record problems to at debug level. Can be null.
* @param closeables the objects to close
+   * @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)}
+   * instead
*/
+  @Deprecated
   public static void cleanup(Log log, java.io.Closeable... closeables) {
 for (java.io.Closeable c : closeables) {
   if (c != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2aba1da/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java
index 6ef9093..b2bcbf5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java
@@ -32,6 +32,10 @@ class LogAdapter {
 this.LOGGER = LOGGER;
   }
 
+  /**
+   * @deprecated use {@link #create(Logger)} instead
+   */
+  @Deprecated
   public static LogAdapter create(Log LOG) {
 return new LogAdapter(LOG);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2aba1da/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 00dc7f2..77a79ff 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -94,20 +94,33 @@ public abstract class GenericTestUtils {
   public static final String ERROR_INVALID_ARGUMENT =
   "Total wait time should be greater than check interval time";
 
+  /**
+   * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
+   */
+  @Deprecated
   @SuppressWarnings("unchecked")
   public static void disableLog(Log log) {
 // We expect that commons-logging is a wrapper around Log4j.
 disableLog((Log4JLogger) log);
   }
 
+  @Deprecated
   public static Logger toLog4j(org.slf4j.Logger logger) {
 return LogManager.getLogger(logger.getName());
   }
 
+  /**
+   * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
+   */
+  @Deprecated
   public static void disableLog(Log4JLogger log) {
 log.getLogger().setLevel(Level.OFF);
   }
 
+  /**
+   * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
+   */
+  @Deprecated
   public static void disableLog(Logger logger) {
 logger.setLevel(Level.OFF);
   }
@@ -116,20 +129,40 @@ public abstract class GenericTestUtils {
 disableLog(toLog4j(logger));
   }
 
+  /**
+   * @deprecated
+   * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
+   */
+  @Deprecated
   @SuppressWarnings("unchecked")
   public static void setLogLevel(Log log, Level level) {
 // We expect that commons-logging is a wrapper around Log4j.
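
The hunk above is cut off by the archive. For orientation, a hedged sketch of
a test using the slf4j-first entry points that the new @deprecated tags point
to (the test class is hypothetical; the method names come from the javadoc
above):

  import org.apache.hadoop.test.GenericTestUtils;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  import org.slf4j.event.Level;

  public class LoggingControlExample {
    private static final Logger LOG =
        LoggerFactory.getLogger(LoggingControlExample.class);

    void configureLogs() {
      GenericTestUtils.setLogLevel(LOG, Level.DEBUG);  // slf4j overload
      GenericTestUtils.disableLog(LOG);                // silence it entirely
    }
  }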
 

[16/50] [abbrv] hadoop git commit: HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang.

2017-07-07 Thread xgong
HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/900221f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/900221f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/900221f9

Branch: refs/heads/YARN-5734
Commit: 900221f95ea9fe1936b4d5f277e6047ee8734eca
Parents: 72993b3
Author: Arpit Agarwal 
Authored: Thu Jun 29 17:15:13 2017 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 29 17:15:13 2017 -0700

--
 .../server/blockmanagement/BlockManager.java| 13 ++-
 .../PendingReconstructionBlocks.java|  8 +-
 .../namenode/metrics/NameNodeMetrics.java   | 18 
 .../TestPendingReconstruction.java  | 86 +++-
 4 files changed, 118 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/900221f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a0c4698..a5ee30b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1851,7 +1851,7 @@ public class BlockManager implements BlockStatsMXBean {
 (pendingReplicaNum > 0 || isPlacementPolicySatisfied(block));
   }
 
-  private BlockReconstructionWork scheduleReconstruction(BlockInfo block,
+  BlockReconstructionWork scheduleReconstruction(BlockInfo block,
   int priority) {
 // skip abandoned block or block reopened for append
 if (block.isDeleted() || !block.isCompleteOrCommitted()) {
@@ -1873,6 +1873,7 @@ public class BlockManager implements BlockStatsMXBean {
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be reconstructed from any node
   LOG.debug("Block {} cannot be reconstructed from any node", block);
+  NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
   return null;
 }
 
@@ -1885,6 +1886,7 @@ public class BlockManager implements BlockStatsMXBean {
   neededReconstruction.remove(block, priority);
   blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
   " it has enough replicas", block);
+  NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
   return null;
 }
 
@@ -1900,6 +1902,7 @@ public class BlockManager implements BlockStatsMXBean {
 if (block.isStriped()) {
   if (pendingNum > 0) {
 // Wait the previous reconstruction to finish.
+NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
 return null;
   }
 
@@ -3727,8 +3730,8 @@ public class BlockManager implements BlockStatsMXBean {
* The given node is reporting that it received a certain block.
*/
   @VisibleForTesting
-  void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
-  throws IOException {
+  public void addBlock(DatanodeStorageInfo storageInfo, Block block,
+  String delHint) throws IOException {
 DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
 // Decrement number of blocks scheduled to this datanode.
 // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with 
@@ -3751,7 +3754,9 @@ public class BlockManager implements BlockStatsMXBean {
 BlockInfo storedBlock = getStoredBlock(block);
 if (storedBlock != null &&
 block.getGenerationStamp() == storedBlock.getGenerationStamp()) {
-  pendingReconstruction.decrement(storedBlock, node);
+  if (pendingReconstruction.decrement(storedBlock, node)) {
+NameNode.getNameNodeMetrics().incSuccessfulReReplications();
+  }
 }
 processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
 delHintNode);
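
The NameNodeMetrics side of this change appears only in the diffstat above.
As a rough sketch (not the actual patch), counters like these are typically
declared with Hadoop's metrics2 API; the field names and description strings
below are assumptions, only the mutator names appear in the hunks:

  import org.apache.hadoop.metrics2.annotation.Metric;
  import org.apache.hadoop.metrics2.annotation.Metrics;
  import org.apache.hadoop.metrics2.lib.MutableCounterLong;

  @Metrics(context = "dfs")
  class ReReplicationMetricsSketch {
    @Metric("Successful block re-replications")
    MutableCounterLong successfulReReplications;
    @Metric("Times re-replication was not scheduled")
    MutableCounterLong numTimesReReplicationNotScheduled;

    void incSuccessfulReReplications() {
      successfulReReplications.incr();
    }

    void incNumTimesReReplicationNotScheduled() {
      numTimesReReplicationNotScheduled.incr();
    }
  }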

http://git-wip-us.apache.org/repos/asf/hadoop/blob/900221f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
index 2221d1d..0f20daa 100644
--- 

[28/50] [abbrv] hadoop git commit: HDFS-12078. Add time unit to the description of property dfs.namenode.stale.datanode.interval in hdfs-default.xml. Contributed by Weiwei Yang.

2017-07-07 Thread xgong
HDFS-12078. Add time unit to the description of property 
dfs.namenode.stale.datanode.interval in hdfs-default.xml. Contributed by Weiwei 
Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/186650d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/186650d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/186650d2

Branch: refs/heads/YARN-5734
Commit: 186650d21d482e2f3bc4523ae989ebe76081b0e3
Parents: b0560e0
Author: Akira Ajisaka 
Authored: Tue Jul 4 14:51:52 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Jul 4 14:51:52 2017 +0900

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/186650d2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 96c04f0..4caee9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1811,8 +1811,8 @@
   dfs.namenode.stale.datanode.interval
   30000
   
-Default time interval for marking a datanode as "stale", i.e., if 
-the namenode has not received heartbeat msg from a datanode for 
+Default time interval in milliseconds for marking a datanode as "stale",
+i.e., if the namenode has not received heartbeat msg from a datanode for
 more than this time interval, the datanode will be marked and treated 
 as "stale" by default. The stale interval cannot be too small since 
 otherwise this may cause too frequent change of stale states. 
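
A small, hedged sketch of reading this interval programmatically; the
DFSConfigKeys constants are assumed to correspond to this key and its
30-second default:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSConfigKeys;

  public class StaleIntervalExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      long staleMs = conf.getLong(
          DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
          DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
      System.out.println("Stale datanode interval: " + staleMs + " ms");
    }
  }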





[03/50] [abbrv] hadoop git commit: HADOOP-14297. Update the documentation about the new ec codecs config keys. Contributed by Kai Sasaki.

2017-07-07 Thread xgong
HADOOP-14297. Update the documentation about the new ec codecs config keys. 
Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9d8bdfd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9d8bdfd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9d8bdfd

Branch: refs/heads/YARN-5734
Commit: e9d8bdfdf576340196843dae92551cc36a87e95f
Parents: d6df0fd
Author: Wei-Chiu Chuang 
Authored: Wed Jun 28 13:53:15 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Wed Jun 28 13:53:54 2017 -0700

--
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md   | 8 +++-
 1 file changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9d8bdfd/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 69e8ef2..1c0a2de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -123,7 +123,7 @@ Deployment
   `io.erasurecode.codec.xor.rawcoders` for the XOR codec.
   User can also configure self-defined codec with configuration key like:
   `io.erasurecode.codec.self-defined-codec.rawcoders`.
-  The values for these key are lists of coder names with a fall-back mechanism.
+  The values for these key are lists of coder names with a fall-back 
mechanism. These codec factories are loaded in the order specified by the 
configuration values, until a codec is loaded successfully. The default RS and 
XOR codec configuration prefers native implementation over the pure Java one. 
There is no RS-LEGACY native codec implementation so the default is pure Java 
implementation only.
   All these codecs have implementations in pure Java. For default RS codec, 
there is also a native implementation which leverages Intel ISA-L library to 
improve the performance of codec. For XOR codec, a native implementation which 
leverages Intel ISA-L library to improve the performance of codec is also 
supported. Please refer to section "Enable Intel ISA-L" for more detail 
information.
   The default implementation for RS Legacy is pure Java, and the default 
implementations for default RS and XOR are native implementations using Intel 
ISA-L library.
 
@@ -138,13 +138,11 @@ Deployment
 
   HDFS native implementation of default RS codec leverages Intel ISA-L library 
to improve the encoding and decoding calculation. To enable and use Intel 
ISA-L, there are three steps.
   1. Build ISA-L library. Please refer to the official site 
"https://github.com/01org/isa-l/; for detail information.
-  2. Build Hadoop with ISA-L support. Please refer to "Intel ISA-L build 
options" section in "Build instructions for Hadoop" in (BUILDING.txt) in the 
source code. Use `-Dbundle.isal` to copy the contents of the `isal.lib` 
directory into the final tar file. Deploy Hadoop with the tar file. Make sure 
ISA-L is available on HDFS clients and DataNodes.
-  3. Configure the `io.erasurecode.codec.rs.rawcoder` key with value 
`org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory` on 
HDFS clients and DataNodes.
+  2. Build Hadoop with ISA-L support. Please refer to "Intel ISA-L build 
options" section in "Build instructions for Hadoop" in (BUILDING.txt) in the 
source code.
+  3. Use `-Dbundle.isal` to copy the contents of the `isal.lib` directory into 
the final tar file. Deploy Hadoop with the tar file. Make sure ISA-L is 
available on HDFS clients and DataNodes.
 
   To verify that ISA-L is correctly detected by Hadoop, run the `hadoop 
checknative` command.
 
-  To enable the native implementation of the XOR codec, perform the same first 
two steps as above to build and deploy Hadoop with ISA-L support. Afterwards, 
configure the `io.erasurecode.codec.xor.rawcoder` key with 
`org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory` on 
both HDFS client and DataNodes.
-
 ### Administrative commands
 
   HDFS provides an `ec` subcommand to perform administrative commands related 
to erasure coding.
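
As a hedged illustration of the fall-back mechanism described above (the rs
key follows the same pattern as the xor key named in the text; the coder
names rs_native and rs_java are assumed values for illustration):

  import org.apache.hadoop.conf.Configuration;

  public class EcCoderConfExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Try the ISA-L backed coder first, then fall back to pure Java.
      conf.set("io.erasurecode.codec.rs.rawcoders", "rs_native,rs_java");
      System.out.println(conf.get("io.erasurecode.codec.rs.rawcoders"));
    }
  }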





[49/50] [abbrv] hadoop git commit: YARN-6575. Support global configuration mutation in MutableConfProvider. (Jonathan Hung via Xuan Gong)

2017-07-07 Thread xgong
YARN-6575. Support global configuration mutation in MutableConfProvider. 
(Jonathan Hung via Xuan Gong)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f22ce694
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f22ce694
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f22ce694

Branch: refs/heads/YARN-5734
Commit: f22ce6945fc237aeff8a07fc407962597fd9ff3f
Parents: ee373da
Author: Xuan 
Authored: Mon Jun 5 16:30:38 2017 -0700
Committer: Xuan 
Committed: Fri Jul 7 14:12:49 2017 -0700

--
 .../ConfigurationMutationACLPolicy.java |   4 +-
 .../DefaultConfigurationMutationACLPolicy.java  |   4 +-
 .../scheduler/MutableConfScheduler.java |   4 +-
 .../scheduler/MutableConfigurationProvider.java |   4 +-
 .../scheduler/capacity/CapacityScheduler.java   |   4 +-
 .../conf/MutableCSConfigurationProvider.java|  10 +-
 ...ueueAdminConfigurationMutationACLPolicy.java |  22 +++-
 .../resourcemanager/webapp/RMWebServices.java   |   4 +-
 .../webapp/dao/QueueConfigsUpdateInfo.java  |  60 ---
 .../webapp/dao/SchedConfUpdateInfo.java |  69 +
 .../TestConfigurationMutationACLPolicies.java   |  28 -
 .../TestMutableCSConfigurationProvider.java |  10 +-
 .../TestRMWebServicesConfigurationMutation.java | 101 +--
 13 files changed, 205 insertions(+), 119 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f22ce694/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
index 724487b..3a388fe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
@@ -21,7 +21,7 @@ package 
org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 
 /**
  * Interface for determining whether configuration mutations are allowed.
@@ -41,7 +41,7 @@ public interface ConfigurationMutationACLPolicy {
* @param confUpdate configurations to be updated
* @return whether provided mutation is allowed or not
*/
-  boolean isMutationAllowed(UserGroupInformation user, QueueConfigsUpdateInfo
+  boolean isMutationAllowed(UserGroupInformation user, SchedConfUpdateInfo
   confUpdate);
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f22ce694/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
index 680c3b8..6648668 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import 

[50/50] [abbrv] hadoop git commit: YARN-5953: Create CLI for changing YARN configurations. (Jonathan Hung via xgong)

2017-07-07 Thread xgong
YARN-5953: Create CLI for changing YARN configurations. (Jonathan Hung via xgong)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9aeeb554
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9aeeb554
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9aeeb554

Branch: refs/heads/YARN-5734
Commit: 9aeeb554a9080c168450ab0fedfde8711b1c4f72
Parents: f22ce69
Author: Xuan 
Authored: Fri Jul 7 14:16:46 2017 -0700
Committer: Xuan 
Committed: Fri Jul 7 14:16:46 2017 -0700

--
 hadoop-yarn-project/hadoop-yarn/bin/yarn|   4 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd|   5 +
 .../hadoop/yarn/client/cli/SchedConfCLI.java| 238 +++
 .../yarn/client/cli/TestSchedConfCLI.java   | 160 +
 .../hadoop/yarn/webapp/dao/package-info.java|  27 +++
 .../yarn/webapp/util/YarnWebServiceUtils.java   |  14 ++
 .../ConfigurationMutationACLPolicy.java |   2 +-
 .../DefaultConfigurationMutationACLPolicy.java  |   2 +-
 .../scheduler/MutableConfScheduler.java |   2 +-
 .../scheduler/MutableConfigurationProvider.java |   2 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +-
 .../conf/MutableCSConfigurationProvider.java|   4 +-
 ...ueueAdminConfigurationMutationACLPolicy.java |   4 +-
 .../resourcemanager/webapp/RMWebServices.java   |   1 +
 .../webapp/dao/QueueConfigInfo.java |   4 +-
 .../webapp/dao/SchedConfUpdateInfo.java |  18 +-
 .../TestConfigurationMutationACLPolicies.java   |   4 +-
 .../TestMutableCSConfigurationProvider.java |   4 +-
 .../TestRMWebServicesConfigurationMutation.java |  65 +++--
 19 files changed, 508 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9aeeb554/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index cf6457b..21656fe 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -46,6 +46,7 @@ function hadoop_usage
   hadoop_add_subcommand "queue" "prints queue information"
   hadoop_add_subcommand "resourcemanager" "run the ResourceManager"
   hadoop_add_subcommand "rmadmin" "admin tools"
+  hadoop_add_subcommand "schedconf" "modify scheduler configuration"
   hadoop_add_subcommand "scmadmin" "SharedCacheManager admin tools"
   hadoop_add_subcommand "sharedcachemanager" "run the SharedCacheManager 
daemon"
   hadoop_add_subcommand "timelinereader" "run the timeline reader server"
@@ -137,6 +138,9 @@ function yarncmd_case
 rmadmin)
   HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
 ;;
+schedconf)
+  HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.SchedConfCLI'
+;;
 scmadmin)
   HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.SCMAdmin'
 ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9aeeb554/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index ca879f5..8b72394 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -285,6 +285,11 @@ goto :eof
   set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
   goto :eof
 
+:schedconf
+  set CLASS=org.apache.hadoop.yarn.client.cli.SchedConfCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9aeeb554/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
new file mode 100644
index 000..e17062e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
@@ -0,0 +1,238 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with 
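
The new SchedConfCLI source is cut off by the archive. A hedged sketch of
invoking it directly, assuming it follows the usual Tool pattern that the
yarn script wiring above suggests:

  import org.apache.hadoop.util.ToolRunner;
  import org.apache.hadoop.yarn.client.cli.SchedConfCLI;

  public class SchedConfExample {
    public static void main(String[] args) throws Exception {
      // Roughly equivalent to "yarn schedconf <options>" via the updated script
      System.exit(ToolRunner.run(new SchedConfCLI(), args));
    }
  }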

[33/50] [abbrv] hadoop git commit: HDFS-12089. Fix ambiguous NN retry log message in WebHDFS. Contributed by Eric Badger

2017-07-07 Thread xgong
HDFS-12089. Fix ambiguous NN retry log message in WebHDFS. Contributed by Eric 
Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6436768b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6436768b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6436768b

Branch: refs/heads/YARN-5734
Commit: 6436768baf1b2ac05f6786edcd76fd3a66c03eaa
Parents: a180ba4
Author: Mingliang Liu 
Authored: Wed Jul 5 11:10:57 2017 -0700
Committer: Mingliang Liu 
Committed: Wed Jul 5 11:10:57 2017 -0700

--
 .../main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6436768b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index a9bc795..3861cba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -792,7 +792,7 @@ public class WebHdfsFileSystem extends FileSystem
   a.action == 
RetryPolicy.RetryAction.RetryDecision.FAILOVER_AND_RETRY;
 
   if (isRetry || isFailoverAndRetry) {
-LOG.info("Retrying connect to namenode: {}. Already tried {}"
+LOG.info("Retrying connect to namenode: {}. Already retried {}"
 + " time(s); retry policy is {}, delay {}ms.",
 nnAddr, retry, retryPolicy, a.delayMillis);
 





[24/50] [abbrv] hadoop git commit: YARN-5067 Support specifying resources for AM containers in SLS. (Yufei Gu via Haibo Chen)

2017-07-07 Thread xgong
YARN-5067 Support specifying resources for AM containers in SLS. (Yufei Gu via 
Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/147df300
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/147df300
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/147df300

Branch: refs/heads/YARN-5734
Commit: 147df300bf00b5f4ed250426b6ccdd69085466da
Parents: 38996fd
Author: Haibo Chen 
Authored: Fri Jun 30 16:50:06 2017 -0700
Committer: Haibo Chen 
Committed: Fri Jun 30 17:03:44 2017 -0700

--
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   | 38 +++
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  | 39 +++-
 .../yarn/sls/appmaster/MRAMSimulator.java   | 11 +++---
 .../hadoop/yarn/sls/conf/SLSConfiguration.java  | 15 
 .../yarn/sls/appmaster/TestAMSimulator.java |  4 +-
 5 files changed, 68 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/147df300/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 02da056..a534f03 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -406,7 +406,7 @@ public class SLSRunner extends Configured implements Tool {
 }
 
 runNewAM(amType, user, queue, oldAppId, jobStartTime, jobFinishTime,
-getTaskContainers(jsonJob), null);
+getTaskContainers(jsonJob), null, getAMContainerResource(jsonJob));
   }
 
   private List getTaskContainers(Map jsonJob)
@@ -558,7 +558,8 @@ public class SLSRunner extends Configured implements Tool {
 
 // Only supports the default job type currently
 runNewAM(SLSUtils.DEFAULT_JOB_TYPE, user, jobQueue, oldJobId,
-jobStartTimeMS, jobFinishTimeMS, containerList, null);
+jobStartTimeMS, jobFinishTimeMS, containerList, null,
+getAMContainerResource(null));
   }
 
   private Resource getDefaultContainerResource() {
@@ -676,7 +677,8 @@ public class SLSRunner extends Configured implements Tool {
 }
 
 runNewAM(SLSUtils.DEFAULT_JOB_TYPE, user, jobQueue, oldJobId,
-jobStartTimeMS, jobFinishTimeMS, containerList, rr);
+jobStartTimeMS, jobFinishTimeMS, containerList, rr,
+getAMContainerResource(null));
   }
 } finally {
   stjp.close();
@@ -684,6 +686,26 @@ public class SLSRunner extends Configured implements Tool {
 
   }
 
+  private Resource getAMContainerResource(Map jsonJob) {
+Resource amContainerResource =
+SLSConfiguration.getAMContainerResource(getConf());
+
+if (jsonJob == null) {
+  return amContainerResource;
+}
+
+if (jsonJob.containsKey("am.memory")) {
+  amContainerResource.setMemorySize(
+  Long.parseLong(jsonJob.get("am.memory").toString()));
+}
+
+if (jsonJob.containsKey("am.vcores")) {
+  amContainerResource.setVirtualCores(
+  Integer.parseInt(jsonJob.get("am.vcores").toString()));
+}
+return amContainerResource;
+  }
+
   private void increaseQueueAppNum(String queue) throws YarnException {
 SchedulerWrapper wrapper = (SchedulerWrapper)rm.getResourceScheduler();
 String queueName = wrapper.getRealQueueName(queue);
@@ -700,7 +722,7 @@ public class SLSRunner extends Configured implements Tool {
   private void runNewAM(String jobType, String user,
   String jobQueue, String oldJobId, long jobStartTimeMS,
   long jobFinishTimeMS, List containerList,
-  ReservationSubmissionRequest rr) {
+  ReservationSubmissionRequest rr, Resource amContainerResource) {
 
 AMSimulator amSim = (AMSimulator) ReflectionUtils.newInstance(
 amClassMap.get(jobType), new Configuration());
@@ -710,9 +732,11 @@ public class SLSRunner extends Configured implements Tool {
   SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS,
   SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS_DEFAULT);
   boolean isTracked = trackedApps.contains(oldJobId);
-  amSim.init(AM_ID++, heartbeatInterval, containerList,
-  rm, this, jobStartTimeMS, jobFinishTimeMS, user, jobQueue,
-  isTracked, oldJobId, rr, runner.getStartTimeMS());
+  AM_ID++;
+
+  amSim.init(heartbeatInterval, containerList, rm, this, jobStartTimeMS,
+  jobFinishTimeMS, user, jobQueue, isTracked, oldJobId, rr,
+  runner.getStartTimeMS(), amContainerResource);
   runner.schedule(amSim);
   maxRuntime = 

[01/50] [abbrv] hadoop git commit: YARN-6743. yarn.resourcemanager.zk-max-znode-size.bytes description needs spaces in yarn-default.xml (Contributed by Lori Loberg via Daniel Templeton) [Forced Update

2017-07-07 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 a48d47572 -> 9aeeb554a (forced update)


YARN-6743. yarn.resourcemanager.zk-max-znode-size.bytes description needs 
spaces in yarn-default.xml
(Contributed by Lori Loberg via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25d891a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25d891a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25d891a7

Branch: refs/heads/YARN-5734
Commit: 25d891a784304fcf02f57bc7984c31af45003553
Parents: f99b6d1
Author: Daniel Templeton 
Authored: Wed Jun 28 13:17:58 2017 -0700
Committer: Daniel Templeton 
Committed: Wed Jun 28 13:17:58 2017 -0700

--
 .../src/main/resources/yarn-default.xml   | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25d891a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index d4b7bde..cbd5345 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -629,11 +629,11 @@
   
 
   
-   Specifies the maximum size of the data that can be stored
-   in a znode.Value should be same or less than jute.maxbuffer 
configured
-   in zookeeper.Default value configured is 1MB.
-   yarn.resourcemanager.zk-max-znode-size.bytes
-   1048576
+Specifies the maximum size of the data that can be stored
+  in a znode. Value should be same or less than jute.maxbuffer configured
+  in zookeeper. Default value configured is 1MB.
+yarn.resourcemanager.zk-max-znode-size.bytes
+1048576
   
 
   





[07/50] [abbrv] hadoop git commit: MAPREDUCE-6536. hadoop-pipes doesn't use maven properties for openssl

2017-07-07 Thread xgong
MAPREDUCE-6536. hadoop-pipes doesn't use maven properties for openssl

Signed-off-by: Ravi Prakash 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20ba86d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20ba86d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20ba86d6

Branch: refs/heads/YARN-5734
Commit: 20ba86d66a47492aa2488d01c6c7cc4fcbef1673
Parents: c1edca1
Author: Allen Wittenauer 
Authored: Wed Jun 28 11:53:09 2017 -0700
Committer: Allen Wittenauer 
Committed: Wed Jun 28 17:33:44 2017 -0700

--
 hadoop-tools/hadoop-pipes/pom.xml | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20ba86d6/hadoop-tools/hadoop-pipes/pom.xml
--
diff --git a/hadoop-tools/hadoop-pipes/pom.xml 
b/hadoop-tools/hadoop-pipes/pom.xml
index 1061d9c..457f3d3 100644
--- a/hadoop-tools/hadoop-pipes/pom.xml
+++ b/hadoop-tools/hadoop-pipes/pom.xml
@@ -39,6 +39,9 @@
   
 false
   
+  
+
+  
   
 
   
@@ -53,6 +56,7 @@
   ${basedir}/src
   
 
${sun.arch.data.model}
+${openssl.prefix} 
   
 
   





[32/50] [abbrv] hadoop git commit: HADOOP-13414. Hide Jetty Server version header in HTTP responses. Contributed by Surendra Singth Lilhore.

2017-07-07 Thread xgong
HADOOP-13414. Hide Jetty Server version header in HTTP responses. Contributed 
by Surendra Singth Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a180ba40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a180ba40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a180ba40

Branch: refs/heads/YARN-5734
Commit: a180ba408128b2d916822e78deb979bbcd1894da
Parents: b17e655
Author: Vinayakumar B 
Authored: Wed Jul 5 16:05:18 2017 +0530
Committer: Vinayakumar B 
Committed: Wed Jul 5 16:05:18 2017 +0530

--
 .../src/main/java/org/apache/hadoop/http/HttpServer2.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a180ba40/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 0891e8e..d7436b2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -431,6 +431,7 @@ public final class HttpServer2 implements FilterContainer {
   HttpConfiguration httpConfig = new HttpConfiguration();
   httpConfig.setRequestHeaderSize(requestHeaderSize);
   httpConfig.setResponseHeaderSize(responseHeaderSize);
+  httpConfig.setSendServerVersion(false);
 
   for (URI ep : endpoints) {
 final ServerConnector connector;





[29/50] [abbrv] hadoop git commit: HADOOP-14615. Add ServiceOperations.stopQuietly that accepts slf4j logger API. Contributed by Wenxin He.

2017-07-07 Thread xgong
HADOOP-14615. Add ServiceOperations.stopQuietly that accepts slf4j logger API.
Contributed by Wenxin He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1aaa7f1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1aaa7f1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1aaa7f1e

Branch: refs/heads/YARN-5734
Commit: 1aaa7f1eacab20d1c27f410333a536033cad1aab
Parents: 186650d
Author: Steve Loughran 
Authored: Tue Jul 4 10:48:02 2017 +0100
Committer: Steve Loughran 
Committed: Tue Jul 4 10:48:02 2017 +0100

--
 .../hadoop/service/ServiceOperations.java   | 20 ++
 .../hadoop/service/TestServiceOperations.java   | 65 
 2 files changed, 85 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1aaa7f1e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
index 6c03e25..a0a77ce 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.slf4j.Logger;
 
 /**
  * This class contains a set of methods to work with services, especially
@@ -87,6 +88,25 @@ public final class ServiceOperations {
 return null;
   }
 
+  /**
+   * Stop a service; if it is null do nothing. Exceptions are caught and
+   * logged at warn level. (but not Throwables). This operation is intended to
+   * be used in cleanup operations
+   *
+   * @param log the log to warn at
+   * @param service a service; may be null
+   * @return any exception that was caught; null if none was.
+   * @see ServiceOperations#stopQuietly(Service)
+   */
+  public static Exception stopQuietly(Logger log, Service service) {
+try {
+  stop(service);
+} catch (Exception e) {
+  log.warn("When stopping the service {} : {}", service.getName(), e, e);
+  return e;
+}
+return null;
+  }
 
   /**
* Class to manage a list of {@link ServiceStateChangeListener} instances,
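
A brief, hedged usage sketch of the new overload; the caller class and its
service argument are hypothetical:

  import org.apache.hadoop.service.Service;
  import org.apache.hadoop.service.ServiceOperations;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class ShutdownExample {
    private static final Logger LOG =
        LoggerFactory.getLogger(ShutdownExample.class);

    static void shutdown(Service service) {
      // Swallows and warn-logs any exception; returns it for inspection.
      Exception e = ServiceOperations.stopQuietly(LOG, service);
      if (e != null) {
        LOG.debug("stop failure detail", e);
      }
    }
  }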

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1aaa7f1e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
new file mode 100644
index 000..5df973d
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.service;
+
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.PrintWriter;
+
+import static org.apache.hadoop.test.GenericTestUtils.LogCapturer.captureLogs;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.junit.Assert.assertThat;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Test miscellaneous 

[20/50] [abbrv] hadoop git commit: HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang.

2017-07-07 Thread xgong
HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a9dc5f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a9dc5f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a9dc5f4

Branch: refs/heads/YARN-5734
Commit: 6a9dc5f44b0c7945e3e9a56248cd4ff80d5c8f0f
Parents: a2f0cbd
Author: Arpit Agarwal 
Authored: Fri Jun 30 10:20:12 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Jun 30 10:20:12 2017 -0700

--
 .../server/blockmanagement/BlockManager.java| 13 ++-
 .../PendingReconstructionBlocks.java|  8 +-
 .../namenode/metrics/NameNodeMetrics.java   | 18 
 .../TestPendingReconstruction.java  | 90 +++-
 4 files changed, 122 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a9dc5f4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a0c4698..a5ee30b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1851,7 +1851,7 @@ public class BlockManager implements BlockStatsMXBean {
 (pendingReplicaNum > 0 || isPlacementPolicySatisfied(block));
   }
 
-  private BlockReconstructionWork scheduleReconstruction(BlockInfo block,
+  BlockReconstructionWork scheduleReconstruction(BlockInfo block,
   int priority) {
 // skip abandoned block or block reopened for append
 if (block.isDeleted() || !block.isCompleteOrCommitted()) {
@@ -1873,6 +1873,7 @@ public class BlockManager implements BlockStatsMXBean {
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be reconstructed from any node
   LOG.debug("Block {} cannot be reconstructed from any node", block);
+  NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
   return null;
 }
 
@@ -1885,6 +1886,7 @@ public class BlockManager implements BlockStatsMXBean {
   neededReconstruction.remove(block, priority);
   blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
   " it has enough replicas", block);
+  NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
   return null;
 }
 
@@ -1900,6 +1902,7 @@ public class BlockManager implements BlockStatsMXBean {
 if (block.isStriped()) {
   if (pendingNum > 0) {
 // Wait the previous reconstruction to finish.
+NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
 return null;
   }
 
@@ -3727,8 +3730,8 @@ public class BlockManager implements BlockStatsMXBean {
* The given node is reporting that it received a certain block.
*/
   @VisibleForTesting
-  void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
-  throws IOException {
+  public void addBlock(DatanodeStorageInfo storageInfo, Block block,
+  String delHint) throws IOException {
 DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
 // Decrement number of blocks scheduled to this datanode.
 // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with 
@@ -3751,7 +3754,9 @@ public class BlockManager implements BlockStatsMXBean {
 BlockInfo storedBlock = getStoredBlock(block);
 if (storedBlock != null &&
 block.getGenerationStamp() == storedBlock.getGenerationStamp()) {
-  pendingReconstruction.decrement(storedBlock, node);
+  if (pendingReconstruction.decrement(storedBlock, node)) {
+NameNode.getNameNodeMetrics().incSuccessfulReReplications();
+  }
 }
 processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
 delHintNode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a9dc5f4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
index 2221d1d..0f20daa 100644
--- 

[23/50] [abbrv] hadoop git commit: HADOOP-14443. Azure: Support retry and client side failover for authorization, SASKey and delegation token generation. Contributed by Santhosh G Nayak

2017-07-07 Thread xgong
HADOOP-14443. Azure: Support retry and client side failover for authorization, 
SASKey and delegation token generation. Contributed by Santhosh G Nayak


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38996fdc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38996fdc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38996fdc

Branch: refs/heads/YARN-5734
Commit: 38996fdcf0987d1da00ce46f8284d8fcdce57329
Parents: bcba844
Author: Mingliang Liu 
Authored: Thu Jun 29 16:13:04 2017 -0700
Committer: Mingliang Liu 
Committed: Fri Jun 30 16:53:48 2017 -0700

--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  39 +--
 .../fs/azure/RemoteSASKeyGeneratorImpl.java | 268 +++
 .../fs/azure/RemoteWasbAuthorizerImpl.java  | 225 ++--
 .../fs/azure/SecureWasbRemoteCallHelper.java| 210 +++
 .../hadoop/fs/azure/WasbRemoteCallHelper.java   | 259 +-
 .../hadoop/fs/azure/security/Constants.java |  20 +-
 .../hadoop/fs/azure/security/JsonUtils.java |  52 
 .../RemoteWasbDelegationTokenManager.java   | 162 +++
 .../hadoop/fs/azure/security/SecurityUtils.java |  86 --
 .../hadoop/fs/azure/security/TokenUtils.java|  60 +
 .../security/WasbDelegationTokenManager.java|  54 
 .../fs/azure/security/WasbTokenRenewer.java |  77 +-
 .../hadoop-azure/src/site/markdown/index.md |  44 ++-
 .../TestNativeAzureFileSystemAuthorization.java |   2 +-
 .../fs/azure/TestWasbRemoteCallHelper.java  | 228 +---
 15 files changed, 1170 insertions(+), 616 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 22f79ff..f92 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -27,9 +27,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.net.URL;
 import java.nio.charset.Charset;
-import java.security.PrivilegedExceptionAction;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Date;
@@ -65,15 +63,14 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
 import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
 import org.apache.hadoop.fs.azure.security.Constants;
-import org.apache.hadoop.fs.azure.security.SecurityUtils;
+import org.apache.hadoop.fs.azure.security.RemoteWasbDelegationTokenManager;
+import org.apache.hadoop.fs.azure.security.WasbDelegationTokenManager;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
-import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
-import 
org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
@@ -1177,7 +1174,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
   private UserGroupInformation ugi;
 
-  private String delegationToken = null;
+  private WasbDelegationTokenManager wasbDelegationTokenManager;
 
   public NativeAzureFileSystem() {
 // set store in initialize()
@@ -1327,9 +1324,7 @@ public class NativeAzureFileSystem extends FileSystem {
 }
 
 if (UserGroupInformation.isSecurityEnabled() && kerberosSupportEnabled) {
-  DelegationTokenAuthenticator authenticator = new 
KerberosDelegationTokenAuthenticator();
-  authURL = new DelegationTokenAuthenticatedURL(authenticator);
-  credServiceUrl = SecurityUtils.getCredServiceUrls(conf);
+  this.wasbDelegationTokenManager = new 
RemoteWasbDelegationTokenManager(conf);
 }
   }
 
@@ -3002,31 +2997,7 @@ public class NativeAzureFileSystem extends FileSystem {
   @Override
   public synchronized Token getDelegationToken(final String renewer) throws 
IOException {
 if (kerberosSupportEnabled) {
-  try {
-final 

[13/50] [abbrv] hadoop git commit: Revert "HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by Yiqun Lin."

2017-07-07 Thread xgong
Revert "HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by 
Yiqun Lin."

This reverts commit 89a8edc0149e3f31a5ade9a0927c4b6332cf6b1a.

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/441378e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/441378e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/441378e7

Branch: refs/heads/YARN-5734
Commit: 441378e7e4609d89b7181dacc8ba92b253a962df
Parents: 5a75f73
Author: Andrew Wang 
Authored: Thu Jun 29 13:54:16 2017 -0700
Committer: Andrew Wang 
Committed: Thu Jun 29 13:54:16 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  7 ++---
 .../hdfs/server/protocol/SlowDiskReports.java   |  5 ++--
 .../dev-support/findbugsExcludeFile.xml |  5 
 .../hdfs/qjournal/server/JournalNode.java   | 16 +---
 .../hdfs/server/common/HdfsServerConstants.java |  7 +
 .../hdfs/server/datanode/DataStorage.java   | 12 +++--
 .../namenode/NNStorageRetentionManager.java | 27 +---
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  6 ++---
 .../offlineImageViewer/ImageLoaderCurrent.java  | 10 +++-
 9 files changed, 33 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 1f6022c..8acda61 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2883,12 +2883,9 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
 synchronized (DFSClient.class) {
   if (STRIPED_READ_THREAD_POOL == null) {
-// Only after thread pool is fully constructed then save it to
-// volatile field.
-ThreadPoolExecutor threadPool = DFSUtilClient.getThreadPoolExecutor(1,
+STRIPED_READ_THREAD_POOL = DFSUtilClient.getThreadPoolExecutor(1,
 numThreads, 60, "StripedRead-", true);
-threadPool.allowCoreThreadTimeOut(true);
-STRIPED_READ_THREAD_POOL = threadPool;
+STRIPED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
index 496389a..8095c2a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
@@ -101,9 +101,8 @@ public final class SlowDiskReports {
 }
 
 boolean areEqual;
-for (Map.Entry> entry : this.slowDisks
-.entrySet()) {
-  if (!entry.getValue().equals(that.slowDisks.get(entry.getKey( {
+for (String disk : this.slowDisks.keySet()) {
+  if (!this.slowDisks.get(disk).equals(that.slowDisks.get(disk))) {
 return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 9270990..be54efb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -252,9 +252,4 @@
 
 
 
-
-
-
-
-
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
--
diff --git 

[45/50] [abbrv] hadoop git commit: YARN-5946: Create YarnConfigurationStore interface and InMemoryConfigurationStore class. Contributed by Jonathan Hung

2017-07-07 Thread xgong
YARN-5946: Create YarnConfigurationStore interface and
InMemoryConfigurationStore class. Contributed by Jonathan Hung


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aea6084f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aea6084f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aea6084f

Branch: refs/heads/YARN-5734
Commit: aea6084f0679a8cd8544c4681ba4213a4e1795ed
Parents: b527548
Author: Xuan 
Authored: Fri Feb 24 15:58:12 2017 -0800
Committer: Xuan 
Committed: Fri Jul 7 14:12:45 2017 -0700

--
 .../conf/InMemoryConfigurationStore.java|  86 +++
 .../capacity/conf/YarnConfigurationStore.java   | 154 +++
 .../conf/TestYarnConfigurationStore.java|  70 +
 3 files changed, 310 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aea6084f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
new file mode 100644
index 000..a208fb9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A default implementation of {@link YarnConfigurationStore}. Doesn't offer
+ * persistent configuration storage, just stores the configuration in memory.
+ */
+public class InMemoryConfigurationStore implements YarnConfigurationStore {
+
+  private Configuration schedConf;
+  private LinkedList pendingMutations;
+  private long pendingId;
+
+  @Override
+  public void initialize(Configuration conf, Configuration schedConf) {
+this.schedConf = schedConf;
+this.pendingMutations = new LinkedList<>();
+this.pendingId = 0;
+  }
+
+  @Override
+  public synchronized long logMutation(LogMutation logMutation) {
+logMutation.setId(++pendingId);
+pendingMutations.add(logMutation);
+return pendingId;
+  }
+
+  @Override
+  public synchronized boolean confirmMutation(long id, boolean isValid) {
+LogMutation mutation = pendingMutations.poll();
+// If confirmMutation is called out of order, discard mutations until id
+// is reached.
+while (mutation != null) {
+  if (mutation.getId() == id) {
+if (isValid) {
+  Map mutations = mutation.getUpdates();
+  for (Map.Entry kv : mutations.entrySet()) {
+schedConf.set(kv.getKey(), kv.getValue());
+  }
+}
+return true;
+  }
+  mutation = pendingMutations.poll();
+}
+return false;
+  }
+
+  @Override
+  public synchronized Configuration retrieve() {
+return schedConf;
+  }
+
+  @Override
+  public synchronized List getPendingMutations() {
+return pendingMutations;
+  }
+
+  @Override
+  public List getConfirmedConfHistory(long fromId) {
+// Unimplemented.
+return null;
+  }
+}
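
A hedged usage sketch of the store above; the LogMutation constructor (an
updates map plus a user string) is assumed from the YarnConfigurationStore
interface added in the same commit, which the archive does not reproduce
here:

  import java.util.HashMap;
  import java.util.Map;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.InMemoryConfigurationStore;
  import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore;

  public class ConfStoreExample {
    public static void main(String[] args) {
      InMemoryConfigurationStore store = new InMemoryConfigurationStore();
      store.initialize(new Configuration(), new Configuration());

      Map<String, String> updates = new HashMap<>();
      updates.put("yarn.scheduler.capacity.root.queues", "default,test");

      // Log the mutation, then confirm it so it is folded into schedConf.
      long id = store.logMutation(
          new YarnConfigurationStore.LogMutation(updates, "admin"));
      store.confirmMutation(id, true);

      System.out.println(
          store.retrieve().get("yarn.scheduler.capacity.root.queues"));
    }
  }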


[41/50] [abbrv] hadoop git commit: Add release notes, changes, jdiff for 3.0.0-alpha4

2017-07-07 Thread xgong
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f10864a8/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/CHANGES.3.0.0-alpha4.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/CHANGES.3.0.0-alpha4.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/CHANGES.3.0.0-alpha4.md
new file mode 100644
index 000..4d4d0bc
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/CHANGES.3.0.0-alpha4.md
@@ -0,0 +1,880 @@
+
+
+# "Apache Hadoop" Changelog
+
+## Release 3.0.0-alpha4 - 2017-06-30
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-10860](https://issues.apache.org/jira/browse/HDFS-10860) | Switch HttpFS from Tomcat to Jetty |  Blocker | httpfs | John Zhuge | John Zhuge |
+| [HADOOP-13929](https://issues.apache.org/jira/browse/HADOOP-13929) | ADLS connector should not check in contract-test-options.xml |  Major | fs/adl, test | John Zhuge | John Zhuge |
+| [HDFS-11100](https://issues.apache.org/jira/browse/HDFS-11100) | Recursively deleting file protected by sticky bit should fail |  Critical | fs | John Zhuge | John Zhuge |
+| [HADOOP-13805](https://issues.apache.org/jira/browse/HADOOP-13805) | UGI.getCurrentUser() fails if user does not have a keytab associated |  Major | security | Alejandro Abdelnur | Xiao Chen |
+| [HDFS-11405](https://issues.apache.org/jira/browse/HDFS-11405) | Rename "erasurecode" CLI subcommand to "ec" |  Blocker | erasure-coding | Andrew Wang | Manoj Govindassamy |
+| [HDFS-11426](https://issues.apache.org/jira/browse/HDFS-11426) | Refactor EC CLI to be similar to storage policies CLI |  Major | erasure-coding, shell | Andrew Wang | Andrew Wang |
+| [HDFS-11427](https://issues.apache.org/jira/browse/HDFS-11427) | Rename "rs-default" to "rs" |  Major | erasure-coding | Andrew Wang | Andrew Wang |
+| [HDFS-11382](https://issues.apache.org/jira/browse/HDFS-11382) | Persist Erasure Coding Policy ID in a new optional field in INodeFile in FSImage |  Major | hdfs | Manoj Govindassamy | Manoj Govindassamy |
+| [HDFS-11428](https://issues.apache.org/jira/browse/HDFS-11428) | Change setErasureCodingPolicy to take a required string EC policy name |  Major | erasure-coding | Andrew Wang | Andrew Wang |
+| [HADOOP-14138](https://issues.apache.org/jira/browse/HADOOP-14138) | Remove S3A ref from META-INF service discovery, rely on existing core-default entry |  Critical | fs/s3 | Steve Loughran | Steve Loughran |
+| [HDFS-11152](https://issues.apache.org/jira/browse/HDFS-11152) | Start erasure coding policy ID number from 1 instead of 0 to void potential unexpected errors |  Blocker | erasure-coding | SammiChen | SammiChen |
+| [HDFS-11314](https://issues.apache.org/jira/browse/HDFS-11314) | Enforce set of enabled EC policies on the NameNode |  Blocker | erasure-coding | Andrew Wang | Andrew Wang |
+| [HDFS-11505](https://issues.apache.org/jira/browse/HDFS-11505) | Do not enable any erasure coding policies by default |  Major | erasure-coding | Andrew Wang | Manoj Govindassamy |
+| [HADOOP-10101](https://issues.apache.org/jira/browse/HADOOP-10101) | Update guava dependency to the latest version |  Major | . | Rakesh R | Tsuyoshi Ozawa |
+| [HADOOP-14267](https://issues.apache.org/jira/browse/HADOOP-14267) | Make DistCpOptions class immutable |  Major | tools/distcp | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14202](https://issues.apache.org/jira/browse/HADOOP-14202) | fix jsvc/secure user var inconsistencies |  Major | scripts | Allen Wittenauer | Allen Wittenauer |
+| [HADOOP-14174](https://issues.apache.org/jira/browse/HADOOP-14174) | Set default ADLS access token provider type to ClientCredential |  Major | fs/adl | John Zhuge | John Zhuge |
+| [YARN-6298](https://issues.apache.org/jira/browse/YARN-6298) | Metric preemptCall is not used in new preemption |  Blocker | fairscheduler | Yufei Gu | Yufei Gu |
+| [HADOOP-14285](https://issues.apache.org/jira/browse/HADOOP-14285) | Update minimum version of Maven from 3.0 to 3.3 |  Major | . | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-14225](https://issues.apache.org/jira/browse/HADOOP-14225) | Remove xmlenc dependency |  Minor | . | Chris Douglas | Chris Douglas |
+| [HADOOP-13665](https://issues.apache.org/jira/browse/HADOOP-13665) | Erasure Coding codec should support fallback coder |  Blocker | io | Wei-Chiu Chuang | Kai Sasaki |
+| [HADOOP-14248](https://issues.apache.org/jira/browse/HADOOP-14248) | Retire SharedInstanceProfileCredentialsProvider in trunk. |  Major | fs/s3 | Mingliang Liu | Mingliang Liu |
+| [HDFS-11565](https://issues.apache.org/jira/browse/HDFS-11565) | Use compact identifiers for built-in ECPolicies in HdfsFileStatus |  Blocker | erasure-coding | Andrew Wang | Andrew Wang |

[08/50] [abbrv] hadoop git commit: HADOOP-14479. Erasurecode testcase failures with native enabled. Contributed by Sammi Chen

2017-07-07 Thread xgong
HADOOP-14479. Erasurecode testcase failures with native enabled. Contributed by 
Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea1da39b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea1da39b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea1da39b

Branch: refs/heads/YARN-5734
Commit: ea1da39b1908de7739eaecded64074355f0fd5d0
Parents: 20ba86d
Author: Kai Zheng 
Authored: Thu Jun 29 11:26:36 2017 +0800
Committer: Kai Zheng 
Committed: Thu Jun 29 11:26:36 2017 +0800

--
 .../io/erasurecode/coder/ErasureEncodingStep.java|  2 +-
 .../apache/hadoop/io/erasurecode/jni_xor_decoder.c   |  5 ++---
 .../apache/hadoop/io/erasurecode/jni_xor_encoder.c   |  2 +-
 .../io/erasurecode/TestCodecRawCoderMapping.java | 15 +++
 4 files changed, 15 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea1da39b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureEncodingStep.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureEncodingStep.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureEncodingStep.java
index f0b2be8..df4ed4b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureEncodingStep.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureEncodingStep.java
@@ -62,6 +62,6 @@ public class ErasureEncodingStep implements ErasureCodingStep {
 
   @Override
   public void finish() {
-    rawEncoder.release();
+    // do nothing
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea1da39b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_decoder.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_decoder.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_decoder.c
index ff0d3c5..13f66e2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_decoder.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_decoder.c
@@ -31,7 +31,7 @@
 typedef struct _XOREncoder {
   IsalCoder isalCoder;
   unsigned char* inputs[MMAX];
-  unsigned char* outputs[1];
+  unsigned char* outputs[KMAX];
 } XORDecoder;
 
 JNIEXPORT void JNICALL
@@ -58,8 +58,7 @@ Java_org_apache_hadoop_io_erasurecode_rawcoder_NativeXORRawDecoder_decodeImpl(
   numParityUnits = ((IsalCoder*)xorDecoder)->numParityUnits;
   chunkSize = (int)dataLen;
 
-  getInputs(env, inputs, inputOffsets, xorDecoder->inputs,
-            numDataUnits + numParityUnits);
+  getInputs(env, inputs, inputOffsets, xorDecoder->inputs, numDataUnits);
   getOutputs(env, outputs, outputOffsets, xorDecoder->outputs, numParityUnits);
 
   for (i = 0; i < numDataUnits + numParityUnits; i++) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea1da39b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_encoder.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_encoder.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_encoder.c
index 3cfa01c..6efb5fb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_encoder.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_encoder.c
@@ -31,7 +31,7 @@
 typedef struct _XOREncoder {
   IsalCoder isalCoder;
   unsigned char* inputs[MMAX];
-  unsigned char* outputs[1];
+  unsigned char* outputs[KMAX];
 } XOREncoder;
 
 JNIEXPORT void JNICALL

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea1da39b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRawCoderMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRawCoderMapping.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRawCoderMapping.java
index 4a6fbd4..c7874dc 100644
--- 

[19/50] [abbrv] hadoop git commit: Revert "HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang."

2017-07-07 Thread xgong
Revert "HDFS-12043. Add counters for block re-replication. Contributed by Chen 
Liang."

Accidentally committed the wrong patch version, reverting to fix that.

This reverts commit 900221f95ea9fe1936b4d5f277e6047ee8734eca.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2f0cbd9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2f0cbd9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2f0cbd9

Branch: refs/heads/YARN-5734
Commit: a2f0cbd92f7e90909cf817c261a5fae13a9695b4
Parents: 3be2659
Author: Arpit Agarwal 
Authored: Fri Jun 30 10:19:27 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Jun 30 10:19:27 2017 -0700

--
 .../server/blockmanagement/BlockManager.java| 13 +--
 .../PendingReconstructionBlocks.java|  8 +-
 .../namenode/metrics/NameNodeMetrics.java   | 18 
 .../TestPendingReconstruction.java  | 86 +---
 4 files changed, 7 insertions(+), 118 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f0cbd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a5ee30b..a0c4698 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1851,7 +1851,7 @@ public class BlockManager implements BlockStatsMXBean {
 (pendingReplicaNum > 0 || isPlacementPolicySatisfied(block));
   }
 
-  BlockReconstructionWork scheduleReconstruction(BlockInfo block,
+  private BlockReconstructionWork scheduleReconstruction(BlockInfo block,
       int priority) {
 // skip abandoned block or block reopened for append
 if (block.isDeleted() || !block.isCompleteOrCommitted()) {
@@ -1873,7 +1873,6 @@ public class BlockManager implements BlockStatsMXBean {
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be reconstructed from any node
   LOG.debug("Block {} cannot be reconstructed from any node", block);
-  NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
   return null;
 }
 
@@ -1886,7 +1885,6 @@ public class BlockManager implements BlockStatsMXBean {
   neededReconstruction.remove(block, priority);
   blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
   " it has enough replicas", block);
-  NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
   return null;
 }
 
@@ -1902,7 +1900,6 @@ public class BlockManager implements BlockStatsMXBean {
     if (block.isStriped()) {
       if (pendingNum > 0) {
         // Wait the previous reconstruction to finish.
-        NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
         return null;
       }
 
@@ -3730,8 +3727,8 @@ public class BlockManager implements BlockStatsMXBean {
* The given node is reporting that it received a certain block.
*/
   @VisibleForTesting
-  public void addBlock(DatanodeStorageInfo storageInfo, Block block,
-      String delHint) throws IOException {
+  void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
+      throws IOException {
 DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
 // Decrement number of blocks scheduled to this datanode.
 // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with 
@@ -3754,9 +3751,7 @@ public class BlockManager implements BlockStatsMXBean {
 BlockInfo storedBlock = getStoredBlock(block);
 if (storedBlock != null &&
 block.getGenerationStamp() == storedBlock.getGenerationStamp()) {
-      if (pendingReconstruction.decrement(storedBlock, node)) {
-        NameNode.getNameNodeMetrics().incSuccessfulReReplications();
-      }
+      pendingReconstruction.decrement(storedBlock, node);
 }
 processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
 delHintNode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f0cbd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
 

[18/50] [abbrv] hadoop git commit: YARN-6694. Add certain envs to the default yarn.nodemanager.env-whitelist. Contributed by Jian He

2017-07-07 Thread xgong
YARN-6694. Add certain envs to the default yarn.nodemanager.env-whitelist. 
Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3be2659f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3be2659f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3be2659f

Branch: refs/heads/YARN-5734
Commit: 3be2659f83965a312d1095f03b7a95c7781c10af
Parents: af2773f
Author: Xuan 
Authored: Thu Jun 29 20:10:35 2017 -0700
Committer: Xuan 
Committed: Thu Jun 29 20:10:35 2017 -0700

--
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3be2659f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index cbd5345..81c9cb2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1013,7 +1013,7 @@
   <property>
     <description>Environment variables that containers may override rather
    than use NodeManager's default.</description>
    <name>yarn.nodemanager.env-whitelist</name>
-    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME</value>
+    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME,PATH,LANG,TZ</value>
   </property>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[15/50] [abbrv] hadoop git commit: HADOOP-14596. AWS SDK 1.11+ aborts() on close() if > 0 bytes in stream; logs error. Contributed by Steve Loughran

2017-07-07 Thread xgong
HADOOP-14596. AWS SDK 1.11+ aborts() on close() if > 0 bytes in stream; logs 
error. Contributed by Steve Loughran

Change-Id: I49173bf6163796903d64594a8ca8a4bd26ad2bfc


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72993b33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72993b33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72993b33

Branch: refs/heads/YARN-5734
Commit: 72993b33b704991f2a0bf743f31b164e58a2dabc
Parents: ec97519
Author: Mingliang Liu 
Authored: Thu Jun 29 17:00:25 2017 -0700
Committer: Mingliang Liu 
Committed: Thu Jun 29 17:07:52 2017 -0700

--
 .../apache/hadoop/fs/s3a/S3AInputStream.java| 26 +---
 1 file changed, 22 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72993b33/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index 7d322a5..b88b7c1 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileSystem;
 
 import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.EOFException;
 import java.io.IOException;
@@ -78,7 +79,8 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
   private final String key;
   private final long contentLength;
   private final String uri;
-  public static final Logger LOG = S3AFileSystem.LOG;
+  private static final Logger LOG =
+      LoggerFactory.getLogger(S3AInputStream.class);
   private final S3AInstrumentation.InputStreamStatistics streamStatistics;
   private S3AEncryptionMethods serverSideEncryptionAlgorithm;
   private String serverSideEncryptionKey;
@@ -451,13 +453,27 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
       // if the amount of data remaining in the current request is greater
       // than the readahead value: abort.
       long remaining = remainingInCurrentRequest();
+      LOG.debug("Closing stream {}: {}", reason,
+          forceAbort ? "abort" : "soft");
       boolean shouldAbort = forceAbort || remaining > readahead;
       if (!shouldAbort) {
         try {
           // clean close. This will read to the end of the stream,
           // so, while cleaner, can be pathological on a multi-GB object
+
+          // explicitly drain the stream
+          long drained = 0;
+          while (wrappedStream.read() >= 0) {
+            drained++;
+          }
+          LOG.debug("Drained stream of {} bytes", drained);
+
+          // now close it
           wrappedStream.close();
-          streamStatistics.streamClose(false, remaining);
+          // this MUST come after the close, so that if the IO operations fail
+          // and an abort is triggered, the initial attempt's statistics
+          // aren't collected.
+          streamStatistics.streamClose(false, drained);
         } catch (IOException e) {
           // exception escalates to an abort
           LOG.debug("When closing {} stream for {}", uri, reason, e);
@@ -467,13 +483,15 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
       if (shouldAbort) {
         // Abort, rather than just close, the underlying stream.  Otherwise, the
         // remaining object payload is read from S3 while closing the stream.
+        LOG.debug("Aborting stream");
         wrappedStream.abort();
         streamStatistics.streamClose(true, remaining);
       }
-      LOG.debug("Stream {} {}: {}; streamPos={}, nextReadPos={}," +
+      LOG.debug("Stream {} {}: {}; remaining={} streamPos={},"
+          + " nextReadPos={}," +
           " request range {}-{} length={}",
           uri, (shouldAbort ? "aborted" : "closed"), reason,
-          pos, nextReadPos,
+          remaining, pos, nextReadPos,
           contentRangeStart, contentRangeFinish,
           length);
       wrappedStream = null;
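
A minimal sketch (illustrative names, not the S3A internals) of the
close-versus-abort policy the patch settles on: when little data remains,
drain the stream and close it so the pooled HTTP connection stays reusable;
otherwise abort, which discards the connection but avoids reading a
potentially multi-GB tail.

    import java.io.IOException;
    import java.io.InputStream;

    final class StreamCloser {
      /** Returns the number of bytes drained on a clean close, or -1 on abort. */
      static long closeOrAbort(InputStream in, Runnable abort, long remaining,
          long readahead, boolean forceAbort) throws IOException {
        if (forceAbort || remaining > readahead) {
          abort.run(); // e.g. S3ObjectInputStream.abort(): drops the connection
          return -1;
        }
        long drained = 0;
        while (in.read() >= 0) { // drain to EOF so the connection can be pooled
          drained++;
        }
        in.close();
        return drained;
      }
    }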


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/50] [abbrv] hadoop git commit: HDFS-11881. NameNode consumes a lot of memory for snapshot diff report generation. Contributed by Manoj Govindassamy.

2017-07-07 Thread xgong
HDFS-11881. NameNode consumes a lot of memory for snapshot diff report 
generation. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16c8dbde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16c8dbde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16c8dbde

Branch: refs/heads/YARN-5734
Commit: 16c8dbde574f49827fde5ee9add1861ee65d4645
Parents: ea1da39
Author: Wei-Chiu Chuang 
Authored: Thu Jun 29 06:38:41 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Jun 29 06:38:41 2017 -0700

--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  5 +-
 .../namenode/snapshot/SnapshotDiffInfo.java |  5 +-
 .../hadoop/hdfs/TestSnapshotCommands.java   | 50 
 3 files changed, 56 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16c8dbde/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 63d0025..feb3061 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -186,6 +186,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.LimitInputStream;
 
@@ -1455,7 +1456,7 @@ public class PBHelperClient {
     String toSnapshot = reportProto.getToSnapshot();
     List<SnapshotDiffReportEntryProto> list = reportProto
         .getDiffReportEntriesList();
-    List<DiffReportEntry> entries = new ArrayList<>();
+    List<DiffReportEntry> entries = new ChunkedArrayList<>();
     for (SnapshotDiffReportEntryProto entryProto : list) {
       DiffReportEntry entry = convert(entryProto);
       if (entry != null)
@@ -2392,7 +2393,7 @@ public class PBHelperClient {
       return null;
     }
     List<DiffReportEntry> entries = report.getDiffList();
-    List<SnapshotDiffReportEntryProto> entryProtos = new ArrayList<>();
+    List<SnapshotDiffReportEntryProto> entryProtos = new ChunkedArrayList<>();
     for (DiffReportEntry entry : entries) {
       SnapshotDiffReportEntryProto entryProto = convert(entry);
       if (entryProto != null)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16c8dbde/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
index a576c57..fcd80ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.util.Diff.ListType;
 
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.SignedBytes;
+import org.apache.hadoop.util.ChunkedArrayList;
 
 /**
  * A class describing the difference between snapshots of a snapshottable
@@ -186,7 +187,7 @@ class SnapshotDiffInfo {
    * @return A {@link SnapshotDiffReport} describing the difference
    */
   public SnapshotDiffReport generateReport() {
-    List<DiffReportEntry> diffReportList = new ArrayList<DiffReportEntry>();
+    List<DiffReportEntry> diffReportList = new ChunkedArrayList<>();
     for (Map.Entry<INode, byte[][]> drEntry : diffMap.entrySet()) {
       INode node = drEntry.getKey();
       byte[][] path = drEntry.getValue();
@@ -213,7 +214,7 @@
    */
   private List<DiffReportEntry> generateReport(ChildrenDiff dirDiff,
       byte[][] parentPath, boolean fromEarlier,
       Map<Long, RenameEntry> renameMap) {
-    List<DiffReportEntry> list = new ArrayList<DiffReportEntry>();
+    List<DiffReportEntry> list = new ChunkedArrayList<>();
     List<INode> created = dirDiff.getList(ListType.CREATED);
     List<INode> deleted = dirDiff.getList(ListType.DELETED);
     byte[][] fullPath = new byte[parentPath.length + 1][];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16c8dbde/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
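
The fix replaces ArrayList with ChunkedArrayList on the report-building paths.
A short illustration (not from the patch) of why that helps memory:
ChunkedArrayList grows by appending fixed-size chunks instead of reallocating
and copying one ever-larger backing array, so a report with millions of
entries needs neither full-array copies nor a single huge contiguous
allocation.

    import java.util.List;
    import org.apache.hadoop.util.ChunkedArrayList;

    // Append-heavy workload, as in generateReport(): amortized O(1) adds,
    // no O(n) re-copy when the list outgrows its current capacity.
    List<String> entries = new ChunkedArrayList<>();
    for (int i = 0; i < 10_000_000; i++) {
      entries.add("entry-" + i);
    }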

[05/50] [abbrv] hadoop git commit: YARN-5311. Document graceful decommission CLI and usage. Contributed by Elek, Marton.

2017-07-07 Thread xgong
YARN-5311. Document graceful decommission CLI and usage. Contributed by Elek, 
Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e3eebc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e3eebc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e3eebc9

Branch: refs/heads/YARN-5734
Commit: 4e3eebc943835077e3dd0df9e0b9239ae604cb89
Parents: 990aa34
Author: Junping Du 
Authored: Wed Jun 28 15:32:04 2017 -0700
Committer: Junping Du 
Committed: Wed Jun 28 15:32:04 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   4 +
 .../src/site/markdown/GracefulDecommission.md   | 168 +++
 .../src/site/markdown/ResourceManagerRest.md|   2 +-
 3 files changed, 173 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e3eebc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ca71d35..a6d3360 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -831,6 +831,10 @@ public class YarnConfiguration extends Configuration {
   RM_PREFIX + "nodemanager-graceful-decommission-timeout-secs";
   public static final int DEFAULT_RM_NODE_GRACEFUL_DECOMMISSION_TIMEOUT = 3600;
 
+  /**
+   * Period in seconds of the poll timer task inside
+   * DecommissioningNodesWatcher to identify and take care of
+   * DECOMMISSIONING nodes missing regular heartbeats.
+   */
   public static final String RM_DECOMMISSIONING_NODES_WATCHER_POLL_INTERVAL =
   RM_PREFIX + "decommissioning-nodes-watcher.poll-interval-secs";
   public static final int

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e3eebc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md
new file mode 100644
index 000..2acb3d2
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md
@@ -0,0 +1,168 @@
+
+
+
+Graceful Decommission of Yarn Nodes
+===
+
+* [Overview](#overview)
+* [Features](#features)
+  * [NodesListManager detects and handles include and exclude list changes](#nodeslistmanager-detects-and-handles-include-and-exclude-list-changes)
+  * [RMNode handles decommission events](#rmnode-handles-decommission-events)
+  * [Automatic and asynchronous tracking of decommissioning nodes status](#automatic-and-asynchronous-tracking-of-decommissioning-nodes-status)
+  * [Per-Node decommission timeout support](#per-node-decommission-timeout-support)
+* [Configuration](#configuration)
+
+
+
+Overview
+--------
+
+YARN scales easily: any new NodeManager can join the configured
+ResourceManager and start executing jobs. But to achieve full elasticity we
+also need a decommissioning process that removes existing nodes and scales
+the cluster down.
+
+YARN nodes can be decommissioned in one of two ways: NORMAL or GRACEFUL.
+
+Normal decommission of YARN nodes means an immediate shutdown.
+
+Graceful decommission of YARN nodes is the mechanism to decommission NMs
+while minimizing the impact on running applications. Once a node is in the
+DECOMMISSIONING state, the RM won't schedule new containers on it and will
+wait for running containers and applications to complete (or for the
+decommissioning timeout to be exceeded) before transitioning the node to
+DECOMMISSIONED.
+
+## Quick start
+
+To perform a normal decommission:
+
+1. Start a YARN cluster (with NodeManagers and a ResourceManager)
+2. Start a YARN job (for example with `yarn jar...`)
+3. Add the `yarn.resourcemanager.nodes.exclude-path` property to your
+`yarn-site.xml` (Note: you don't need to restart the ResourceManager)
+4. Create a text file (at the location defined in the previous step) with a
+single line containing the name of the selected NodeManager
+5. Call `./bin/yarn rmadmin -refreshNodes`
+6. Result: the NodeManager is decommissioned *immediately*
+
+In the next sections we will cover some more detailed usage (for example: 
using graceful decommissioning 

[06/50] [abbrv] hadoop git commit: YARN-6280. Introduce deselect query param to skip ResourceRequest from getApp/getApps REST API. Contributed by Lantao Jin.

2017-07-07 Thread xgong
YARN-6280. Introduce deselect query param to skip ResourceRequest from 
getApp/getApps REST API. Contributed by Lantao Jin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1edca10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1edca10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1edca10

Branch: refs/heads/YARN-5734
Commit: c1edca101c32a5999100bc6031784274d416b599
Parents: 4e3eebc
Author: Sunil G 
Authored: Wed Jun 28 15:40:58 2017 -0700
Committer: Sunil G 
Committed: Wed Jun 28 15:40:58 2017 -0700

--
 .../resourcemanager/webapp/DeSelectFields.java  | 127 +
 .../webapp/RMWebServiceProtocol.java|   7 +-
 .../resourcemanager/webapp/RMWebServices.java   |  18 +-
 .../resourcemanager/webapp/dao/AppInfo.java |  29 ++-
 .../webapp/TestRMWebServices.java   |   4 +-
 .../webapp/TestRMWebServicesApps.java   |  64 +++
 .../src/site/markdown/ResourceManagerRest.md| 178 ++-
 7 files changed, 409 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1edca10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
new file mode 100644
index 000..258bbfa
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+/**
+ * DeSelectFields makes the /apps API more flexible.
+ * It can be used to strip off more fields if there's such a use case in future.
+ * You can simply extend it via two steps:
+ *  1. add a DeSelectType enum with a string literal
+ *  2. write your logic based on
+ * the return of method contains(DeSelectType)
+ */
+public class DeSelectFields {
+  private static final Log LOG =
+  LogFactory.getLog(DeSelectFields.class.getName());
+
+  private final Set types;
+
+  public DeSelectFields() {
+this.types = new HashSet();
+  }
+
+  /**
+   * Initial DeSelectFields with unselected fields.
+   * @param unselectedFields a set of unselected field.
+   */
+  public void initFields(Set unselectedFields) {
+if (unselectedFields == null) {
+  return;
+}
+for (String field : unselectedFields) {
+  if (!field.trim().isEmpty()) {
+String[] literalsArray = field.split(",");
+for (String literals : literalsArray) {
+  if (literals != null && !literals.trim().isEmpty()) {
+DeSelectType type = DeSelectType.obtainType(literals);
+if (type == null) {
+  LOG.warn("Invalid deSelects string " + literals.trim());
+  DeSelectType[] typeArray = DeSelectType.values();
+  String allSuppportLiterals = Arrays.toString(typeArray);
+  throw new BadRequestException("Invalid deSelects string "
+  + literals.trim() + " specified. It should be one of "
+  + allSuppportLiterals);
+} else {
+  this.types.add(type);
+}
+  }
+}
+  }
+}
+  }
+
+  /**
+   * Determine the 
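
For orientation, a sketch of how the REST layer is expected to apply
?deSelects=resourceRequests, inferred from the class above (the enum constant
name and the contains() check are assumptions, not quoted from this patch):

    Set<String> params = new HashSet<>(Arrays.asList("resourceRequests"));
    DeSelectFields deSelectFields = new DeSelectFields();
    deSelectFields.initFields(params); // throws BadRequestException on unknown literals
    if (deSelectFields.contains(DeSelectFields.DeSelectType.RESOURCE_REQUESTS)) {
      // build AppInfo without the (potentially very large) ResourceRequest list
    }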

[11/50] [abbrv] hadoop git commit: HADOOP-14601. Azure: Reuse ObjectMapper. Contributed by Mingliang Liu

2017-07-07 Thread xgong
HADOOP-14601. Azure: Reuse ObjectMapper. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b08cc973
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b08cc973
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b08cc973

Branch: refs/heads/YARN-5734
Commit: b08cc973964b4eb7e7a7445a440b19d3a0f3d4d5
Parents: 0c52da7
Author: Mingliang Liu 
Authored: Tue Jun 27 16:27:09 2017 -0700
Committer: Mingliang Liu 
Committed: Thu Jun 29 09:22:00 2017 -0700

--
 .../org/apache/hadoop/fs/azure/NativeAzureFileSystem.java |  8 +---
 .../apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java | 10 +-
 .../apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java  |  7 ---
 3 files changed, 14 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b08cc973/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index d605e81..22f79ff 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -46,6 +46,7 @@ import com.fasterxml.jackson.core.JsonParseException;
 import com.fasterxml.jackson.core.JsonParser;
 import com.fasterxml.jackson.databind.JsonMappingException;
 import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectReader;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -109,6 +110,9 @@ public class NativeAzureFileSystem extends FileSystem {
 private static final int FORMATTING_BUFFER = 1;
 private boolean committed;
 public static final String SUFFIX = "-RenamePending.json";
+private static final ObjectReader READER = new ObjectMapper()
+.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true)
+.readerFor(JsonNode.class);
 
 // Prepare in-memory information needed to do or redo a folder rename.
 public FolderRenamePending(String srcKey, String dstKey, SelfRenewingLease 
lease,
@@ -168,11 +172,9 @@ public class NativeAzureFileSystem extends FileSystem {
   String contents = new String(bytes, 0, l, Charset.forName("UTF-8"));
 
   // parse the JSON
-      ObjectMapper objMapper = new ObjectMapper();
-      objMapper.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true);
       JsonNode json = null;
       try {
-        json = objMapper.readValue(contents, JsonNode.class);
+        json = READER.readValue(contents);
         this.committed = true;
   } catch (JsonMappingException e) {
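
The pattern applied throughout this change: construct the ObjectMapper once,
derive an immutable ObjectReader from it, and share that reader across calls
and threads instead of building a mapper per request. A standalone sketch of
the same idiom (plain Jackson, nothing Azure-specific):

    import com.fasterxml.jackson.core.JsonParser;
    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.ObjectReader;

    public final class JsonParsing {
      // ObjectMapper construction is comparatively expensive; a fully
      // configured ObjectReader is immutable and thread-safe, so one static
      // instance can serve all callers.
      private static final ObjectReader READER = new ObjectMapper()
          .configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true)
          .readerFor(JsonNode.class);

      static JsonNode parse(String contents) throws java.io.IOException {
        return READER.readValue(contents);
      }
    }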
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b08cc973/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
index 387d911..0e9c700 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
@@ -24,6 +24,7 @@ import java.net.URISyntaxException;
 import java.net.UnknownHostException;
 import java.security.PrivilegedExceptionAction;
 
+import com.fasterxml.jackson.databind.ObjectReader;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.Validate;
 import org.apache.hadoop.conf.Configuration;
@@ -56,6 +57,9 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
   public static final Logger LOG =
   LoggerFactory.getLogger(AzureNativeFileSystemStore.class);
 
+  private static final ObjectReader RESPONSE_READER = new ObjectMapper()
+  .readerFor(RemoteSASKeyGenerationResponse.class);
+
   /**
* Container SAS Key generation OP name. {@value}
*/
@@ -276,11 +280,7 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
         httpGet.setHeader("Cookie", AuthenticatedURL.AUTH_COOKIE + "=" + token);
       }
       String responseBody = remoteCallHelper.makeRemoteGetRequest(httpGet);
-
-      ObjectMapper objectMapper = new ObjectMapper();
-      return 

[02/50] [abbrv] hadoop git commit: Add -E option in 'ls' to list erasure coding policy of each file and directory if applicable. Contributed by luhuichun via lei.

2017-07-07 Thread xgong
Add -E option in 'ls' to list erasure coding policy of each file and directory 
if applicable. Contributed by luhuichun via lei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6df0fdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6df0fdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6df0fdb

Branch: refs/heads/YARN-5734
Commit: d6df0fdbbda42b4ddab3810b5ac57336c6241ba7
Parents: 25d891a
Author: Lei Xu 
Authored: Wed Jun 28 13:47:23 2017 -0700
Committer: Lei Xu 
Committed: Wed Jun 28 13:47:23 2017 -0700

--
 .../java/org/apache/hadoop/fs/shell/Ls.java | 63 +++-
 .../src/site/markdown/FileSystemShell.md|  4 +-
 .../src/test/resources/testConf.xml |  6 +-
 .../test/resources/testErasureCodingConf.xml| 34 +++
 4 files changed, 89 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6df0fdb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
index 47e87f5..221b3cb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.ContentSummary;
 
 /**
  * Get a listing of all files in that match the file patterns.
@@ -54,13 +55,14 @@ class Ls extends FsCommand {
   private static final String OPTION_MTIME = "t";
   private static final String OPTION_ATIME = "u";
   private static final String OPTION_SIZE = "S";
+  private static final String OPTION_ECPOLICY = "e";
 
   public static final String NAME = "ls";
   public static final String USAGE = "[-" + OPTION_PATHONLY + "] [-" +
   OPTION_DIRECTORY + "] [-" + OPTION_HUMAN + "] [-" +
   OPTION_HIDENONPRINTABLE + "] [-" + OPTION_RECURSIVE + "] [-" +
   OPTION_MTIME + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE + "] [-" +
-  OPTION_ATIME + "] [<path> ...]";
+  OPTION_ATIME + "] [-" + OPTION_ECPOLICY + "] [<path> ...]";
 
   public static final String DESCRIPTION =
   "List the contents that match the specified file pattern. If " +
@@ -91,7 +93,9 @@ class Ls extends FsCommand {
   "  Reverse the order of the sort.\n" +
   "  -" + OPTION_ATIME +
   "  Use time of last access instead of modification for\n" +
-  "  display and sorting.";
+  "  display and sorting.\n"+
+  "  -" + OPTION_ECPOLICY +
+  "  Display the erasure coding policy of files and directories.\n";
 
   protected final SimpleDateFormat dateFormat =
 new SimpleDateFormat("-MM-dd HH:mm");
@@ -104,6 +108,7 @@ class Ls extends FsCommand {
   private boolean orderTime;
   private boolean orderSize;
   private boolean useAtime;
+  private boolean displayECPolicy;
   private Comparator orderComparator;
 
   protected boolean humanReadable = false;
@@ -129,7 +134,7 @@ class Ls extends FsCommand {
 CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
 OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN,
 OPTION_HIDENONPRINTABLE, OPTION_RECURSIVE, OPTION_REVERSE,
-OPTION_MTIME, OPTION_SIZE, OPTION_ATIME);
+OPTION_MTIME, OPTION_SIZE, OPTION_ATIME, OPTION_ECPOLICY);
 cf.parse(args);
 pathOnly = cf.getOpt(OPTION_PATHONLY);
 dirRecurse = !cf.getOpt(OPTION_DIRECTORY);
@@ -140,6 +145,7 @@ class Ls extends FsCommand {
 orderTime = cf.getOpt(OPTION_MTIME);
 orderSize = !orderTime && cf.getOpt(OPTION_SIZE);
 useAtime = cf.getOpt(OPTION_ATIME);
+displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
 if (args.isEmpty()) args.add(Path.CUR_DIR);
 
 initialiseOrderComparator();
@@ -245,25 +251,42 @@ class Ls extends FsCommand {
   return;
 }
 FileStatus stat = item.stat;
-String line = String.format(lineFormat,
-(stat.isDirectory() ? "d" : "-"),
-stat.getPermission() + (stat.getPermission().getAclBit() ? "+" : " "),
-(stat.isFile() ? stat.getReplication() : "-"),
-stat.getOwner(),
-stat.getGroup(),
-formatSize(stat.getLen()),
-dateFormat.format(new Date(isUseAtime()
-? stat.getAccessTime()
-: stat.getModificationTime())),
-isHideNonPrintable() ? new 

[04/50] [abbrv] hadoop git commit: HADOOP-14609. NPE in AzureNativeFileSystemStore.checkContainer() if StorageException lacks an error code. Contributed by Steve Loughran

2017-07-07 Thread xgong
HADOOP-14609. NPE in AzureNativeFileSystemStore.checkContainer() if 
StorageException lacks an error code. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/990aa34d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/990aa34d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/990aa34d

Branch: refs/heads/YARN-5734
Commit: 990aa34de23c625163745ebc338483065d955bbe
Parents: e9d8bdf
Author: Mingliang Liu 
Authored: Wed Jun 28 14:18:59 2017 -0700
Committer: Mingliang Liu 
Committed: Wed Jun 28 14:18:59 2017 -0700

--
 .../hadoop/fs/azure/AzureNativeFileSystemStore.java   | 10 +-
 .../org/apache/hadoop/fs/azure/SelfRenewingLease.java |  4 ++--
 .../apache/hadoop/fs/azure/TestBlobDataValidation.java|  6 +++---
 3 files changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/990aa34d/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index d026220..5fa964a 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -1194,8 +1194,8 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 container.downloadAttributes(getInstrumentedContext());
 currentKnownContainerState = ContainerState.Unknown;
   } catch (StorageException ex) {
-        if (ex.getErrorCode().equals(
-            StorageErrorCode.RESOURCE_NOT_FOUND.toString())) {
+        if (StorageErrorCode.RESOURCE_NOT_FOUND.toString()
+            .equals(ex.getErrorCode())) {
   currentKnownContainerState = ContainerState.DoesntExist;
 } else {
   throw ex;
@@ -1596,7 +1596,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   if (t != null && t instanceof StorageException) {
 StorageException se = (StorageException) t;
 // If we got this exception, the blob should have already been created
-        if (!se.getErrorCode().equals("LeaseIdMissing")) {
+        if (!"LeaseIdMissing".equals(se.getErrorCode())) {
   throw new AzureException(e);
 }
   } else {
@@ -2427,7 +2427,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   // 2. It got there after one-or-more retries THEN
   // we swallow the exception.
   if (e.getErrorCode() != null &&
-          e.getErrorCode().equals("BlobNotFound") &&
+          "BlobNotFound".equals(e.getErrorCode()) &&
   operationContext.getRequestResults().size() > 1 &&
   operationContext.getRequestResults().get(0).getException() != null) {
 LOG.debug("Swallowing delete exception on retry: {}", e.getMessage());
@@ -2478,7 +2478,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   Throwable t = e.getCause();
   if(t != null && t instanceof StorageException) {
 StorageException se = (StorageException) t;
-        if(se.getErrorCode().equals(("LeaseIdMissing"))){
+        if ("LeaseIdMissing".equals(se.getErrorCode())){
           SelfRenewingLease lease = null;
           try {
             lease = acquireLease(key);
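
Every hunk applies the same null-safe idiom: put the string constant on the
left of equals(), because StorageException.getErrorCode() can return null. A
tiny self-contained illustration (not from the patch):

    public class YodaEquals {
      public static void main(String[] args) {
        String errorCode = null; // e.g. a StorageException without an error code
        // Constant-first comparison: simply false when errorCode is null.
        System.out.println("BlobNotFound".equals(errorCode)); // prints false
        // The pre-patch order would throw a NullPointerException here:
        // errorCode.equals("BlobNotFound");
      }
    }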

http://git-wip-us.apache.org/repos/asf/hadoop/blob/990aa34d/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
index 76098f3..00d5e99 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
@@ -82,7 +82,7 @@ public class SelfRenewingLease {
 // Throw again if we don't want to keep waiting.
 // We expect it to be that the lease is already present,
 // or in some cases that the blob does not exist.
-if (!e.getErrorCode().equals("LeaseAlreadyPresent")) {
+if (!"LeaseAlreadyPresent".equals(e.getErrorCode())) {
   LOG.info(
 "Caught exception when trying to get lease on blob "
 + blobWrapper.getUri().toString() + ". " + 

hadoop git commit: MAPREDUCE-6246. DBOutputFormat.java appending extra semicolon to query which is incompatible with DB2. Contributed by ramtin and Gergely Novák.

2017-07-07 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1c40df5bc -> f823f9fd7


MAPREDUCE-6246. DBOutputFormat.java appending extra semicolon to query which is 
incompatible with DB2. Contributed by ramtin and Gergely Novák.

(cherry picked from commit f484a6ff602d48413556a1d046670e2003c71c2e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f823f9fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f823f9fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f823f9fd

Branch: refs/heads/branch-2
Commit: f823f9fd784fa4944178247a82df24fba03c2051
Parents: 1c40df5
Author: Junping Du 
Authored: Fri Jul 7 13:23:43 2017 -0700
Committer: Junping Du 
Committed: Fri Jul 7 13:27:56 2017 -0700

--
 .../hadoop/mapreduce/lib/db/DBOutputFormat.java | 15 ++-
 .../mapreduce/lib/db/TestDBOutputFormat.java| 45 
 2 files changed, 58 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f823f9fd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
index 2e3a9d8..c222bf5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.lib.db;
 
 import java.io.IOException;
 import java.sql.Connection;
+import java.sql.DatabaseMetaData;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 
@@ -51,6 +52,8 @@ public class DBOutputFormat<K extends DBWritable, V>
 extends OutputFormat<K, V> {
 
   private static final Log LOG = LogFactory.getLog(DBOutputFormat.class);
+  public String dbProductName = "DEFAULT";
+
   public void checkOutputSpecs(JobContext context) 
   throws IOException, InterruptedException {}
 
@@ -158,7 +161,12 @@ extends OutputFormat<K, V> {
         query.append(",");
       }
     }
-    query.append(");");
+
+    if (dbProductName.startsWith("DB2") || dbProductName.startsWith("ORACLE")) {
+      query.append(")");
+    } else {
+      query.append(");");
+    }
 
     return query.toString();
   }
@@ -177,7 +185,10 @@ extends OutputFormat<K, V> {
     try {
       Connection connection = dbConf.getConnection();
       PreparedStatement statement = null;
-
+
+      DatabaseMetaData dbMeta = connection.getMetaData();
+      this.dbProductName = dbMeta.getDatabaseProductName().toUpperCase();
+
       statement = connection.prepareStatement(
           constructQuery(tableName, fieldNames));
       return new DBRecordWriter(connection, statement);
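
Concretely, constructQuery now yields the following (a sketch using the real
method but a hypothetical table and field names; dbProductName is the public
field getRecordWriter() fills in from DatabaseMetaData):

    DBOutputFormat<DBWritable, NullWritable> format = new DBOutputFormat<>();
    String[] fields = {"id", "name"};
    // Default product: trailing semicolon kept.
    // -> "INSERT INTO t (id,name) VALUES (?,?);"
    String q1 = format.constructQuery("t", fields);
    format.dbProductName = "DB2"; // normally set from metadata, not by hand
    // -> "INSERT INTO t (id,name) VALUES (?,?)"  (DB2/Oracle reject the ';')
    String q2 = format.constructQuery("t", fields);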

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f823f9fd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
index 014855f..e547c8a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.mapreduce.lib.db;
 
 import java.io.IOException;
+import java.lang.reflect.Field;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
@@ -26,6 +28,7 @@ import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
 
 public class TestDBOutputFormat {
   private String[] fieldNames = new String[] { "id", "name", "value" };
@@ -47,6 +50,48 @@ public class 

hadoop git commit: MAPREDUCE-6246. DBOutputFormat.java appending extra semicolon to query which is incompatible with DB2. Contributed by ramtin and Gergely Novák.

2017-07-07 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/trunk f10864a82 -> f484a6ff6


MAPREDUCE-6246. DBOutputFormat.java appending extra semicolon to query which is 
incompatible with DB2. Contributed by ramtin and Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f484a6ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f484a6ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f484a6ff

Branch: refs/heads/trunk
Commit: f484a6ff602d48413556a1d046670e2003c71c2e
Parents: f10864a
Author: Junping Du 
Authored: Fri Jul 7 13:23:43 2017 -0700
Committer: Junping Du 
Committed: Fri Jul 7 13:26:16 2017 -0700

--
 .../hadoop/mapreduce/lib/db/DBOutputFormat.java | 15 ++-
 .../mapreduce/lib/db/TestDBOutputFormat.java| 45 
 2 files changed, 58 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f484a6ff/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
index 2e3a9d8..c222bf5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.lib.db;
 
 import java.io.IOException;
 import java.sql.Connection;
+import java.sql.DatabaseMetaData;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 
@@ -51,6 +52,8 @@ public class DBOutputFormat<K extends DBWritable, V>
 extends OutputFormat<K, V> {
 
   private static final Log LOG = LogFactory.getLog(DBOutputFormat.class);
+  public String dbProductName = "DEFAULT";
+
   public void checkOutputSpecs(JobContext context) 
   throws IOException, InterruptedException {}
 
@@ -158,7 +161,12 @@ extends OutputFormat<K, V> {
         query.append(",");
       }
     }
-    query.append(");");
+
+    if (dbProductName.startsWith("DB2") || dbProductName.startsWith("ORACLE")) {
+      query.append(")");
+    } else {
+      query.append(");");
+    }
 
     return query.toString();
   }
@@ -177,7 +185,10 @@ extends OutputFormat<K, V> {
     try {
      Connection connection = dbConf.getConnection();
      PreparedStatement statement = null;
-
+
+      DatabaseMetaData dbMeta = connection.getMetaData();
+      this.dbProductName = dbMeta.getDatabaseProductName().toUpperCase();
+
      statement = connection.prepareStatement(
          constructQuery(tableName, fieldNames));
      return new DBRecordWriter(connection, statement);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f484a6ff/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
index 014855f..e547c8a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.mapreduce.lib.db;
 
 import java.io.IOException;
+import java.lang.reflect.Field;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
@@ -26,6 +28,7 @@ import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
 
 public class TestDBOutputFormat {
   private String[] fieldNames = new String[] { "id", "name", "value" };
@@ -47,6 +50,48 @@ public class TestDBOutputFormat {
   }
 
   @Test
+  public void testDB2ConstructQuery() {
+
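For illustration, a minimal self-contained sketch of the statement-terminator
logic this patch adds: DB2 and Oracle reject a trailing semicolon in
PreparedStatement SQL, so constructQuery() now omits it for those products,
and the product name is taken from JDBC metadata when the record writer is
created. The class and method names below are hypothetical, not part of the
patch:

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;

public class TerminatorSketch {
  // Mirrors the patched tail of constructQuery(): DB2 and Oracle
  // reject a trailing semicolon in prepared-statement SQL.
  static String terminate(StringBuilder query, String dbProductName) {
    if (dbProductName.startsWith("DB2") || dbProductName.startsWith("ORACLE")) {
      query.append(")");
    } else {
      query.append(");");
    }
    return query.toString();
  }

  // The product name comes from JDBC metadata, as in the patched
  // getRecordWriter().
  static String productName(Connection connection) throws SQLException {
    DatabaseMetaData dbMeta = connection.getMetaData();
    return dbMeta.getDatabaseProductName().toUpperCase();
  }
}
```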

svn commit: r1801205 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/ publish/docs/ publish/docs/r3.0.0-alpha4/ publish/docs/r3.0.0-alpha4/api/ publish/docs/r3.0.0-alpha

2017-07-07 Thread wang
Author: wang
Date: Fri Jul  7 18:41:32 2017
New Revision: 1801205

URL: http://svn.apache.org/viewvc?rev=1801205&view=rev
Log:
Update site for release 3.0.0-alpha4


[This commit notification would consist of 3864 parts, 
which exceeds the limit of 50, so it was shortened to the summary.]

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12013: libhdfs++: read with offset at EOF should return 0 bytes instead of error. Contributed by Xiaowei Zhu

2017-07-07 Thread jhc
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8707 0d2d073e2 -> 821f9717e


HDFS-12013: libhdfs++: read with offset at EOF should return 0 bytes instead of 
error.  Contributed by Xiaowei Zhu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/821f9717
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/821f9717
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/821f9717

Branch: refs/heads/HDFS-8707
Commit: 821f9717ece8bbfe76807f08e0bdbbfdbab72549
Parents: 0d2d073
Author: James Clampffer 
Authored: Fri Jul 7 14:04:42 2017 -0400
Committer: James Clampffer 
Committed: Fri Jul 7 14:04:42 2017 -0400

--
 .../src/main/native/libhdfspp/lib/fs/filehandle.cc  | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/821f9717/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc
index eea7ac9..2087d53 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc
@@ -181,7 +181,10 @@ void FileHandleImpl::AsyncPreadSome(
 return;
   }
 
-  if(offset >= file_info_->file_length_){
+  if(offset == file_info_->file_length_) {
+handler(Status::OK(), "", 0);
+return;
+  } else if(offset > file_info_->file_length_){
handler(Status::InvalidOffset("AsyncPreadSome: trying to begin a read past the EOF"), "", 0);
 return;
   }
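Restating the semantics of the change: a positioned read that starts exactly
at EOF now succeeds with zero bytes read, while a read that starts beyond EOF
is still rejected. A short language-neutral sketch of that boundary logic
(written in Java here; the class, method, and exception are illustrative, not
the libhdfs++ API):

```java
public final class PreadBoundary {
  /**
   * Number of bytes available to a positioned read starting at offset,
   * mirroring the patched AsyncPreadSome checks: zero exactly at EOF,
   * an error past EOF.
   */
  static long readableBytes(long offset, long fileLength) {
    if (offset == fileLength) {
      return 0;                       // at EOF: success, zero bytes
    } else if (offset > fileLength) {
      throw new IllegalArgumentException(
          "trying to begin a read past the EOF");
    }
    return fileLength - offset;       // bytes remaining before EOF
  }
}
```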


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[3/3] hadoop git commit: Add release notes, changes, jdiff for 3.0.0-alpha4

2017-07-07 Thread wang
Add release notes, changes, jdiff for 3.0.0-alpha4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f10864a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f10864a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f10864a8

Branch: refs/heads/trunk
Commit: f10864a820c5104d748378aa1c2c408e4aad8a6c
Parents: 7cd0952
Author: Andrew Wang 
Authored: Fri Jul 7 11:01:59 2017 -0700
Committer: Andrew Wang 
Committed: Fri Jul 7 11:01:59 2017 -0700

--
 .../3.0.0-alpha4/CHANGES.3.0.0-alpha4.md| 880 +++
 .../3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md   | 492 +++
 .../jdiff/Apache_Hadoop_HDFS_3.0.0-alpha4.xml   | 322 +++
 3 files changed, 1694 insertions(+)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/3] hadoop git commit: Add release notes, changes, jdiff for 3.0.0-alpha4

2017-07-07 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7cd095272 -> f10864a82


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f10864a8/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md
new file mode 100644
index 000..3ad6cc6
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md
@@ -0,0 +1,492 @@
+
+
+# "Apache Hadoop"  3.0.0-alpha4 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-13956](https://issues.apache.org/jira/browse/HADOOP-13956) | 
*Critical* | **Read ADLS credentials from Credential Provider**
+
+The hadoop-azure-datalake file system now supports configuration of the Azure 
Data Lake Store account credentials using the standard Hadoop Credential 
Provider API. For details, please refer to the documentation on 
hadoop-azure-datalake and the Credential Provider API.
+
+
+---
+
+* [MAPREDUCE-6404](https://issues.apache.org/jira/browse/MAPREDUCE-6404) | 
*Major* | **Allow AM to specify a port range for starting its webapp**
+
+Add a new configuration - "yarn.app.mapreduce.am.webapp.port-range" to specify 
port-range for webapp launched by AM.
+
+
+---
+
+* [HDFS-10860](https://issues.apache.org/jira/browse/HDFS-10860) | *Blocker* | 
**Switch HttpFS from Tomcat to Jetty**
+
+
+
+The following environment variables are deprecated. Set the corresponding
+configuration properties instead.
+
+Environment Variable        | Configuration Property       | Configuration File
+----------------------------|------------------------------|-------------------
+HTTPFS_TEMP                 | hadoop.http.temp.dir         | httpfs-site.xml
+HTTPFS_HTTP_PORT            | hadoop.httpfs.http.port      | httpfs-site.xml
+HTTPFS_MAX_HTTP_HEADER_SIZE | hadoop.http.max.request.header.size and hadoop.http.max.response.header.size | httpfs-site.xml
+HTTPFS_MAX_THREADS          | hadoop.http.max.threads      | httpfs-site.xml
+HTTPFS_SSL_ENABLED          | hadoop.httpfs.ssl.enabled    | httpfs-site.xml
+HTTPFS_SSL_KEYSTORE_FILE    | ssl.server.keystore.location | ssl-server.xml
+HTTPFS_SSL_KEYSTORE_PASS    | ssl.server.keystore.password | ssl-server.xml
+
+These default HTTP Services have been added.
+
+Name   | Description
+-------------------|----------------------------------
+/conf  | Display configuration properties
+/jmx   | Java JMX management interface
+/logLevel  | Get or set log level per class
+/logs  | Display log files
+/stacks| Display JVM stacks
+/static/index.html | The static home page
+
+Script httpfs.sh has been deprecated, use `hdfs httpfs` instead. The new 
scripts are based on the Hadoop shell scripting framework. `hadoop daemonlog` 
is supported. SSL configurations are read from ssl-server.xml.
+
+
+---
+
+* [HDFS-11210](https://issues.apache.org/jira/browse/HDFS-11210) | *Major* | 
**Enhance key rolling to guarantee new KeyVersion is returned from 
generateEncryptedKeys after a key is rolled**
+
+ 
+
+An `invalidateCache` command has been added to the KMS.
+The `rollNewVersion` semantics of the KMS has been improved so that after a 
key's version is rolled, `generateEncryptedKey` of that key guarantees to 
return the `EncryptedKeyVersion` based on the new key version.
+
+
+---
+
+* [HADOOP-13075](https://issues.apache.org/jira/browse/HADOOP-13075) | *Major* 
| **Add support for SSE-KMS and SSE-C in s3a filesystem**
+
+The new encryption options SSE-KMS and especially SSE-C must be considered 
experimental at present. If you are using SSE-C, problems may arise if the 
bucket mixes encrypted and unencrypted files. For SSE-KMS, there may be extra 
throttling of IO, especially with the fadvise=random option. You may wish to 
request an increase in your KMS IOPs limits.
+
+
+---
+
+* [HDFS-11026](https://issues.apache.org/jira/browse/HDFS-11026) | *Major* | 
**Convert BlockTokenIdentifier to use Protobuf**
+
+Changed the serialized format of BlockTokenIdentifier to protocol buffers. 
Includes logic to decode both the old Writable format and the new PB format to 
support existing clients. Client implementations in other languages will 
require similar functionality.
+
+
+---
+
+* [HADOOP-13929](https://issues.apache.org/jira/browse/HADOOP-13929) | *Major* 
| **ADLS connector should not check in contract-test-options.xml**
+
+To run live unit tests, create src/test/resources/auth-keys.xml with the same 
properties as in the deprecated contract-test-options.xml.
+
+
+---
+
+* 
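Among the notes above, the HDFS-11210 key-rolling guarantee is the one most
easily shown in code: after a key's version is rolled, generateEncryptedKey
for that key returns EEKs based on the new key version. A hedged sketch using
the public KeyProviderCryptoExtension API (the key name and the surrounding
class are assumptions for illustration, not from the release notes):

```java
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;

public class KeyRollSketch {
  // Per the HDFS-11210 note: once rollNewVersion returns, subsequent
  // generateEncryptedKey calls are guaranteed to use the new key version.
  static void rollAndGenerate(KeyProviderCryptoExtension kp) throws Exception {
    String keyName = "mykey";                       // hypothetical key name
    kp.rollNewVersion(keyName);
    kp.flush();
    EncryptedKeyVersion eek = kp.generateEncryptedKey(keyName);
    System.out.println("EEK uses key version: "
        + eek.getEncryptionKeyVersionName());
  }
}
```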

[2/3] hadoop git commit: Add release notes, changes, jdiff for 3.0.0-alpha4

2017-07-07 Thread wang
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f10864a8/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/CHANGES.3.0.0-alpha4.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/CHANGES.3.0.0-alpha4.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/CHANGES.3.0.0-alpha4.md
new file mode 100644
index 000..4d4d0bc
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha4/CHANGES.3.0.0-alpha4.md
@@ -0,0 +1,880 @@
+
+
+# "Apache Hadoop" Changelog
+
+## Release 3.0.0-alpha4 - 2017-06-30
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-10860](https://issues.apache.org/jira/browse/HDFS-10860) | Switch HttpFS from Tomcat to Jetty |  Blocker | httpfs | John Zhuge | John Zhuge |
+| [HADOOP-13929](https://issues.apache.org/jira/browse/HADOOP-13929) | ADLS connector should not check in contract-test-options.xml |  Major | fs/adl, test | John Zhuge | John Zhuge |
+| [HDFS-11100](https://issues.apache.org/jira/browse/HDFS-11100) | Recursively deleting file protected by sticky bit should fail |  Critical | fs | John Zhuge | John Zhuge |
+| [HADOOP-13805](https://issues.apache.org/jira/browse/HADOOP-13805) | UGI.getCurrentUser() fails if user does not have a keytab associated |  Major | security | Alejandro Abdelnur | Xiao Chen |
+| [HDFS-11405](https://issues.apache.org/jira/browse/HDFS-11405) | Rename "erasurecode" CLI subcommand to "ec" |  Blocker | erasure-coding | Andrew Wang | Manoj Govindassamy |
+| [HDFS-11426](https://issues.apache.org/jira/browse/HDFS-11426) | Refactor EC CLI to be similar to storage policies CLI |  Major | erasure-coding, shell | Andrew Wang | Andrew Wang |
+| [HDFS-11427](https://issues.apache.org/jira/browse/HDFS-11427) | Rename "rs-default" to "rs" |  Major | erasure-coding | Andrew Wang | Andrew Wang |
+| [HDFS-11382](https://issues.apache.org/jira/browse/HDFS-11382) | Persist Erasure Coding Policy ID in a new optional field in INodeFile in FSImage |  Major | hdfs | Manoj Govindassamy | Manoj Govindassamy |
+| [HDFS-11428](https://issues.apache.org/jira/browse/HDFS-11428) | Change setErasureCodingPolicy to take a required string EC policy name |  Major | erasure-coding | Andrew Wang | Andrew Wang |
+| [HADOOP-14138](https://issues.apache.org/jira/browse/HADOOP-14138) | Remove S3A ref from META-INF service discovery, rely on existing core-default entry |  Critical | fs/s3 | Steve Loughran | Steve Loughran |
+| [HDFS-11152](https://issues.apache.org/jira/browse/HDFS-11152) | Start erasure coding policy ID number from 1 instead of 0 to void potential unexpected errors |  Blocker | erasure-coding | SammiChen | SammiChen |
+| [HDFS-11314](https://issues.apache.org/jira/browse/HDFS-11314) | Enforce set of enabled EC policies on the NameNode |  Blocker | erasure-coding | Andrew Wang | Andrew Wang |
+| [HDFS-11505](https://issues.apache.org/jira/browse/HDFS-11505) | Do not enable any erasure coding policies by default |  Major | erasure-coding | Andrew Wang | Manoj Govindassamy |
+| [HADOOP-10101](https://issues.apache.org/jira/browse/HADOOP-10101) | Update guava dependency to the latest version |  Major | . | Rakesh R | Tsuyoshi Ozawa |
+| [HADOOP-14267](https://issues.apache.org/jira/browse/HADOOP-14267) | Make DistCpOptions class immutable |  Major | tools/distcp | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14202](https://issues.apache.org/jira/browse/HADOOP-14202) | fix jsvc/secure user var inconsistencies |  Major | scripts | Allen Wittenauer | Allen Wittenauer |
+| [HADOOP-14174](https://issues.apache.org/jira/browse/HADOOP-14174) | Set default ADLS access token provider type to ClientCredential |  Major | fs/adl | John Zhuge | John Zhuge |
+| [YARN-6298](https://issues.apache.org/jira/browse/YARN-6298) | Metric preemptCall is not used in new preemption |  Blocker | fairscheduler | Yufei Gu | Yufei Gu |
+| [HADOOP-14285](https://issues.apache.org/jira/browse/HADOOP-14285) | Update minimum version of Maven from 3.0 to 3.3 |  Major | . | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-14225](https://issues.apache.org/jira/browse/HADOOP-14225) | Remove xmlenc dependency |  Minor | . | Chris Douglas | Chris Douglas |
+| [HADOOP-13665](https://issues.apache.org/jira/browse/HADOOP-13665) | Erasure Coding codec should support fallback coder |  Blocker | io | Wei-Chiu Chuang | Kai Sasaki |
+| [HADOOP-14248](https://issues.apache.org/jira/browse/HADOOP-14248) | Retire SharedInstanceProfileCredentialsProvider in trunk. |  Major | fs/s3 | Mingliang Liu | Mingliang Liu |
+| [HDFS-11565](https://issues.apache.org/jira/browse/HDFS-11565) | Use compact identifiers for built-in ECPolicies in HdfsFileStatus |  Blocker | erasure-coding | Andrew Wang | Andrew

hadoop git commit: HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. Contributed by Wenxin He.

2017-07-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 730b21e9f -> 1c40df5bc


HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. 
Contributed by Wenxin He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c40df5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c40df5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c40df5b

Branch: refs/heads/branch-2
Commit: 1c40df5bcd3468bbfdd127282075b70f91176d02
Parents: 730b21e
Author: Akira Ajisaka 
Authored: Sat Jul 8 03:01:18 2017 +0900
Committer: Akira Ajisaka 
Committed: Sat Jul 8 03:01:18 2017 +0900

--
 .../fs/FileContextCreateMkdirBaseTest.java  |  2 +-
 .../hadoop/fs/FileContextPermissionBase.java|  5 ++--
 .../apache/hadoop/fs/FileContextUtilBase.java   |  5 ++--
 .../org/apache/hadoop/fs/TestListFiles.java |  4 +--
 .../fs/TestLocalFileSystemPermission.java   |  2 +-
 .../ha/TestActiveStandbyElectorRealZK.java  |  7 ++---
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |  6 ++--
 .../hadoop/ha/TestZKFailoverController.java |  5 ++--
 .../org/apache/hadoop/ipc/MiniRPCBenchmark.java | 13 -
 .../java/org/apache/hadoop/ipc/TestIPC.java |  5 ++--
 .../apache/hadoop/ipc/TestMiniRPCBenchmark.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 16 +--
 .../hadoop/security/TestGroupFallback.java  | 12 
 .../security/TestUserGroupInformation.java  |  2 +-
 .../hadoop/security/ssl/TestSSLFactory.java |  2 +-
 .../delegation/web/TestWebDelegationToken.java  |  2 +-
 .../apache/hadoop/test/GenericTestUtils.java| 30 
 .../hadoop/test/TestGenericTestUtils.java   | 10 +++
 .../hadoop/util/Crc32PerformanceTest.java   | 11 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   |  2 +-
 hadoop-common-project/hadoop-nfs/pom.xml|  6 
 .../apache/hadoop/oncrpc/TestFrameDecoder.java  |  6 ++--
 23 files changed, 101 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c40df5b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
index c1de27a..fbd598c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -30,6 +29,7 @@ import static 
org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
 
 import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.event.Level;
 
 /**
  * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c40df5b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
index 5c44a98..8516800 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.StringTokenizer;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -32,6 +33,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
 import static org.junit.Assert.assertEquals;
@@ -60,8 +62,7 @@ public abstract class FileContextPermissionBase {
   
   {
 try {
-  ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
-  .setLevel(org.apache.log4j.Level.DEBUG);
+  GenericTestUtils.setLogLevel(FileSystem.LOG, Level.DEBUG);
 }
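The substance of the change is visible in the hunk above: the log4j-specific
cast is replaced by the GenericTestUtils helper, which this commit adds, keyed
on the slf4j Level enum. A minimal sketch of the before/after usage (the
enclosing class name is illustrative):

```java
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.event.Level;

public class LogLevelSketch {
  static void enableDebugLogging() {
    // Before: a cast to a log4j implementation class, tying tests to log4j.
    // ((org.apache.commons.logging.impl.Log4JLogger) FileSystem.LOG)
    //     .getLogger().setLevel(org.apache.log4j.Level.DEBUG);

    // After: the logging-framework-neutral helper used throughout this change.
    GenericTestUtils.setLogLevel(FileSystem.LOG, Level.DEBUG);
  }
}
```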
  

svn commit: r20364 - /release/hadoop/common/current

2017-07-07 Thread wang
Author: wang
Date: Fri Jul  7 17:58:40 2017
New Revision: 20364

Log:
Update current pointer to hadoop-3.0.0-alpha4

Modified:
release/hadoop/common/current

Modified: release/hadoop/common/current
==
--- release/hadoop/common/current (original)
+++ release/hadoop/common/current Fri Jul  7 17:58:40 2017
@@ -1 +1 @@
-link hadoop-3.0.0-alpha3
\ No newline at end of file
+link hadoop-3.0.0-alpha4
\ No newline at end of file



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r20363 - /dev/hadoop/hadoop-3.0.0-alpha4/ /release/hadoop/common/hadoop-3.0.0-alpha4/

2017-07-07 Thread wang
Author: wang
Date: Fri Jul  7 17:57:38 2017
New Revision: 20363

Log:
Move Hadoop 3.0.0-alpha4 bits into release

Added:
release/hadoop/common/hadoop-3.0.0-alpha4/
  - copied from r20362, dev/hadoop/hadoop-3.0.0-alpha4/
Removed:
dev/hadoop/hadoop-3.0.0-alpha4/


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r20362 - /dev/hadoop/hadoop-3.0.0-alpha4/

2017-07-07 Thread wang
Author: wang
Date: Fri Jul  7 17:56:19 2017
New Revision: 20362

Log:
Adding Hadoop 3.0.0-alpha4 bits to dev

Added:
dev/hadoop/hadoop-3.0.0-alpha4/
dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz   (with props)
dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.asc
dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.md5
dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.mds
dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4.tar.gz   (with props)
dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4.tar.gz.asc
dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4.tar.gz.md5
dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4.tar.gz.mds

Added: dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz
==
Binary file - no diff available.

Propchange: dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.asc
==
--- dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.asc (added)
+++ dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.asc Fri Jul  
7 17:56:19 2017
@@ -0,0 +1,17 @@
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1
+
+iQIcBAABCgAGBQJZVbIFAAoJEAjVGgp1ARBcIXIP/2WaCLqqjaMm50CKDBDJf0rn
+aPTZcvM+60NHstgCPgPRnFVAxHg4Ggubbcuxw9hDO0Ld8UkFZm5pDirNuO9R65GX
+ZJLjnQNf4k/+N8lrrCiVCgvkSFqqrqxTQoH7yae+j4/AKV4/uBzJSzug0M00A8Hj
+cmdhIcbjnZ5jyvDh45HQblo8PZ2P0m5VIt3IFBlLWMrtEIF7fGs4jf2q1kJHnznR
+NvCzQVNqMyEv8I+CytP/7798YfGkvc7swwp2dkZ7XnK+G+qcGHB+5Nld5XyxmbvM
++F+qj2Wjno6BViPaDFPYZ4bJKcRag4GjAcGV386YzBl2u9X2DYai+FOXebdW8r7i
+UeFQBnyKq2HNVP+a9SjoIMzVxozDXz+3mdLdv1Dd6KjWJxkBUXOR11lb8MSSSQ2v
+d0wA8K6dqC5UI/yi5FZbYUF4jTpaC2gCoMTnHd4+OVcaywWLeUWqOSPLAYK7/v7r
+4q+NLh4K1AEQPw5ur71EmZqGeXwIrHySpcPP8kW2/cQacb4KvCIWsXbhcXUBQJjr
+mq/RP1L7fVBK2/9ojji2nHLylb0qsgYvZXg6EdcxnLv/APo3UTguHQS9HprmgWUV
+RXdLqV9J6FhPu2KrGRHZJlWR5hiSvOq+eGpmmxEoe+mOJFiYvYaCHslipWpnGlIW
+sxpETWm6BWXoJpmOzYrP
+=TL8k
+-----END PGP SIGNATURE-----

Added: dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.md5
==
--- dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.md5 (added)
+++ dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.md5 Fri Jul  
7 17:56:19 2017
@@ -0,0 +1,2 @@
+$ /usr/bin/md5sum /build/source/target/artifacts/hadoop-3.0.0-alpha4-src.tar.gz
+282c6a8ae333f904dbc82e27417149d4  
/build/source/target/artifacts/hadoop-3.0.0-alpha4-src.tar.gz

Added: dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.mds
==
--- dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.mds (added)
+++ dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.mds Fri Jul  
7 17:56:19 2017
@@ -0,0 +1,16 @@
+/build/source/target/artifacts/hadoop-3.0.0-alpha4-src.tar.gz: 
+   MD5 = 28 2C 6A 8A E3 33 F9 04  DB C8 2E 27 41 71 49 D4
+/build/source/target/artifacts/hadoop-3.0.0-alpha4-src.tar.gz: 
+  SHA1 = 3F25 517C 0B58 9576 11D6  1855 C099 6A3E C4A1 3199
+/build/source/target/artifacts/hadoop-3.0.0-alpha4-src.tar.gz: 
+RMD160 = 8DA8 83E9 913D 1689 0665  5B19 6E33 2716 4664 C132
+/build/source/target/artifacts/hadoop-3.0.0-alpha4-src.tar.gz: 
+SHA224 = 3AF63FC7 79A8BBFF 963AF7D7 CF72B076 F4685157 1D081B03 2F4EE387
+/build/source/target/artifacts/hadoop-3.0.0-alpha4-src.tar.gz: 
+SHA256 = 46CB0452 DBD1720E FC478FB0 35C2CB2B 7EC92B94 956526EB BE4F44E4 
6C13ABC6
+/build/source/target/artifacts/hadoop-3.0.0-alpha4-src.tar.gz: 
+SHA384 = 34AA6DA4 9B6CDC95 A62DA10A 652BE45F BCFFFE11 EC34EECA A8E119EF 
DCD3D956
+ 2080D5E3 276C5169 5ED5AFFB D1E884C6
+/build/source/target/artifacts/hadoop-3.0.0-alpha4-src.tar.gz: 
+SHA512 = C78A9842 B017CAF8 E9021BA1 07D165A9 CAC5CC9E 62F0CC25 E798228E 
C258D685
+ 2AB9A628 EFA1E101 43E8E2CB 6C8A9AE2 4665575D A3CD0359 2FB773D4 
0984D2F9

Added: dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4.tar.gz
==
Binary file - no diff available.

Propchange: dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4.tar.gz.asc
==
--- dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4.tar.gz.asc (added)
+++ dev/hadoop/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4.tar.gz.asc Fri Jul  7 
17:56:19 2017
@@ -0,0 +1,17 @@
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1
+

[1/2] hadoop git commit: Revert "HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. Contributed by Wenxin He."

2017-07-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8153fe2bd -> 7cd095272


Revert "HADOOP-14587. Use GenericTestUtils.setLogLevel when available in 
hadoop-common. Contributed by Wenxin He."

This reverts commit 82cb2a6497caa7c5e693aa41ad18e92f1c7eb16a.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fc5dcc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fc5dcc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fc5dcc2

Branch: refs/heads/trunk
Commit: 8fc5dcc2a199c6b202e55c4cfdf5ae4eb09ef003
Parents: 8153fe2
Author: Akira Ajisaka 
Authored: Sat Jul 8 02:53:18 2017 +0900
Committer: Akira Ajisaka 
Committed: Sat Jul 8 02:53:18 2017 +0900

--
 .../fs/FileContextCreateMkdirBaseTest.java  |  2 +-
 .../hadoop/fs/FileContextPermissionBase.java|  5 ++--
 .../apache/hadoop/fs/FileContextUtilBase.java   |  5 ++--
 .../org/apache/hadoop/fs/TestListFiles.java |  4 +--
 .../fs/TestLocalFileSystemPermission.java   |  2 +-
 .../ha/TestActiveStandbyElectorRealZK.java  |  7 +++--
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |  6 ++--
 .../hadoop/ha/TestZKFailoverController.java |  5 ++--
 .../io/serializer/TestSerializationFactory.java |  6 ++--
 .../org/apache/hadoop/ipc/MiniRPCBenchmark.java | 13 +
 .../java/org/apache/hadoop/ipc/TestIPC.java |  5 ++--
 .../apache/hadoop/ipc/TestMiniRPCBenchmark.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 16 +--
 .../hadoop/security/TestGroupFallback.java  | 12 
 .../hadoop/security/TestUGIWithMiniKdc.java |  2 +-
 .../security/TestUserGroupInformation.java  |  2 +-
 .../hadoop/security/ssl/TestSSLFactory.java |  2 +-
 .../delegation/web/TestWebDelegationToken.java  |  2 +-
 .../apache/hadoop/test/GenericTestUtils.java| 30 
 .../hadoop/test/TestGenericTestUtils.java   | 10 ---
 .../hadoop/util/Crc32PerformanceTest.java   | 11 ---
 hadoop-common-project/hadoop-nfs/pom.xml|  6 
 .../apache/hadoop/oncrpc/TestFrameDecoder.java  |  6 ++--
 .../TestDFSStripedOutputStreamWithFailure.java  | 28 --
 25 files changed, 59 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
index fbd598c..c1de27a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -29,7 +30,6 @@ import static 
org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
 
 import org.apache.hadoop.test.GenericTestUtils;
-import org.slf4j.event.Level;
 
 /**
  * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc5dcc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
index 240989e..dff89f9 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
@@ -23,7 +23,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.StringTokenizer;
 
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -33,7 +32,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.event.Level;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
@@ -63,7 +61,8 @@ public abstract class 

[2/2] hadoop git commit: HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. Contributed by Wenxin He.

2017-07-07 Thread aajisaka
HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. 
Contributed by Wenxin He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7cd09527
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7cd09527
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7cd09527

Branch: refs/heads/trunk
Commit: 7cd095272caa724d11802690544b38d0baaf247d
Parents: 8fc5dcc
Author: Akira Ajisaka 
Authored: Sat Jul 8 02:54:24 2017 +0900
Committer: Akira Ajisaka 
Committed: Sat Jul 8 02:54:24 2017 +0900

--
 .../fs/FileContextCreateMkdirBaseTest.java  |  2 +-
 .../hadoop/fs/FileContextPermissionBase.java|  5 ++--
 .../apache/hadoop/fs/FileContextUtilBase.java   |  5 ++--
 .../org/apache/hadoop/fs/TestListFiles.java |  4 +--
 .../fs/TestLocalFileSystemPermission.java   |  2 +-
 .../ha/TestActiveStandbyElectorRealZK.java  |  7 ++---
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |  6 ++--
 .../hadoop/ha/TestZKFailoverController.java |  5 ++--
 .../io/serializer/TestSerializationFactory.java |  6 ++--
 .../org/apache/hadoop/ipc/MiniRPCBenchmark.java | 13 -
 .../java/org/apache/hadoop/ipc/TestIPC.java |  5 ++--
 .../apache/hadoop/ipc/TestMiniRPCBenchmark.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 16 +--
 .../hadoop/security/TestGroupFallback.java  | 12 
 .../hadoop/security/TestUGIWithMiniKdc.java |  2 +-
 .../security/TestUserGroupInformation.java  |  2 +-
 .../hadoop/security/ssl/TestSSLFactory.java |  2 +-
 .../delegation/web/TestWebDelegationToken.java  |  2 +-
 .../apache/hadoop/test/GenericTestUtils.java| 30 
 .../hadoop/test/TestGenericTestUtils.java   | 10 +++
 .../hadoop/util/Crc32PerformanceTest.java   | 11 +++
 hadoop-common-project/hadoop-nfs/pom.xml|  6 
 .../apache/hadoop/oncrpc/TestFrameDecoder.java  |  6 ++--
 24 files changed, 104 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
index c1de27a..fbd598c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -30,6 +29,7 @@ import static 
org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
 
 import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.event.Level;
 
 /**
  * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd09527/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
index dff89f9..240989e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.StringTokenizer;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -32,6 +33,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
@@ -61,8 +63,7 @@ public abstract class FileContextPermissionBase {
   
   {
 try {
-  ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
-  .setLevel(org.apache.log4j.Level.DEBUG);
+  GenericTestUtils.setLogLevel(FileSystem.LOG, Level.DEBUG);
 }

[hadoop] Git Push Summary

2017-07-07 Thread wang
Repository: hadoop
Updated Tags:  refs/tags/rel/release-3.0.0-alpha4 [created] b296a54ee

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14548. S3Guard: documenting issues running parallel tests w/ s3n. Contributed by Aaron Fabbri.

2017-07-07 Thread mackrorysd
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 30e1146c5 -> b8ca3bdf3


HADOOP-14548. S3Guard: documenting issues running parallel tests w/ s3n. 
Contributed by Aaron Fabbri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8ca3bdf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8ca3bdf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8ca3bdf

Branch: refs/heads/HADOOP-13345
Commit: b8ca3bdf340622fc5fde8ccad9dc8321d9829add
Parents: 30e1146c
Author: Sean Mackrory 
Authored: Fri Jul 7 10:38:03 2017 -0600
Committer: Sean Mackrory 
Committed: Fri Jul 7 10:51:16 2017 -0600

--
 .../src/site/markdown/tools/hadoop-aws/s3guard.md | 10 --
 .../src/site/markdown/tools/hadoop-aws/testing.md |  4 
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8ca3bdf/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md
index 49eb75f..c28e354 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md
@@ -443,7 +443,7 @@ at a time until the operations appear to be working.
mvn -T 1C verify -Dtest=skip -Dit.test=ITestS3AMiscOperations -Ds3guard -Ddynamo
 ```
 
-Notes
+### Notes
 
 1. If the `s3guard` profile is not set, then the s3guard properties are those
 of the test configuration set in `contract-test-options.xml` or `auth-keys.xml`
@@ -452,8 +452,14 @@ If the `s3guard` profile *is* set,
 1. The s3guard options from maven (the dynamo and authoritative flags)
   overwrite any previously set. in the configuration files.
 1. Dynamo will be configured to create any missing tables.
-1. 
 
+### Warning About Concurrent Tests
+
+You should not run S3A and S3N tests in parallel on the same bucket.  This is
+especially true when S3Guard is enabled.  S3Guard requires that all clients
+that are modifying the bucket have S3Guard enabled, so having S3N
+integration tests running in parallel with S3A tests will cause strange
+failures.
 
 ### Scale Testing MetadataStore Directly
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8ca3bdf/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
index 0bf2261..dcc6d46 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
@@ -107,6 +107,10 @@ each filesystem for its testing.
 1. `test.fs.s3n.name` : the URL of the bucket for S3n tests
 1. `test.fs.s3a.name` : the URL of the bucket for S3a tests
 
+*Note* that running s3a and s3n tests in parallel mode, against the same bucket
+is unreliable.  We recommend using separate buckets or testing one connector
+at a time.
+
 The contents of each bucket will be destroyed during the test process:
 do not use the bucket for any purpose other than testing. Furthermore, for
 s3a, all in-progress multi-part uploads to the bucket will be aborted at the


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14457. create() does not notify metadataStore of parent directories or ensure they're not existing files. Contributed by Sean Mackrory. [Forced Update!]

2017-07-07 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 930feb100 -> 30e1146c5 (forced update)


HADOOP-14457. create() does not notify metadataStore of parent directories or
ensure they're not existing files.
Contributed by Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30e1146c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30e1146c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30e1146c

Branch: refs/heads/HADOOP-13345
Commit: 30e1146c515d154544707d0e4d4a1c743e6d518f
Parents: 309b8c0
Author: Steve Loughran 
Authored: Fri Jul 7 14:55:50 2017 +0100
Committer: Steve Loughran 
Committed: Fri Jul 7 15:02:22 2017 +0100

--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  3 +-
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   | 60 +++
 .../fs/s3a/s3guard/LocalMetadataStore.java  |  7 +++
 .../hadoop/fs/s3a/s3guard/MetadataStore.java| 10 
 .../fs/s3a/s3guard/NullMetadataStore.java   |  5 ++
 .../apache/hadoop/fs/s3a/s3guard/S3Guard.java   | 25 +++-
 .../hadoop/fs/s3a/ITestS3GuardCreate.java   | 61 
 7 files changed, 145 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30e1146c/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 878f2f7..3279f1c 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -1773,7 +1773,7 @@ public class S3AFileSystem extends FileSystem {
   String key = pathToKey(f);
   createFakeDirectory(key);
 }
-S3Guard.makeDirsOrdered(metadataStore, metadataStoreDirs, username);
+S3Guard.makeDirsOrdered(metadataStore, metadataStoreDirs, username, true);
 // this is complicated because getParent(a/b/c/) returns a/b/c, but
 // we want a/b. See HADOOP-14428 for more details.
 deleteUnnecessaryFakeDirectories(new Path(f.toString()).getParent());
@@ -2292,6 +2292,7 @@ public class S3AFileSystem extends FileSystem {
 // See note about failure semantics in s3guard.md doc.
 try {
   if (hasMetadataStore()) {
+S3Guard.addAncestors(metadataStore, p, username);
 S3AFileStatus status = createUploadFileStatus(p,
 S3AUtils.objectRepresentsDirectory(key, length), length,
 getDefaultBlockSize(p), username);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30e1146c/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 784b815..97c9135 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -25,7 +25,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Date;
-import java.util.HashSet;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
@@ -436,6 +436,30 @@ public class DynamoDBMetadataStore implements 
MetadataStore {
 }
   }
 
+  Collection<PathMetadata> completeAncestry(
+  Collection<PathMetadata> pathsToCreate) {
+// Key on path to allow fast lookup
+Map<Path, PathMetadata> ancestry = new HashMap<>();
+
+for (PathMetadata meta : pathsToCreate) {
+  Preconditions.checkArgument(meta != null);
+  Path path = meta.getFileStatus().getPath();
+  if (path.isRoot()) {
+break;
+  }
+  ancestry.put(path, meta);
+  Path parent = path.getParent();
+  while (!parent.isRoot() && !ancestry.containsKey(parent)) {
+LOG.debug("auto-create ancestor path {} for child path {}",
+parent, path);
+final FileStatus status = makeDirStatus(parent, username);
+ancestry.put(parent, new PathMetadata(status, Tristate.FALSE, false));
+parent = parent.getParent();
+  }
+}
+ return ancestry.values();
+   }
+
   @Override
  public void move(Collection<Path> pathsToDelete,
  Collection<PathMetadata> pathsToCreate) throws IOException {
@@ -457,27 +481,7 @@ public class DynamoDBMetadataStore 
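The heart of the patch is the ancestor walk in completeAncestry() above: every
path being created contributes a synthetic directory entry for each missing
parent, deduplicated through a map keyed on path, so that move() and put() see
every parent directory that S3 itself never materializes. A standalone sketch
of the same walk over plain strings (the class and helper names are
hypothetical, not the S3Guard API):

```java
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

public class AncestrySketch {
  // For each leaf path, add an entry for every ancestor not yet present,
  // mirroring the dedup-by-path map in completeAncestry().
  static Collection<String> completeAncestry(Collection<String> leaves) {
    Map<String, String> ancestry = new HashMap<>();
    for (String path : leaves) {
      ancestry.put(path, "file:" + path);
      String parent = parentOf(path);
      while (!parent.equals("/") && !ancestry.containsKey(parent)) {
        ancestry.put(parent, "dir:" + parent);   // synthesized directory entry
        parent = parentOf(parent);
      }
    }
    return ancestry.values();
  }

  static String parentOf(String path) {
    int i = path.lastIndexOf('/');
    return i <= 0 ? "/" : path.substring(0, i);
  }
}
```

Called with ["/a/b/c"], this yields entries for /a/b/c, /a/b, and /a, which is
roughly what the metadata store needs to record for ancestors.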

hadoop git commit: HADOOP-14457. create() does not notify metadataStore of parent directories or ensure they're not existing files. Contributed by Sean Mackrory.

2017-07-07 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 309b8c0f8 -> 930feb100


HADOOP-14457. create() does not notify metadataStore of parent directories or
ensure they're not existing files.
Contributed by Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/930feb10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/930feb10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/930feb10

Branch: refs/heads/HADOOP-13345
Commit: 930feb10050eb898c7b94b1c94b398e2e92f73e3
Parents: 309b8c0
Author: Steve Loughran 
Authored: Fri Jul 7 14:55:50 2017 +0100
Committer: Steve Loughran 
Committed: Fri Jul 7 14:55:50 2017 +0100

--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  3 +-
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   | 60 +++
 .../fs/s3a/s3guard/LocalMetadataStore.java  |  7 +++
 .../hadoop/fs/s3a/s3guard/MetadataStore.java| 10 
 .../fs/s3a/s3guard/NullMetadataStore.java   |  5 ++
 .../apache/hadoop/fs/s3a/s3guard/S3Guard.java   | 25 +++-
 .../hadoop/fs/s3a/ITestS3GuardCreate.java   | 61 
 7 files changed, 145 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/930feb10/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 878f2f7..3279f1c 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -1773,7 +1773,7 @@ public class S3AFileSystem extends FileSystem {
   String key = pathToKey(f);
   createFakeDirectory(key);
 }
-S3Guard.makeDirsOrdered(metadataStore, metadataStoreDirs, username);
+S3Guard.makeDirsOrdered(metadataStore, metadataStoreDirs, username, true);
 // this is complicated because getParent(a/b/c/) returns a/b/c, but
 // we want a/b. See HADOOP-14428 for more details.
 deleteUnnecessaryFakeDirectories(new Path(f.toString()).getParent());
@@ -2292,6 +2292,7 @@ public class S3AFileSystem extends FileSystem {
 // See note about failure semantics in s3guard.md doc.
 try {
   if (hasMetadataStore()) {
+S3Guard.addAncestors(metadataStore, p, username);
 S3AFileStatus status = createUploadFileStatus(p,
 S3AUtils.objectRepresentsDirectory(key, length), length,
 getDefaultBlockSize(p), username);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/930feb10/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 784b815..1c3d701 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -25,7 +25,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Date;
-import java.util.HashSet;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
@@ -436,6 +436,30 @@ public class DynamoDBMetadataStore implements 
MetadataStore {
 }
   }
 
+   Collection<PathMetadata> completeAncestry(
+   Collection<PathMetadata> pathsToCreate) {
+ // Key on path to allow fast lookup
+ Map<Path, PathMetadata> ancestry = new HashMap<>();
+
+ for (PathMetadata meta : pathsToCreate) {
+   Preconditions.checkArgument(meta != null);
+   Path path = meta.getFileStatus().getPath();
+   if (path.isRoot()) {
+ break;
+   }
+   ancestry.put(path, meta);
+   Path parent = path.getParent();
+   while (!parent.isRoot() && !ancestry.containsKey(parent)) {
+ LOG.debug("auto-create ancestor path {} for child path {}",
+ parent, path);
+ final FileStatus status = makeDirStatus(parent, username);
+ ancestry.put(parent, new PathMetadata(status, Tristate.FALSE, false));
+ parent = parent.getParent();
+   }
+ }
+ return ancestry.values();
+   }
+
   @Override
  public void move(Collection<Path> pathsToDelete,
  Collection<PathMetadata> pathsToCreate) throws IOException {
@@ -457,27 +481,7 @@ public class 

hadoop git commit: HADOOP-14563. LoadBalancingKMSClientProvider#warmUpEncryptedKeys swallows IOException. Contributed by Rushabh S Shah.

2017-07-07 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8949a9d54 -> d657c0517


HADOOP-14563. LoadBalancingKMSClientProvider#warmUpEncryptedKeys swallows 
IOException. Contributed by Rushabh S Shah.

(cherry picked from commit 8153fe2bd35fb4df0b64f93ac0046e34d4807ac3)
(cherry picked from commit 730b21e9f90524673932411b48e1cb04dd3633d0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d657c051
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d657c051
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d657c051

Branch: refs/heads/branch-2.8
Commit: d657c05177d041ff1428ad817c9531d0cbd71281
Parents: 8949a9d
Author: Wei-Chiu Chuang 
Authored: Fri Jul 7 06:13:10 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Jul 7 06:14:32 2017 -0700

--
 .../key/kms/LoadBalancingKMSClientProvider.java | 12 +++-
 .../kms/TestLoadBalancingKMSClientProvider.java | 63 
 2 files changed, 74 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d657c051/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index 6a9bd62..e17b507 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -38,6 +38,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 
 /**
  * A simple LoadBalancing KMSClientProvider that round-robins requests
@@ -158,15 +159,24 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
   // This request is sent to all providers in the load-balancing group
   @Override
   public void warmUpEncryptedKeys(String... keyNames) throws IOException {
+Preconditions.checkArgument(providers.length > 0,
+"No providers are configured");
+boolean success = false;
+IOException e = null;
 for (KMSClientProvider provider : providers) {
   try {
 provider.warmUpEncryptedKeys(keyNames);
+success = true;
   } catch (IOException ioe) {
+e = ioe;
 LOG.error(
 "Error warming up keys for provider with url"
-+ "[" + provider.getKMSUrl() + "]");
++ "[" + provider.getKMSUrl() + "]", ioe);
   }
 }
+if (!success && e != null) {
+  throw e;
+}
   }
 
   // This request is sent to all providers in the load-balancing group

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d657c051/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
index 48acf6c..8a2e87f 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider.Options;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -257,4 +258,66 @@ public class TestLoadBalancingKMSClientProvider {
   "AuthenticationException"));
 }
   }
+
+  /**
+   * tests {@link 
LoadBalancingKMSClientProvider#warmUpEncryptedKeys(String...)}
+   * error handling in case when all the providers throws {@link IOException}.
+   * @throws Exception
+   */
+  @Test
+  public void testWarmUpEncryptedKeysWhenAllProvidersFail() throws Exception {
+Configuration conf = new Configuration();
+KMSClientProvider p1 = mock(KMSClientProvider.class);
+String keyName = "key1";
+Mockito.doThrow(new IOException(new 
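The pattern in the LoadBalancingKMSClientProvider hunk above generalizes:
attempt the operation against every provider, log and remember individual
failures, and rethrow the last exception only when no provider succeeded. A
compact sketch of that aggregate-failure pattern (the interface and names are
illustrative, not the KMS API):

```java
import java.io.IOException;
import java.util.List;

public class WarmUpSketch {
  interface Provider {
    void warmUp(String... keys) throws IOException;
  }

  // Throws only if every provider failed, matching the patched semantics.
  static void warmUpAll(List<Provider> providers, String... keys)
      throws IOException {
    if (providers.isEmpty()) {
      throw new IllegalArgumentException("No providers are configured");
    }
    boolean success = false;
    IOException last = null;
    for (Provider p : providers) {
      try {
        p.warmUp(keys);
        success = true;                 // at least one provider worked
      } catch (IOException ioe) {
        last = ioe;                     // remember it, keep trying the rest
      }
    }
    if (!success && last != null) {
      throw last;
    }
  }
}
```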

hadoop git commit: HADOOP-14563. LoadBalancingKMSClientProvider#warmUpEncryptedKeys swallows IOException. Contributed by Rushabh S Shah.

2017-07-07 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 eda4ac07c -> 730b21e9f


HADOOP-14563. LoadBalancingKMSClientProvider#warmUpEncryptedKeys swallows 
IOException. Contributed by Rushabh S Shah.

(cherry picked from commit 8153fe2bd35fb4df0b64f93ac0046e34d4807ac3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/730b21e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/730b21e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/730b21e9

Branch: refs/heads/branch-2
Commit: 730b21e9f90524673932411b48e1cb04dd3633d0
Parents: eda4ac0
Author: Wei-Chiu Chuang 
Authored: Fri Jul 7 06:13:10 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Jul 7 06:14:07 2017 -0700

--
 .../key/kms/LoadBalancingKMSClientProvider.java | 12 +++-
 .../kms/TestLoadBalancingKMSClientProvider.java | 63 
 2 files changed, 74 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/730b21e9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index e6ff079..e5b82d0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -39,6 +39,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 
 /**
  * A simple LoadBalancing KMSClientProvider that round-robins requests
@@ -159,15 +160,24 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
   // This request is sent to all providers in the load-balancing group
   @Override
   public void warmUpEncryptedKeys(String... keyNames) throws IOException {
+Preconditions.checkArgument(providers.length > 0,
+"No providers are configured");
+boolean success = false;
+IOException e = null;
 for (KMSClientProvider provider : providers) {
   try {
 provider.warmUpEncryptedKeys(keyNames);
+success = true;
   } catch (IOException ioe) {
+e = ioe;
 LOG.error(
 "Error warming up keys for provider with url"
-+ "[" + provider.getKMSUrl() + "]");
++ "[" + provider.getKMSUrl() + "]", ioe);
   }
 }
+if (!success && e != null) {
+  throw e;
+}
   }
 
   // This request is sent to all providers in the load-balancing group

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730b21e9/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
index 48acf6c..8a2e87f 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider.Options;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -257,4 +258,66 @@ public class TestLoadBalancingKMSClientProvider {
   "AuthenticationException"));
 }
   }
+
+  /**
+   * tests {@link 
LoadBalancingKMSClientProvider#warmUpEncryptedKeys(String...)}
+   * error handling in case when all the providers throws {@link IOException}.
+   * @throws Exception
+   */
+  @Test
+  public void testWarmUpEncryptedKeysWhenAllProvidersFail() throws Exception {
+Configuration conf = new Configuration();
+KMSClientProvider p1 = mock(KMSClientProvider.class);
+String keyName = "key1";
+Mockito.doThrow(new IOException(new AuthorizationException("p1"))).when(p1)
+

hadoop git commit: HADOOP-14563. LoadBalancingKMSClientProvider#warmUpEncryptedKeys swallows IOException. Contributed by Rushabh S Shah.

2017-07-07 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 82cb2a649 -> 8153fe2bd


HADOOP-14563. LoadBalancingKMSClientProvider#warmUpEncryptedKeys swallows 
IOException. Contributed by Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8153fe2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8153fe2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8153fe2b

Branch: refs/heads/trunk
Commit: 8153fe2bd35fb4df0b64f93ac0046e34d4807ac3
Parents: 82cb2a6
Author: Wei-Chiu Chuang 
Authored: Fri Jul 7 06:13:10 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Jul 7 06:13:10 2017 -0700

--
 .../key/kms/LoadBalancingKMSClientProvider.java | 12 +++-
 .../kms/TestLoadBalancingKMSClientProvider.java | 63 
 2 files changed, 74 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8153fe2b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index aa24993..de9c988 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -39,6 +39,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 
 /**
  * A simple LoadBalancing KMSClientProvider that round-robins requests
@@ -159,15 +160,24 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
   // This request is sent to all providers in the load-balancing group
   @Override
   public void warmUpEncryptedKeys(String... keyNames) throws IOException {
+Preconditions.checkArgument(providers.length > 0,
+"No providers are configured");
+boolean success = false;
+IOException e = null;
 for (KMSClientProvider provider : providers) {
   try {
 provider.warmUpEncryptedKeys(keyNames);
+success = true;
   } catch (IOException ioe) {
+e = ioe;
 LOG.error(
 "Error warming up keys for provider with url"
-+ "[" + provider.getKMSUrl() + "]");
++ "[" + provider.getKMSUrl() + "]", ioe);
   }
 }
+if (!success && e != null) {
+  throw e;
+}
   }
 
   // This request is sent to all providers in the load-balancing group
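
The shape of the fix above is worth calling out: iterate every provider, 
remember the last IOException, and rethrow it only when no provider succeeded, 
so a single healthy KMS instance is enough to warm up the keys. A minimal 
self-contained sketch of the same pattern (the Provider interface and the names 
below are illustrative stand-ins, not the Hadoop API):

import java.io.IOException;
import java.util.List;

/** Illustrative stand-in for KMSClientProvider. */
interface Provider {
  void warmUp(String... keys) throws IOException;
}

final class WarmUp {
  /**
   * Succeeds if at least one provider succeeds; rethrows the last
   * IOException only when all providers failed.
   */
  static void warmUpAll(List<Provider> providers, String... keys)
      throws IOException {
    if (providers.isEmpty()) {
      throw new IllegalArgumentException("No providers are configured");
    }
    boolean success = false;
    IOException last = null;
    for (Provider p : providers) {
      try {
        p.warmUp(keys);
        success = true;        // one success is enough
      } catch (IOException ioe) {
        last = ioe;            // remember it, but keep trying the rest
      }
    }
    if (!success && last != null) {
      throw last;              // every provider failed: surface the error
    }
  }
}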

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8153fe2b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
index 499b991..d14dd59 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider.Options;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -257,4 +258,66 @@ public class TestLoadBalancingKMSClientProvider {
   "AuthenticationException"));
 }
   }
+
+  /**
+   * Tests {@link 
LoadBalancingKMSClientProvider#warmUpEncryptedKeys(String...)}
+   * error handling when all the providers throw {@link IOException}.
+   * @throws Exception
+   */
+  @Test
+  public void testWarmUpEncryptedKeysWhenAllProvidersFail() throws Exception {
+Configuration conf = new Configuration();
+KMSClientProvider p1 = mock(KMSClientProvider.class);
+String keyName = "key1";
+Mockito.doThrow(new IOException(new AuthorizationException("p1"))).when(p1)
+.warmUpEncryptedKeys(Mockito.anyString());
+KMSClientProvider p2 = mock(KMSClientProvider.class);
+  
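
The hunk is truncated by the archive at this point. Based on the pattern being 
tested, the remainder plausibly mocks the second provider to fail the same way 
and asserts that the combined call now surfaces the IOException; a sketch under 
that assumption (not the committed code):

// Second provider also throws, so the load-balancing group has
// no healthy member left.
Mockito.doThrow(new IOException(new AuthorizationException("p2"))).when(p2)
    .warmUpEncryptedKeys(Mockito.anyString());
KMSClientProvider[] providers = new KMSClientProvider[] {p1, p2};
LoadBalancingKMSClientProvider kp =
    new LoadBalancingKMSClientProvider(providers, 0, conf);
try {
  kp.warmUpEncryptedKeys(keyName);
  fail("Expected an IOException since all providers failed");
} catch (IOException e) {
  // With the fix, the last provider's exception is rethrown
  // instead of being swallowed.
}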

hadoop git commit: HDFS-11679. Ozone: SCM CLI: Implement list container command. Contributed by Yuanbo Liu.

2017-07-07 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 8efb1fafd -> 5fd38a6dc


HDFS-11679. Ozone: SCM CLI: Implement list container command. Contributed by 
Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5fd38a6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5fd38a6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5fd38a6d

Branch: refs/heads/HDFS-7240
Commit: 5fd38a6dc13601f51b8c322c8875797a3c6cb93b
Parents: 8efb1fa
Author: Weiwei Yang 
Authored: Fri Jul 7 15:54:21 2017 +0800
Committer: Weiwei Yang 
Committed: Fri Jul 7 15:54:21 2017 +0800

--
 .../scm/client/ContainerOperationClient.java|  12 ++
 .../org/apache/hadoop/scm/client/ScmClient.java |  16 +++
 .../scm/container/common/helpers/Pipeline.java  |  65 ++
 .../StorageContainerLocationProtocol.java   |  20 
 ...rLocationProtocolClientSideTranslatorPB.java |  35 ++
 .../StorageContainerLocationProtocol.proto  |  12 ++
 ...rLocationProtocolServerSideTranslatorPB.java |  38 ++
 .../ozone/scm/StorageContainerManager.java  |   9 ++
 .../cli/container/ContainerCommandHandler.java  |   8 ++
 .../scm/cli/container/ListContainerHandler.java | 120 +++
 .../ozone/scm/container/ContainerMapping.java   |  35 ++
 .../hadoop/ozone/scm/container/Mapping.java |  20 
 .../hadoop/cblock/util/MockStorageClient.java   |  20 
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java | 115 +-
 14 files changed, 524 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fd38a6d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ContainerOperationClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ContainerOperationClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ContainerOperationClient.java
index f109516..481732a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ContainerOperationClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ContainerOperationClient.java
@@ -28,6 +28,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.UUID;
 
 /**
@@ -157,6 +158,17 @@ public class ContainerOperationClient implements ScmClient 
{
   }
 
   /**
+   * {@inheritDoc}
+   */
+  @Override
+  public List<Pipeline> listContainer(String startName,
+  String prefixName, int count)
+  throws IOException {
+return storageContainerLocationClient.listContainer(
+startName, prefixName, count);
+  }
+
+  /**
* Get meta data from an existing container.
*
* @param pipeline - pipeline that represents the container.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fd38a6d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ScmClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ScmClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ScmClient.java
index 67d6820..22180b1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ScmClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ScmClient.java
@@ -22,6 +22,7 @@ import 
org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerData
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 
 import java.io.IOException;
+import java.util.List;
 
 /**
  * The interface to call into underlying container layer.
@@ -60,6 +61,21 @@ public interface ScmClient {
   void deleteContainer(Pipeline pipeline, boolean force) throws IOException;
 
   /**
+   * Lists a range of containers and get the pipelines info.
+   *
+   * @param startName start name, if null, start searching at the head.
+   * @param prefixName prefix name, if null, then filter is disabled.
+   * @param count count, if count < 0, the max size is unlimited. (Usually
+   *  the count will be replaced with a very large value instead of
+   *  being unlimited, in case the db is very big.)
+   *
+   * @return a list of pipelines.
+   * @throws IOException
+   */
+  List<Pipeline> listContainer(String startName, String prefixName, int count)
+  throws IOException;
+
+  /**
* Read meta data from an existing container.
* @param pipeline - Pipeline that represents the container.
* @return 
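
For context, a minimal usage sketch of the new listing API, assuming an 
already-constructed ScmClient and that Pipeline exposes the container name (the 
64-entry batch size and the paging loop are illustrative choices, not part of 
the patch):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;

final class ListAllContainers {
  /** Pages through all containers, 64 at a time. */
  static void printAll(ScmClient client) throws IOException {
    String start = null;            // null => start searching at the head
    while (true) {
      List<Pipeline> page = client.listContainer(start, null, 64);
      if (page.isEmpty()) {
        break;
      }
      for (Pipeline pipeline : page) {
        System.out.println(pipeline.getContainerName());  // assumed accessor
      }
      if (page.size() < 64) {
        break;                      // short page: nothing left to fetch
      }
      start = page.get(page.size() - 1).getContainerName();
    }
  }
}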

hadoop git commit: HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. Contributed by Wenxin He.

2017-07-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7576a688e -> 82cb2a649


HADOOP-14587. Use GenericTestUtils.setLogLevel when available in hadoop-common. 
Contributed by Wenxin He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82cb2a64
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82cb2a64
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82cb2a64

Branch: refs/heads/trunk
Commit: 82cb2a6497caa7c5e693aa41ad18e92f1c7eb16a
Parents: 7576a68
Author: Akira Ajisaka 
Authored: Fri Jul 7 14:55:46 2017 +0900
Committer: Akira Ajisaka 
Committed: Fri Jul 7 15:00:47 2017 +0900

--
 .../fs/FileContextCreateMkdirBaseTest.java  |  2 +-
 .../hadoop/fs/FileContextPermissionBase.java|  5 ++--
 .../apache/hadoop/fs/FileContextUtilBase.java   |  5 ++--
 .../org/apache/hadoop/fs/TestListFiles.java |  4 +--
 .../fs/TestLocalFileSystemPermission.java   |  2 +-
 .../ha/TestActiveStandbyElectorRealZK.java  |  7 ++---
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |  6 ++--
 .../hadoop/ha/TestZKFailoverController.java |  5 ++--
 .../io/serializer/TestSerializationFactory.java |  6 ++--
 .../org/apache/hadoop/ipc/MiniRPCBenchmark.java | 13 -
 .../java/org/apache/hadoop/ipc/TestIPC.java |  5 ++--
 .../apache/hadoop/ipc/TestMiniRPCBenchmark.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 16 +--
 .../hadoop/security/TestGroupFallback.java  | 12 
 .../hadoop/security/TestUGIWithMiniKdc.java |  2 +-
 .../security/TestUserGroupInformation.java  |  2 +-
 .../hadoop/security/ssl/TestSSLFactory.java |  2 +-
 .../delegation/web/TestWebDelegationToken.java  |  2 +-
 .../apache/hadoop/test/GenericTestUtils.java| 30 
 .../hadoop/test/TestGenericTestUtils.java   | 10 +++
 .../hadoop/util/Crc32PerformanceTest.java   | 11 +++
 hadoop-common-project/hadoop-nfs/pom.xml|  6 
 .../apache/hadoop/oncrpc/TestFrameDecoder.java  |  6 ++--
 .../TestDFSStripedOutputStreamWithFailure.java  | 28 ++
 25 files changed, 132 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
index c1de27a..fbd598c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -30,6 +29,7 @@ import static 
org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
 
 import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.event.Level;
 
 /**
  * 

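The import swap above is the substance of the change: tests stop depending on 
org.apache.log4j.Level directly and set levels through GenericTestUtils with 
org.slf4j.event.Level, keeping them neutral to the logging backend. A 
representative call site, assuming the slf4j-flavored setLogLevel overload this 
change makes available (class and method names below are illustrative):

import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;

final class LogLevelExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(LogLevelExample.class);

  static void enableDebugLogging() {
    // Backend-neutral: no cast to a concrete log4j logger needed.
    GenericTestUtils.setLogLevel(LOG, Level.DEBUG);
  }
}
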
http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cb2a64/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
index dff89f9..240989e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.StringTokenizer;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -32,6 +33,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
@@ -61,8 +63,7 @@ public abstract class FileContextPermissionBase {
   
   {
 try {
-