svn commit: r1847745 - /hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

2018-11-29 Thread xiao
Author: xiao
Date: Thu Nov 29 18:20:01 2018
New Revision: 1847745

URL: http://svn.apache.org/viewvc?rev=1847745&view=rev
Log:
update xiao

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1847745&r1=1847744&r2=1847745&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Thu 
Nov 29 18:20:01 2018
@@ -626,8 +626,8 @@

  xiao
  Xiao Chen
- Cloudera
- HDFS
+ Netflix
+ 
  -8

 
@@ -1719,8 +1719,8 @@

  xiao
  Xiao Chen
- Cloudera
- HDFS
+ Netflix
+ 
  -8

 






hadoop git commit: Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is set. Contributed by Zsolt Venczel."

2018-11-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 abd9d93a5 -> 8ab6aa1b4


Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is 
set. Contributed by Zsolt Venczel."

This reverts commit 0424715207cd07debeee5c624973e9db90d36fb6.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ab6aa1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ab6aa1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ab6aa1b

Branch: refs/heads/branch-3.1
Commit: 8ab6aa1b4274a0d3bae3a4ab3b7e6ca252227e39
Parents: abd9d93
Author: Xiao Chen 
Authored: Tue Nov 13 12:45:35 2018 -0800
Committer: Xiao Chen 
Committed: Tue Nov 13 12:46:03 2018 -0800

--
 .../main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java | 12 ++--
 .../src/test/resources/testErasureCodingConf.xml|  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ab6aa1b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 56d453b..56706b2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -357,16 +357,16 @@ public class ECAdmin extends Configured implements Tool {
   final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
   try {
 dfs.setErasureCodingPolicy(p, ecPolicyName);
-
-String actualECPolicyName = dfs.getErasureCodingPolicy(p).getName();
-
-System.out.println("Set " + actualECPolicyName +
-" erasure coding policy on "+ path);
+if (ecPolicyName == null){
+  ecPolicyName = "default";
+}
+System.out.println("Set " + ecPolicyName + " erasure coding policy on" 
+
+" " + path);
 RemoteIterator dirIt = dfs.listStatusIterator(p);
 if (dirIt.hasNext()) {
   System.out.println("Warning: setting erasure coding policy on a " +
   "non-empty directory will not automatically convert existing " +
-  "files to " + actualECPolicyName + " erasure coding policy");
+  "files to " + ecPolicyName + " erasure coding policy");
 }
   } catch (Exception e) {
 System.err.println(AdminHelper.prettifyException(e));
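For context, a minimal client-side sketch (not part of the patch) of the difference between the two behaviours: the reverted code asked the NameNode which policy was actually applied, while the restored code echoes the requested name, or the literal word "default" when none was given. The class name and path below are made up, and it assumes fs.defaultFS points at an HDFS cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EcPolicyNameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path dir = new Path("/ecdir");                 // hypothetical directory
    try (DistributedFileSystem dfs =
             (DistributedFileSystem) dir.getFileSystem(conf)) {
      String requested = null;                     // null means "use the cluster default"
      dfs.setErasureCodingPolicy(dir, requested);

      // Reverted behaviour (HDFS-13732): query the policy actually applied.
      String actual = dfs.getErasureCodingPolicy(dir).getName();

      // Restored behaviour: echo the request, falling back to the word "default".
      String echoed = (requested == null) ? "default" : requested;

      System.out.println("actual=" + actual + ", echoed=" + echoed);
    }
  }
}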

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ab6aa1b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index b47d50f..9070367 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -710,7 +710,7 @@
   
 
   SubstringComparator
-  Set RS-6-3-1024k erasure coding policy on 
/ecdir
+  Set default erasure coding policy on 
/ecdir
 
   
 
@@ -728,11 +728,11 @@
   
 
   SubstringComparator
-  Set RS-6-3-1024k erasure coding policy on 
/ecdir
+  Set default erasure coding policy on 
/ecdir
 
 
   SubstringComparator
-  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to 
RS-6-3-1024k erasure coding policy
+  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to default 
erasure coding policy
 
   
 





hadoop git commit: Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is set. Contributed by Zsolt Venczel."

2018-11-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 f214af74b -> 5afd7efe2


Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is 
set. Contributed by Zsolt Venczel."

This reverts commit 7dc79a8b5b7af0bf37d25a221be8ed446b0edb74.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5afd7efe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5afd7efe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5afd7efe

Branch: refs/heads/branch-3.2
Commit: 5afd7efe24e3c40bb01f3b6baa9c25f72797e42d
Parents: f214af7
Author: Xiao Chen 
Authored: Tue Nov 13 12:45:02 2018 -0800
Committer: Xiao Chen 
Committed: Tue Nov 13 12:45:55 2018 -0800

--
 .../main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java | 12 ++--
 .../src/test/resources/testErasureCodingConf.xml|  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5afd7efe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 903a1e2..5f8626e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -358,16 +358,16 @@ public class ECAdmin extends Configured implements Tool {
   final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
   try {
 dfs.setErasureCodingPolicy(p, ecPolicyName);
-
-String actualECPolicyName = dfs.getErasureCodingPolicy(p).getName();
-
-System.out.println("Set " + actualECPolicyName +
-" erasure coding policy on "+ path);
+if (ecPolicyName == null){
+  ecPolicyName = "default";
+}
+System.out.println("Set " + ecPolicyName + " erasure coding policy on" 
+
+" " + path);
 RemoteIterator dirIt = dfs.listStatusIterator(p);
 if (dirIt.hasNext()) {
   System.out.println("Warning: setting erasure coding policy on a " +
   "non-empty directory will not automatically convert existing " +
-  "files to " + actualECPolicyName + " erasure coding policy");
+  "files to " + ecPolicyName + " erasure coding policy");
 }
   } catch (Exception e) {
 System.err.println(AdminHelper.prettifyException(e));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5afd7efe/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 34f5176..6411fe6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -734,7 +734,7 @@
   
 
   SubstringComparator
-  Set RS-6-3-1024k erasure coding policy on 
/ecdir
+  Set default erasure coding policy on 
/ecdir
 
   
 
@@ -752,11 +752,11 @@
   
 
   SubstringComparator
-  Set RS-6-3-1024k erasure coding policy on 
/ecdir
+  Set default erasure coding policy on 
/ecdir
 
 
   SubstringComparator
-  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to 
RS-6-3-1024k erasure coding policy
+  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to default 
erasure coding policy
 
   
 





hadoop git commit: Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is set. Contributed by Zsolt Venczel."

2018-11-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 762a56cc6 -> 9da6054ca


Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is 
set. Contributed by Zsolt Venczel."

This reverts commit 7dc79a8b5b7af0bf37d25a221be8ed446b0edb74.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9da6054c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9da6054c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9da6054c

Branch: refs/heads/trunk
Commit: 9da6054ca4ff6f8bb19506d80685b17d2c79
Parents: 762a56c
Author: Xiao Chen 
Authored: Tue Nov 13 12:43:58 2018 -0800
Committer: Xiao Chen 
Committed: Tue Nov 13 12:44:25 2018 -0800

--
 .../main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java | 12 ++--
 .../src/test/resources/testErasureCodingConf.xml|  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9da6054c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 903a1e2..5f8626e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -358,16 +358,16 @@ public class ECAdmin extends Configured implements Tool {
   final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
   try {
 dfs.setErasureCodingPolicy(p, ecPolicyName);
-
-String actualECPolicyName = dfs.getErasureCodingPolicy(p).getName();
-
-System.out.println("Set " + actualECPolicyName +
-" erasure coding policy on "+ path);
+if (ecPolicyName == null){
+  ecPolicyName = "default";
+}
+System.out.println("Set " + ecPolicyName + " erasure coding policy on" 
+
+" " + path);
 RemoteIterator dirIt = dfs.listStatusIterator(p);
 if (dirIt.hasNext()) {
   System.out.println("Warning: setting erasure coding policy on a " +
   "non-empty directory will not automatically convert existing " +
-  "files to " + actualECPolicyName + " erasure coding policy");
+  "files to " + ecPolicyName + " erasure coding policy");
 }
   } catch (Exception e) {
 System.err.println(AdminHelper.prettifyException(e));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9da6054c/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 34f5176..6411fe6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -734,7 +734,7 @@
   
 
   SubstringComparator
-  Set RS-6-3-1024k erasure coding policy on 
/ecdir
+  Set default erasure coding policy on 
/ecdir
 
   
 
@@ -752,11 +752,11 @@
   
 
   SubstringComparator
-  Set RS-6-3-1024k erasure coding policy on 
/ecdir
+  Set default erasure coding policy on 
/ecdir
 
 
   SubstringComparator
-  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to 
RS-6-3-1024k erasure coding policy
+  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to default 
erasure coding policy
 
   
 





[1/2] hadoop git commit: HDFS-14039. ec -listPolicies doesn't show correct state for the default policy when the default is not RS(6, 3). Contributed by Kitti Nanasi.

2018-11-08 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 be2a0a5e2 -> bbc94de47


HDFS-14039. ec -listPolicies doesn't show correct state for the default policy 
when the default is not RS(6,3). Contributed by Kitti Nanasi.

Signed-off-by: Xiao Chen 
(cherry picked from commit 8d99648c203004045a9339ad27258092969145d6)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java

(cherry picked from commit 01b8197893c84fa24bb94945f85b3e2ebe8259a5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbc94de4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbc94de4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbc94de4

Branch: refs/heads/branch-3.0
Commit: bbc94de4779265a5cc02b6a4b9f903aeea9b8400
Parents: 76be351
Author: Kitti Nanasi 
Authored: Thu Nov 8 10:00:09 2018 -0800
Committer: Xiao Chen 
Committed: Thu Nov 8 10:22:32 2018 -0800

--
 .../namenode/ErasureCodingPolicyManager.java| 119 ++-
 .../server/namenode/FSImageFormatProtobuf.java  |   4 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   2 +-
 .../server/namenode/TestEnabledECPolicies.java  | 103 +++-
 .../hdfs/server/namenode/TestFSImage.java   |  42 +--
 .../server/namenode/TestNamenodeRetryCache.java |   2 +-
 .../server/namenode/TestStripedINodeFile.java   |   2 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |   2 +-
 8 files changed, 231 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbc94de4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index e7de05b..b840480 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -80,6 +81,15 @@ public final class ErasureCodingPolicyManager {
   private ErasureCodingPolicyInfo[] allPolicies;
 
   /**
+   * All policies in the state as it will be persisted in the fsimage.
+   *
+   * The difference between persisted policies and all policies is that
+   * if a default policy is only enabled at startup,
+   * it will appear as disabled in the persisted policy list and in the fsimage.
+   */
+  private Map<Byte, ErasureCodingPolicyInfo> allPersistedPolicies;
+
+  /**
* All enabled policies sorted by name for fast querying, including built-in
* policy, user defined policy.
*/
@@ -89,6 +99,7 @@ public final class ErasureCodingPolicyManager {
*/
   private ErasureCodingPolicy[] enabledPolicies;
 
+  private String defaultPolicyName;
 
   private volatile static ErasureCodingPolicyManager instance = null;
 
@@ -101,14 +112,11 @@ public final class ErasureCodingPolicyManager {
 
   private ErasureCodingPolicyManager() {}
 
-  public void init(Configuration conf) {
-// Load erasure coding default policy
-final String defaultPolicyName = conf.getTrimmed(
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
+  public void init(Configuration conf) throws IOException {
 this.policiesByName = new TreeMap<>();
 this.policiesByID = new TreeMap<>();
 this.enabledPoliciesByName = new TreeMap<>();
+this.allPersistedPolicies = new TreeMap<>();
 
 /**
  * TODO: load user defined EC policy from fsImage HDFS-7859
@@ -124,31 +132,12 @@ public final class ErasureCodingPolicyManager {
   final ErasureCodingPolicyInfo info = new ErasureCodingPolicyInfo(policy);
   policiesByName.put(policy.getName(), info);
   policiesByID.put(policy.getId(), info);
+  allPersistedPolicies.put(policy.getId(),
+  new ErasureCodingPolicyInfo(policy));
 }
 
-if (!defaultPolicyName.isEmpty()) {
-  final ErasureCodingPolicyInfo info =
-  policiesByName.get(defaultPolicyName);
-  if (info == null) {
-String names = policiesByName.values()
-.stream().map((pi) -> pi.getPolicy().getName())
-.collect(Collectors.joini
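As background for the change above, a self-contained sketch of the bookkeeping idea: keep the persisted (fsimage) view of each policy's state separate from the in-memory view, so a default policy that is only enabled through configuration at startup is still written out as DISABLED. The class and field names below are illustrative, not Hadoop's.

import java.util.Map;
import java.util.TreeMap;

class PolicyStateSketch {
  enum State { ENABLED, DISABLED }

  static final class PolicyInfo {
    final byte id;
    final String name;
    State state = State.DISABLED;
    PolicyInfo(byte id, String name) { this.id = id; this.name = name; }
  }

  private final Map<String, PolicyInfo> inMemory = new TreeMap<>();  // what this NameNode run uses
  private final Map<Byte, PolicyInfo> persisted = new TreeMap<>();   // what goes into the fsimage

  void load(PolicyInfo p, String defaultPolicyFromConf) {
    inMemory.put(p.name, p);
    // The persisted copy starts out DISABLED even if the config names it as the default.
    persisted.put(p.id, new PolicyInfo(p.id, p.name));
    if (p.name.equals(defaultPolicyFromConf)) {
      p.state = State.ENABLED;                     // enabled for this run only
    }
  }

  void enableExplicitly(String name) {
    PolicyInfo p = inMemory.get(name);
    p.state = State.ENABLED;
    persisted.get(p.id).state = State.ENABLED;     // explicit enables are persisted
  }
}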

[2/2] hadoop git commit: HDFS-13772. Erasure coding: Unnecessary NameNode Logs displaying for Enabling/Disabling Erasure coding policies which are already enabled/disabled. Contributed by Ayush Saxena

2018-11-08 Thread xiao
HDFS-13772. Erasure coding: Unnecessary NameNode Logs displaying for 
Enabling/Disabling Erasure coding policies which are already enabled/disabled. 
Contributed by Ayush Saxena

(cherry picked from commit 8df2eb8119188b8e5515295523afc23046e1db81)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76be3515
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76be3515
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76be3515

Branch: refs/heads/branch-3.0
Commit: 76be3515bfb926464959b3151ef1b75039a00aab
Parents: be2a0a5
Author: Vinayakumar B 
Authored: Tue Aug 21 09:33:19 2018 +0530
Committer: Xiao Chen 
Committed: Thu Nov 8 10:22:32 2018 -0800

--
 .../namenode/ErasureCodingPolicyManager.java| 15 +++
 .../server/namenode/FSDirErasureCodingOp.java   | 22 -
 .../hdfs/server/namenode/FSNamesystem.java  | 26 +++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  8 +++---
 .../server/namenode/TestNamenodeRetryCache.java |  2 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |  2 +-
 6 files changed, 47 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76be3515/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 3a310da..e7de05b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -356,7 +356,7 @@ public final class ErasureCodingPolicyManager {
   /**
* Disable an erasure coding policy by policyName.
*/
-  public synchronized void disablePolicy(String name) {
+  public synchronized boolean disablePolicy(String name) {
 ErasureCodingPolicyInfo info = policiesByName.get(name);
 if (info == null) {
   throw new HadoopIllegalArgumentException("The policy name " +
@@ -367,27 +367,32 @@ public final class ErasureCodingPolicyManager {
   enabledPoliciesByName.remove(name);
   enabledPolicies =
   enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
+  info.setState(ErasureCodingPolicyState.DISABLED);
+  LOG.info("Disable the erasure coding policy " + name);
+  return true;
 }
-info.setState(ErasureCodingPolicyState.DISABLED);
-LOG.info("Disable the erasure coding policy " + name);
+return false;
   }
 
   /**
* Enable an erasure coding policy by policyName.
*/
-  public synchronized void enablePolicy(String name) {
+  public synchronized boolean enablePolicy(String name) {
 final ErasureCodingPolicyInfo info = policiesByName.get(name);
 if (info == null) {
   throw new HadoopIllegalArgumentException("The policy name " +
   name + " does not exist");
 }
-
+if (enabledPoliciesByName.containsKey(name)) {
+  return false;
+}
 final ErasureCodingPolicy ecPolicy = info.getPolicy();
 enabledPoliciesByName.put(name, ecPolicy);
 info.setState(ErasureCodingPolicyState.ENABLED);
 enabledPolicies =
 enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
 LOG.info("Enable the erasure coding policy " + name);
+return true;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76be3515/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 3a32db4..f6a4093 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -252,11 +252,16 @@ final class FSDirErasureCodingOp {
*  rebuilding
* @throws IOException
*/
-  static void enableErasureCodingPolicy(final FSNamesystem fsn,
+  static boolean enableErasureCodingPolicy(final FSNamesystem fsn,
   String ecPolicyName, final boolean logRetryCache) throw
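A hedged sketch of how a caller can use the new boolean return values to avoid the redundant "Enable/Disable the erasure coding policy" log lines this issue describes; the interface and names below are illustrative, not the actual FSNamesystem code.

interface PolicyManagerLike {
  boolean enablePolicy(String name);    // true only if the state actually changed
  boolean disablePolicy(String name);
}

class EnableDisableSketch {
  static void enableIfNeeded(PolicyManagerLike mgr, String policy) {
    if (mgr.enablePolicy(policy)) {
      System.out.println("Enabled erasure coding policy " + policy);
    }
    // Already enabled: no state change, nothing logged.
  }

  static void disableIfNeeded(PolicyManagerLike mgr, String policy) {
    if (mgr.disablePolicy(policy)) {
      System.out.println("Disabled erasure coding policy " + policy);
    }
  }
}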

hadoop git commit: HDFS-14039. ec -listPolicies doesn't show correct state for the default policy when the default is not RS(6, 3). Contributed by Kitti Nanasi.

2018-11-08 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 a3b61baf9 -> 01b819789


HDFS-14039. ec -listPolicies doesn't show correct state for the default policy 
when the default is not RS(6,3). Contributed by Kitti Nanasi.

Signed-off-by: Xiao Chen 
(cherry picked from commit 8d99648c203004045a9339ad27258092969145d6)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01b81978
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01b81978
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01b81978

Branch: refs/heads/branch-3.1
Commit: 01b8197893c84fa24bb94945f85b3e2ebe8259a5
Parents: a3b61ba
Author: Kitti Nanasi 
Authored: Thu Nov 8 10:00:09 2018 -0800
Committer: Xiao Chen 
Committed: Thu Nov 8 10:08:34 2018 -0800

--
 .../namenode/ErasureCodingPolicyManager.java| 119 ++-
 .../server/namenode/FSImageFormatProtobuf.java  |   4 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   2 +-
 .../server/namenode/TestEnabledECPolicies.java  | 103 +++-
 .../hdfs/server/namenode/TestFSImage.java   |  42 +--
 .../server/namenode/TestNamenodeRetryCache.java |   2 +-
 .../server/namenode/TestStripedINodeFile.java   |   2 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |   2 +-
 8 files changed, 231 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01b81978/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index e7de05b..b840480 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -80,6 +81,15 @@ public final class ErasureCodingPolicyManager {
   private ErasureCodingPolicyInfo[] allPolicies;
 
   /**
+   * All policies in the state as it will be persisted in the fsimage.
+   *
+   * The difference between persisted policies and all policies is that
+   * if a default policy is only enabled at startup,
+   * it will appear as disabled in the persisted policy list and in the fsimage.
+   */
+  private Map<Byte, ErasureCodingPolicyInfo> allPersistedPolicies;
+
+  /**
* All enabled policies sorted by name for fast querying, including built-in
* policy, user defined policy.
*/
@@ -89,6 +99,7 @@ public final class ErasureCodingPolicyManager {
*/
   private ErasureCodingPolicy[] enabledPolicies;
 
+  private String defaultPolicyName;
 
   private volatile static ErasureCodingPolicyManager instance = null;
 
@@ -101,14 +112,11 @@ public final class ErasureCodingPolicyManager {
 
   private ErasureCodingPolicyManager() {}
 
-  public void init(Configuration conf) {
-// Load erasure coding default policy
-final String defaultPolicyName = conf.getTrimmed(
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
+  public void init(Configuration conf) throws IOException {
 this.policiesByName = new TreeMap<>();
 this.policiesByID = new TreeMap<>();
 this.enabledPoliciesByName = new TreeMap<>();
+this.allPersistedPolicies = new TreeMap<>();
 
 /**
  * TODO: load user defined EC policy from fsImage HDFS-7859
@@ -124,31 +132,12 @@ public final class ErasureCodingPolicyManager {
   final ErasureCodingPolicyInfo info = new ErasureCodingPolicyInfo(policy);
   policiesByName.put(policy.getName(), info);
   policiesByID.put(policy.getId(), info);
+  allPersistedPolicies.put(policy.getId(),
+  new ErasureCodingPolicyInfo(policy));
 }
 
-if (!defaultPolicyName.isEmpty()) {
-  final ErasureCodingPolicyInfo info =
-  policiesByName.get(defaultPolicyName);
-  if (info == null) {
-String names = policiesByName.values()
-.stream().map((pi) -> pi.getPolicy().getName())
-.collect(Collectors.joining(", "));
-String msg = String.format("EC policy '%s' specifi

hadoop git commit: HDFS-14039. ec -listPolicies doesn't show correct state for the default policy when the default is not RS(6, 3). Contributed by Kitti Nanasi.

2018-11-08 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 66715005f -> 57409874b


HDFS-14039. ec -listPolicies doesn't show correct state for the default policy 
when the default is not RS(6,3). Contributed by Kitti Nanasi.

Signed-off-by: Xiao Chen 
(cherry picked from commit 8d99648c203004045a9339ad27258092969145d6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57409874
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57409874
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57409874

Branch: refs/heads/branch-3.2
Commit: 57409874bb7ad6a4f9a68ee22dfe294d7126419f
Parents: 6671500
Author: Kitti Nanasi 
Authored: Thu Nov 8 10:00:09 2018 -0800
Committer: Xiao Chen 
Committed: Thu Nov 8 10:08:26 2018 -0800

--
 .../namenode/ErasureCodingPolicyManager.java| 119 ++-
 .../server/namenode/FSImageFormatProtobuf.java  |   4 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   2 +-
 .../server/namenode/TestEnabledECPolicies.java  | 103 +++-
 .../hdfs/server/namenode/TestFSImage.java   |  42 +--
 .../server/namenode/TestNamenodeRetryCache.java |   2 +-
 .../server/namenode/TestStripedINodeFile.java   |   2 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |   2 +-
 8 files changed, 231 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57409874/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index d2bf3af..57fa958 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -81,6 +82,15 @@ public final class ErasureCodingPolicyManager {
   private ErasureCodingPolicyInfo[] allPolicies;
 
   /**
+   * All policies in the state as it will be persisted in the fsimage.
+   *
+   * The difference between persisted policies and all policies is that
+   * if a default policy is only enabled at startup,
+   * it will appear as disabled in the persisted policy list and in the fsimage.
+   */
+  private Map<Byte, ErasureCodingPolicyInfo> allPersistedPolicies;
+
+  /**
* All enabled policies sorted by name for fast querying, including built-in
* policy, user defined policy.
*/
@@ -90,6 +100,7 @@ public final class ErasureCodingPolicyManager {
*/
   private ErasureCodingPolicy[] enabledPolicies;
 
+  private String defaultPolicyName;
 
   private volatile static ErasureCodingPolicyManager instance = null;
 
@@ -102,14 +113,11 @@ public final class ErasureCodingPolicyManager {
 
   private ErasureCodingPolicyManager() {}
 
-  public void init(Configuration conf) {
-// Load erasure coding default policy
-final String defaultPolicyName = conf.getTrimmed(
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
+  public void init(Configuration conf) throws IOException {
 this.policiesByName = new TreeMap<>();
 this.policiesByID = new TreeMap<>();
 this.enabledPoliciesByName = new TreeMap<>();
+this.allPersistedPolicies = new TreeMap<>();
 
 /**
  * TODO: load user defined EC policy from fsImage HDFS-7859
@@ -125,31 +133,12 @@ public final class ErasureCodingPolicyManager {
   final ErasureCodingPolicyInfo info = new ErasureCodingPolicyInfo(policy);
   policiesByName.put(policy.getName(), info);
   policiesByID.put(policy.getId(), info);
+  allPersistedPolicies.put(policy.getId(),
+  new ErasureCodingPolicyInfo(policy));
 }
 
-if (!defaultPolicyName.isEmpty()) {
-  final ErasureCodingPolicyInfo info =
-  policiesByName.get(defaultPolicyName);
-  if (info == null) {
-String names = policiesByName.values()
-.stream().map((pi) -> pi.getPolicy().getName())
-.collect(Collectors.joining(", "));
-String msg = String.format("EC policy '%s' specified at %s is not a "
-+ "valid policy. Please choose from list of available "
-  

hadoop git commit: HDFS-14039. ec -listPolicies doesn't show correct state for the default policy when the default is not RS(6, 3). Contributed by Kitti Nanasi.

2018-11-08 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 724c15007 -> 8d99648c2


HDFS-14039. ec -listPolicies doesn't show correct state for the default policy 
when the default is not RS(6,3). Contributed by Kitti Nanasi.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d99648c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d99648c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d99648c

Branch: refs/heads/trunk
Commit: 8d99648c203004045a9339ad27258092969145d6
Parents: 724c150
Author: Kitti Nanasi 
Authored: Thu Nov 8 10:00:09 2018 -0800
Committer: Xiao Chen 
Committed: Thu Nov 8 10:01:19 2018 -0800

--
 .../namenode/ErasureCodingPolicyManager.java| 119 ++-
 .../server/namenode/FSImageFormatProtobuf.java  |   4 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   2 +-
 .../server/namenode/TestEnabledECPolicies.java  | 103 +++-
 .../hdfs/server/namenode/TestFSImage.java   |  42 +--
 .../server/namenode/TestNamenodeRetryCache.java |   2 +-
 .../server/namenode/TestStripedINodeFile.java   |   2 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |   2 +-
 8 files changed, 231 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d99648c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index d2bf3af..57fa958 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -81,6 +82,15 @@ public final class ErasureCodingPolicyManager {
   private ErasureCodingPolicyInfo[] allPolicies;
 
   /**
+   * All policies in the state as it will be persisted in the fsimage.
+   *
+   * The difference between persisted policies and all policies is that
+   * if a default policy is only enabled at startup,
+   * it will appear as disabled in the persisted policy list and in the fsimage.
+   */
+  private Map<Byte, ErasureCodingPolicyInfo> allPersistedPolicies;
+
+  /**
* All enabled policies sorted by name for fast querying, including built-in
* policy, user defined policy.
*/
@@ -90,6 +100,7 @@ public final class ErasureCodingPolicyManager {
*/
   private ErasureCodingPolicy[] enabledPolicies;
 
+  private String defaultPolicyName;
 
   private volatile static ErasureCodingPolicyManager instance = null;
 
@@ -102,14 +113,11 @@ public final class ErasureCodingPolicyManager {
 
   private ErasureCodingPolicyManager() {}
 
-  public void init(Configuration conf) {
-// Load erasure coding default policy
-final String defaultPolicyName = conf.getTrimmed(
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
+  public void init(Configuration conf) throws IOException {
 this.policiesByName = new TreeMap<>();
 this.policiesByID = new TreeMap<>();
 this.enabledPoliciesByName = new TreeMap<>();
+this.allPersistedPolicies = new TreeMap<>();
 
 /**
  * TODO: load user defined EC policy from fsImage HDFS-7859
@@ -125,31 +133,12 @@ public final class ErasureCodingPolicyManager {
   final ErasureCodingPolicyInfo info = new ErasureCodingPolicyInfo(policy);
   policiesByName.put(policy.getName(), info);
   policiesByID.put(policy.getId(), info);
+  allPersistedPolicies.put(policy.getId(),
+  new ErasureCodingPolicyInfo(policy));
 }
 
-if (!defaultPolicyName.isEmpty()) {
-  final ErasureCodingPolicyInfo info =
-  policiesByName.get(defaultPolicyName);
-  if (info == null) {
-String names = policiesByName.values()
-.stream().map((pi) -> pi.getPolicy().getName())
-.collect(Collectors.joining(", "));
-String msg = String.format("EC policy '%s' specified at %s is not a "
-+ "valid policy. Please choose from list of available "
-+ "policies: [%s]",
-defaultPolicyName,
-DFSConfigKeys.DFS_NAME

hadoop git commit: HDFS-14053. Provide ability for NN to re-replicate based on topology changes. Contributed by Hrishikesh Gadre.

2018-11-05 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk c7fcca0d7 -> ffc9c50e0


HDFS-14053. Provide ability for NN to re-replicate based on topology changes. 
Contributed by Hrishikesh Gadre.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffc9c50e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffc9c50e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffc9c50e

Branch: refs/heads/trunk
Commit: ffc9c50e074aeca804674c6e1e6b0f1eb629e230
Parents: c7fcca0
Author: Xiao Chen 
Authored: Mon Nov 5 21:36:43 2018 -0800
Committer: Xiao Chen 
Committed: Mon Nov 5 21:38:39 2018 -0800

--
 .../server/blockmanagement/BlockManager.java| 38 +++
 .../hdfs/server/namenode/NamenodeFsck.java  | 33 +
 .../org/apache/hadoop/hdfs/tools/DFSck.java | 10 ++-
 .../src/site/markdown/HDFSCommands.md   |  3 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java | 21 ++
 .../TestBlocksWithNotEnoughRacks.java   | 72 
 6 files changed, 173 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffc9c50e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a5fb0b1..36bbeb1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3535,6 +3535,44 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
+   * Schedule replication work for a specified list of mis-replicated
+   * blocks and return total number of blocks scheduled for replication.
+   *
+   * @param blocks A list of blocks for which replication work needs to
+   *  be scheduled.
+   * @return Total number of blocks for which replication work is scheduled.
+   **/
+  public int processMisReplicatedBlocks(List<BlockInfo> blocks) {
+int processed = 0;
+Iterator<BlockInfo> iter = blocks.iterator();
+
+try {
+  while (isPopulatingReplQueues() && namesystem.isRunning()
+  && !Thread.currentThread().isInterrupted()
+  && iter.hasNext()) {
+int limit = processed + numBlocksPerIteration;
+namesystem.writeLockInterruptibly();
+try {
+  while (iter.hasNext() && processed < limit) {
+BlockInfo blk = iter.next();
+MisReplicationResult r = processMisReplicatedBlock(blk);
+LOG.debug("BLOCK* processMisReplicatedBlocks: " +
+"Re-scanned block {}, result is {}", blk, r);
+  }
+} finally {
+  namesystem.writeUnlock();
+}
+  }
+} catch (InterruptedException ex) {
+  LOG.info("Caught InterruptedException while scheduling replication work" 
+
+  " for mis-replicated blocks");
+  Thread.currentThread().interrupt();
+}
+
+return processed;
+  }
+
+  /**
* Process a single possibly misreplicated block. This adds it to the
* appropriate queues if necessary, and returns a result code indicating
* what happened with it.
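For illustration, a self-contained sketch (hypothetical lock and block types, not Hadoop's internals) of the pattern processMisReplicatedBlocks uses: walk the block list in bounded chunks, re-acquiring the write lock for each chunk so it is not held across the whole scan, and stop cleanly if the thread is interrupted.

import java.util.Iterator;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class ChunkedRescanSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private static final int BLOCKS_PER_ITERATION = 2048;   // stands in for numBlocksPerIteration

  int rescan(List<Long> blockIds) {                        // Long stands in for BlockInfo
    int processed = 0;
    Iterator<Long> iter = blockIds.iterator();
    while (iter.hasNext() && !Thread.currentThread().isInterrupted()) {
      int limit = processed + BLOCKS_PER_ITERATION;
      lock.writeLock().lock();
      try {
        while (iter.hasNext() && processed < limit) {
          long blockId = iter.next();
          // ... re-check replication for blockId and queue work if needed ...
          processed++;
        }
      } finally {
        lock.writeLock().unlock();
      }
    }
    return processed;
  }
}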

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffc9c50e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 56607f0..f54b407 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -25,6 +25,7 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.util.ArrayList;
+import java.util.LinkedList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Date;
@@ -173,6 +174,14 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
*/
   private boolean doDelete = false;
 
+  /**
+   * True if the user specified the -replicate option.
+   *
+   * When this option is in effect, we will initiate replication work to make
+   * mis-r

hadoop git commit: HDFS-14027. DFSStripedOutputStream should implement both hsync methods.

2018-10-29 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 0f34ff772 -> 20c9a12bc


HDFS-14027. DFSStripedOutputStream should implement both hsync methods.

(cherry picked from commit db7e636824a36b90ba1c8e9b2fba1162771700fe)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java

(cherry picked from commit 399645ebc162371a63eb81840e01ed52261f43bc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20c9a12b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20c9a12b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20c9a12b

Branch: refs/heads/branch-3.0
Commit: 20c9a12bc754bbd1a2af3e71d2cbf0d6489c0be4
Parents: 0f34ff7
Author: Xiao Chen 
Authored: Mon Oct 29 19:05:52 2018 -0700
Committer: Xiao Chen 
Committed: Mon Oct 29 19:14:12 2018 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 12 +++
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 36 +---
 2 files changed, 35 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20c9a12b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index ed875bb..df9770e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.StreamCapabilities;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -956,11 +957,22 @@ public class DFSStripedOutputStream extends 
DFSOutputStream
   @Override
   public void hflush() {
 // not supported yet
+LOG.debug("DFSStripedOutputStream does not support hflush. "
++ "Caller should check StreamCapabilities before calling.");
   }
 
   @Override
   public void hsync() {
 // not supported yet
+LOG.debug("DFSStripedOutputStream does not support hsync. "
++ "Caller should check StreamCapabilities before calling.");
+  }
+
+  @Override
+  public void hsync(EnumSet<SyncFlag> syncFlags) {
+// not supported yet
+LOG.debug("DFSStripedOutputStream does not support hsync {}. "
++ "Caller should check StreamCapabilities before calling.", syncFlags);
   }
 
   @Override
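A hedged usage sketch (not from the patch) of the guidance in the debug messages above: callers should check the stream's capabilities before relying on hflush()/hsync(), because a striped (erasure-coded) output stream supports neither and the calls are no-ops. The file path here is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HsyncIfSupported {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataOutputStream out = fs.create(new Path("/ec-file-1"))) {
      out.write("hello".getBytes("UTF-8"));
      if (out.hasCapability("hsync")) {   // "hsync" is the StreamCapabilities capability name
        out.hsync();                      // replicated file: data is durably persisted
      } else {
        out.flush();                      // striped (EC) file: hsync() would silently do nothing
      }
    }
  }
}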

http://git-wip-us.apache.org/repos/asf/hadoop/blob/20c9a12b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 3714542..473557b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -18,11 +18,13 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
+import java.util.EnumSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -30,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StreamCapabilities.StreamCapability;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.IOUtils;
@@ -195,19 +198,26 @@ public class TestDFSStripedOutputStream {
   public void testStreamFlush() throws Exception {
 final byte[] bytes = StripedFileTestUtil.generateBytes(blockSize *
 dataBlocks * 3 + cellSize * dataBlocks + cellSize + 12

hadoop git commit: HDFS-14027. DFSStripedOutputStream should implement both hsync methods.

2018-10-29 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 288bc3f1e -> 8788489df


HDFS-14027. DFSStripedOutputStream should implement both hsync methods.

(cherry picked from commit db7e636824a36b90ba1c8e9b2fba1162771700fe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8788489d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8788489d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8788489d

Branch: refs/heads/branch-3.2
Commit: 8788489df4b8d0830247dad30f3da6c7a31b21ac
Parents: 288bc3f
Author: Xiao Chen 
Authored: Mon Oct 29 19:05:52 2018 -0700
Committer: Xiao Chen 
Committed: Mon Oct 29 19:13:43 2018 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 12 +++
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 36 +---
 2 files changed, 35 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8788489d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index ed875bb..df9770e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.StreamCapabilities;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -956,11 +957,22 @@ public class DFSStripedOutputStream extends 
DFSOutputStream
   @Override
   public void hflush() {
 // not supported yet
+LOG.debug("DFSStripedOutputStream does not support hflush. "
++ "Caller should check StreamCapabilities before calling.");
   }
 
   @Override
   public void hsync() {
 // not supported yet
+LOG.debug("DFSStripedOutputStream does not support hsync. "
++ "Caller should check StreamCapabilities before calling.");
+  }
+
+  @Override
+  public void hsync(EnumSet<SyncFlag> syncFlags) {
+// not supported yet
+LOG.debug("DFSStripedOutputStream does not support hsync {}. "
++ "Caller should check StreamCapabilities before calling.", syncFlags);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8788489d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 865a736..092aa0a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
+import java.util.EnumSet;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -31,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StreamCapabilities.StreamCapability;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.IOUtils;
@@ -196,19 +199,26 @@ public class TestDFSStripedOutputStream {
   public void testStreamFlush() throws Exception {
 final byte[] bytes = StripedFileTestUtil.generateBytes(blockSize *
 dataBlocks * 3 + cellSize * dataBlocks + cellSize + 123);
-FSDataOutputStream os = fs.create(new Path("/ec-file-1"));
-assertFalse("DFSStripedOutputStream should not have hflush() " +
-&qu

hadoop git commit: HDFS-14027. DFSStripedOutputStream should implement both hsync methods.

2018-10-29 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 7dd8eafe3 -> 399645ebc


HDFS-14027. DFSStripedOutputStream should implement both hsync methods.

(cherry picked from commit db7e636824a36b90ba1c8e9b2fba1162771700fe)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/399645eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/399645eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/399645eb

Branch: refs/heads/branch-3.1
Commit: 399645ebc162371a63eb81840e01ed52261f43bc
Parents: 7dd8eaf
Author: Xiao Chen 
Authored: Mon Oct 29 19:05:52 2018 -0700
Committer: Xiao Chen 
Committed: Mon Oct 29 19:13:51 2018 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 12 +++
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 36 +---
 2 files changed, 35 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/399645eb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index ed875bb..df9770e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.StreamCapabilities;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -956,11 +957,22 @@ public class DFSStripedOutputStream extends 
DFSOutputStream
   @Override
   public void hflush() {
 // not supported yet
+LOG.debug("DFSStripedOutputStream does not support hflush. "
++ "Caller should check StreamCapabilities before calling.");
   }
 
   @Override
   public void hsync() {
 // not supported yet
+LOG.debug("DFSStripedOutputStream does not support hsync. "
++ "Caller should check StreamCapabilities before calling.");
+  }
+
+  @Override
+  public void hsync(EnumSet<SyncFlag> syncFlags) {
+// not supported yet
+LOG.debug("DFSStripedOutputStream does not support hsync {}. "
++ "Caller should check StreamCapabilities before calling.", syncFlags);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/399645eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 3714542..473557b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -18,11 +18,13 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
+import java.util.EnumSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -30,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StreamCapabilities.StreamCapability;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.IOUtils;
@@ -195,19 +198,26 @@ public class TestDFSStripedOutputStream {
   public void testStreamFlush() throws Exception {
 final byte[] bytes = StripedFileTestUtil.generateBytes(blockSize *
 dataBlocks * 3 + cellSize * dataBlocks + cellSize + 123);
-FSDataOutputStream os = fs.create(new Path("/ec-file-1

hadoop git commit: HDFS-14027. DFSStripedOutputStream should implement both hsync methods.

2018-10-29 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 496f0ffe9 -> db7e63682


HDFS-14027. DFSStripedOutputStream should implement both hsync methods.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db7e6368
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db7e6368
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db7e6368

Branch: refs/heads/trunk
Commit: db7e636824a36b90ba1c8e9b2fba1162771700fe
Parents: 496f0ff
Author: Xiao Chen 
Authored: Mon Oct 29 19:05:52 2018 -0700
Committer: Xiao Chen 
Committed: Mon Oct 29 19:06:15 2018 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 12 +++
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 36 +---
 2 files changed, 35 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db7e6368/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index ed875bb..df9770e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.StreamCapabilities;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -956,11 +957,22 @@ public class DFSStripedOutputStream extends 
DFSOutputStream
   @Override
   public void hflush() {
 // not supported yet
+LOG.debug("DFSStripedOutputStream does not support hflush. "
++ "Caller should check StreamCapabilities before calling.");
   }
 
   @Override
   public void hsync() {
 // not supported yet
+LOG.debug("DFSStripedOutputStream does not support hsync. "
++ "Caller should check StreamCapabilities before calling.");
+  }
+
+  @Override
+  public void hsync(EnumSet<SyncFlag> syncFlags) {
+// not supported yet
+LOG.debug("DFSStripedOutputStream does not support hsync {}. "
++ "Caller should check StreamCapabilities before calling.", syncFlags);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db7e6368/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 865a736..092aa0a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
+import java.util.EnumSet;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -31,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StreamCapabilities.StreamCapability;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.IOUtils;
@@ -196,19 +199,26 @@ public class TestDFSStripedOutputStream {
   public void testStreamFlush() throws Exception {
 final byte[] bytes = StripedFileTestUtil.generateBytes(blockSize *
 dataBlocks * 3 + cellSize * dataBlocks + cellSize + 123);
-FSDataOutputStream os = fs.create(new Path("/ec-file-1"));
-assertFalse("DFSStripedOutputStream should not have hflush() " +
-"capability yet!", os.hasCapability(
-StreamC
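
The new overrides keep hflush() and hsync() as no-ops that only log at debug
level, so client code is expected to probe StreamCapabilities before relying on
either call. A minimal, hypothetical sketch of such a caller (the class, method
and path names below are illustrative, not part of the patch):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SyncAwareWriter {
      // Hedged sketch: only call hsync() when the stream reports the capability.
      public static void writeDurably(FileSystem fs, Path path, byte[] data)
          throws IOException {
        try (FSDataOutputStream out = fs.create(path)) {
          out.write(data);
          if (out.hasCapability("hsync")) {
            out.hsync();   // replicated file: flush and sync on the DataNodes
          }
          // For an erasure-coded file hasCapability("hsync") is false; the
          // data only becomes durable once the stream is closed.
        }
      }
    }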

hadoop git commit: HDFS-14003. Fix findbugs warning in trunk for FSImageFormatPBINode. Contributed by Yiqun Lin.

2018-10-24 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 e8832418b -> 1d5390679


HDFS-14003. Fix findbugs warning in trunk for FSImageFormatPBINode. Contributed 
by Yiqun Lin.

(cherry picked from commit 0e56c883cd2310f3ff9d62afb306b1ab27419c36)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d539067
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d539067
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d539067

Branch: refs/heads/branch-3.0
Commit: 1d5390679ea847af578a612e5c6ce50c7c7fd3fb
Parents: e883241
Author: Inigo Goiri 
Authored: Thu Oct 18 10:53:30 2018 -0700
Committer: Xiao Chen 
Committed: Wed Oct 24 16:14:22 2018 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d539067/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index ddc19bd..3193c4f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -659,7 +659,6 @@ public final class FSImageFormatPBINode {
 }
 
 private void save(OutputStream out, INodeSymlink n) throws IOException {
-  SaverContext state = parent.getSaverContext();
   INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
   .newBuilder()
   .setPermission(buildPermissionStatus(n))
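
The findbugs warning being fixed is the dead-local-store pattern: a local
variable is assigned and then never read. A small, hypothetical illustration of
the shape of the problem and of the fix (the class and values are made up, not
taken from FSImageFormatPBINode):

    import java.util.List;

    class DeadStoreExample {
      // Findbugs (DLS_DEAD_LOCAL_STORE) flags this: 'unused' is written, never read.
      int sumWithWarning(List<Integer> values) {
        int unused = values.size();
        return values.stream().mapToInt(Integer::intValue).sum();
      }

      // The fix is simply to delete the dead assignment, as the patch does
      // with the unused SaverContext local.
      int sumFixed(List<Integer> values) {
        return values.stream().mapToInt(Integer::intValue).sum();
      }
    }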





hadoop git commit: HDFS-14003. Fix findbugs warning in trunk for FSImageFormatPBINode. Contributed by Yiqun Lin.

2018-10-24 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 aed71316e -> 5b995f85f


HDFS-14003. Fix findbugs warning in trunk for FSImageFormatPBINode. Contributed 
by Yiqun Lin.

(cherry picked from commit 0e56c883cd2310f3ff9d62afb306b1ab27419c36)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b995f85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b995f85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b995f85

Branch: refs/heads/branch-3.2
Commit: 5b995f85f5d78b96efe088f4b229127f5c92dbc1
Parents: aed7131
Author: Inigo Goiri 
Authored: Thu Oct 18 10:53:30 2018 -0700
Committer: Xiao Chen 
Committed: Wed Oct 24 16:14:06 2018 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b995f85/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 7c4f21e..bc455e0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -660,7 +660,6 @@ public final class FSImageFormatPBINode {
 }
 
 private void save(OutputStream out, INodeSymlink n) throws IOException {
-  SaverContext state = parent.getSaverContext();
   INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
   .newBuilder()
   .setPermission(buildPermissionStatus(n))





hadoop git commit: HDFS-14003. Fix findbugs warning in trunk for FSImageFormatPBINode. Contributed by Yiqun Lin.

2018-10-24 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 96cedb87b -> 4b1f2ecd4


HDFS-14003. Fix findbugs warning in trunk for FSImageFormatPBINode. Contributed 
by Yiqun Lin.

(cherry picked from commit 0e56c883cd2310f3ff9d62afb306b1ab27419c36)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b1f2ecd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b1f2ecd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b1f2ecd

Branch: refs/heads/branch-3.1
Commit: 4b1f2ecd4c33ce05422f4a427a1e6d18cc30a558
Parents: 96cedb8
Author: Inigo Goiri 
Authored: Thu Oct 18 10:53:30 2018 -0700
Committer: Xiao Chen 
Committed: Wed Oct 24 16:14:14 2018 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b1f2ecd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index ddc19bd..3193c4f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -659,7 +659,6 @@ public final class FSImageFormatPBINode {
 }
 
 private void save(OutputStream out, INodeSymlink n) throws IOException {
-  SaverContext state = parent.getSaverContext();
   INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
   .newBuilder()
   .setPermission(buildPermissionStatus(n))





hadoop git commit: Fix potential FSImage corruption. Contributed by Daryn Sharp and Vinayakumar B.

2018-10-24 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 6aef51479 -> e8832418b


Fix potential FSImage corruption. Contributed by Daryn Sharp and Vinayakumar B.

(cherry picked from commit f1996ccbaee734d423caa9d47a571cfff98ef42c)
(cherry picked from commit 96cedb87b94c07c11152580bf36978186d622b50)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8832418
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8832418
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8832418

Branch: refs/heads/branch-3.0
Commit: e8832418bf97294c82584d76e78972a128d41b32
Parents: 6aef514
Author: Xiao Chen 
Authored: Wed Oct 24 15:49:27 2018 -0700
Committer: Xiao Chen 
Committed: Wed Oct 24 16:10:51 2018 -0700

--
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |   8 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +
 .../server/namenode/AclEntryStatusFormat.java   | 109 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +
 .../server/namenode/FSImageFormatPBINode.java   | 101 +++---
 .../server/namenode/FSImageFormatProtobuf.java  |  26 ++-
 .../namenode/INodeWithAdditionalFields.java |  36 +++-
 .../server/namenode/SerialNumberManager.java| 200 +--
 .../hdfs/server/namenode/SerialNumberMap.java   |  43 +++-
 .../hdfs/server/namenode/XAttrFormat.java   |  95 +
 .../hdfs/server/namenode/XAttrStorage.java  |  11 -
 .../tools/offlineImageViewer/FSImageLoader.java |  18 +-
 .../offlineImageViewer/PBImageTextWriter.java   |   3 +-
 .../offlineImageViewer/PBImageXmlWriter.java|  22 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|   1 +
 .../TestCommitBlockSynchronization.java |   6 +-
 16 files changed, 437 insertions(+), 250 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8832418/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 51ad08f..195fd8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -26,6 +26,10 @@ import java.io.Serializable;
 public class LongBitFormat implements Serializable {
   private static final long serialVersionUID = 1L;
 
+  public interface Enum {
+int getLength();
+  }
+
   private final String NAME;
   /** Bit offset */
   private final int OFFSET;
@@ -69,4 +73,8 @@ public class LongBitFormat implements Serializable {
   public long getMin() {
 return MIN;
   }
+
+  public int getLength() {
+return LENGTH;
+  }
 }
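
The new LongBitFormat.Enum interface gives fields packed into a long a way to
report their bit width through getLength(). A hedged sketch of an implementer,
assuming the existing (name, previous, length, min) constructor; the enum and
its field widths are hypothetical, not one of the formats touched by this patch:

    import org.apache.hadoop.hdfs.util.LongBitFormat;

    enum HeaderField implements LongBitFormat.Enum {
      ID(null, 48, 1),
      FLAGS(ID.BITS, 16, 0);

      private final LongBitFormat BITS;

      HeaderField(LongBitFormat previous, int length, long min) {
        BITS = new LongBitFormat(name(), previous, length, min);
      }

      @Override
      public int getLength() {
        return BITS.getLength();   // accessor added by this change
      }
    }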

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8832418/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 7932e68..861e6df 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -743,6 +743,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final long DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_DEFAULT =
   0;  //no throttling
 
+  // String table in the fsimage utilizes an expanded bit range.
+  public static final String DFS_IMAGE_EXPANDED_STRING_TABLES_KEY =
+  "dfs.image.string-tables.expanded";
+  public static final boolean DFS_IMAGE_EXPANDED_STRING_TABLES_DEFAULT =
+  false;
+
   // Image transfer timeout
   public static final String DFS_IMAGE_TRANSFER_TIMEOUT_KEY = 
"dfs.image.transfer.timeout";
   public static final int DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT = 60 * 1000;
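
The new key defaults to false, which keeps fsimages readable by software that
predates this change until an operator deliberately opts in to the expanded
string-table bit range. A hedged sketch of opting in programmatically (the
helper class is hypothetical; setting the key in hdfs-site.xml has the same
effect):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class ExpandedStringTableOptIn {
      // Hedged sketch: turn on the expanded layout before the next fsimage save.
      public static Configuration optIn(Configuration conf) {
        conf.setBoolean(DFSConfigKeys.DFS_IMAGE_EXPANDED_STRING_TABLES_KEY, true);
        return conf;
      }
    }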

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8832418/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
index 2c5b23b..e9e3e59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main

hadoop git commit: Fix potential FSImage corruption. Contributed by Daryn Sharp and Vinayakumar B.

2018-10-24 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 4167275e8 -> 96cedb87b


Fix potential FSImage corruption. Contributed by Daryn Sharp and Vinayakumar B.

(cherry picked from commit f1996ccbaee734d423caa9d47a571cfff98ef42c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96cedb87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96cedb87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96cedb87

Branch: refs/heads/branch-3.1
Commit: 96cedb87b94c07c11152580bf36978186d622b50
Parents: 4167275
Author: Xiao Chen 
Authored: Wed Oct 24 15:49:27 2018 -0700
Committer: Xiao Chen 
Committed: Wed Oct 24 16:10:25 2018 -0700

--
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |   8 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +
 .../server/namenode/AclEntryStatusFormat.java   | 109 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +
 .../server/namenode/FSImageFormatPBINode.java   | 101 +++---
 .../server/namenode/FSImageFormatProtobuf.java  |  26 ++-
 .../namenode/INodeWithAdditionalFields.java |  36 +++-
 .../server/namenode/SerialNumberManager.java| 200 +--
 .../hdfs/server/namenode/SerialNumberMap.java   |  43 +++-
 .../hdfs/server/namenode/XAttrFormat.java   |  95 +
 .../hdfs/server/namenode/XAttrStorage.java  |  11 -
 .../tools/offlineImageViewer/FSImageLoader.java |  18 +-
 .../offlineImageViewer/PBImageTextWriter.java   |   3 +-
 .../offlineImageViewer/PBImageXmlWriter.java|  22 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|   1 +
 .../TestCommitBlockSynchronization.java |   6 +-
 16 files changed, 437 insertions(+), 250 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96cedb87/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 51ad08f..195fd8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -26,6 +26,10 @@ import java.io.Serializable;
 public class LongBitFormat implements Serializable {
   private static final long serialVersionUID = 1L;
 
+  public interface Enum {
+int getLength();
+  }
+
   private final String NAME;
   /** Bit offset */
   private final int OFFSET;
@@ -69,4 +73,8 @@ public class LongBitFormat implements Serializable {
   public long getMin() {
 return MIN;
   }
+
+  public int getLength() {
+return LENGTH;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96cedb87/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index aa5e758..898e3a1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -794,6 +794,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final long DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_DEFAULT =
   0;  //no throttling
 
+  // String table in the fsimage utilizes an expanded bit range.
+  public static final String DFS_IMAGE_EXPANDED_STRING_TABLES_KEY =
+  "dfs.image.string-tables.expanded";
+  public static final boolean DFS_IMAGE_EXPANDED_STRING_TABLES_DEFAULT =
+  false;
+
   // Image transfer timeout
   public static final String DFS_IMAGE_TRANSFER_TIMEOUT_KEY = 
"dfs.image.transfer.timeout";
   public static final int DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT = 60 * 1000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96cedb87/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
index 2c5b23b..e9e3e59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.

hadoop git commit: HDFS-14021. TestReconstructStripedBlocksWithRackAwareness#testReconstructForNotEnoughRacks fails intermittently. Contributed by Xiao Chen.

2018-10-24 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 3665c4617 -> b6937e8e7


HDFS-14021. 
TestReconstructStripedBlocksWithRackAwareness#testReconstructForNotEnoughRacks 
fails intermittently. Contributed by Xiao Chen.

(cherry picked from commit c1874046e2f9275ba330090fcf12c5611c6d6fc4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6937e8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6937e8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6937e8e

Branch: refs/heads/branch-3.2
Commit: b6937e8e702717e6f9c767420ec2a71bb78650ec
Parents: 3665c46
Author: Inigo Goiri 
Authored: Wed Oct 24 09:20:38 2018 -0700
Committer: Xiao Chen 
Committed: Wed Oct 24 09:38:32 2018 -0700

--
 .../TestReconstructStripedBlocksWithRackAwareness.java   | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6937e8e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
index 298b9eb..6bfc0b0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
@@ -162,7 +162,9 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 // the file's block is in 9 dn but 5 racks
 DFSTestUtil.createFile(fs, file,
 cellSize * dataBlocks * 2, (short) 1, 0L);
-Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
+GenericTestUtils.waitFor(() ->
+bm.numOfUnderReplicatedBlocks() == 0, 100, 3);
+LOG.info("Created file {}", file);
 
 final INodeFile fileNode = fsn.getFSDirectory()
 .getINode4Write(file.toString()).asFile();
@@ -173,7 +175,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 for (DatanodeStorageInfo storage : blockInfo.storages) {
   rackSet.add(storage.getDatanodeDescriptor().getNetworkLocation());
 }
-Assert.assertEquals(dataBlocks - 1, rackSet.size());
+Assert.assertEquals("rackSet size is wrong: " + rackSet, dataBlocks - 1,
+rackSet.size());
 
 // restart the stopped datanode
 cluster.restartDataNode(lastHost);
@@ -181,6 +184,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 
 // make sure we have 6 racks again
 NetworkTopology topology = bm.getDatanodeManager().getNetworkTopology();
+LOG.info("topology is: {}", topology);
 Assert.assertEquals(hosts.length, topology.getNumOfLeaves());
 Assert.assertEquals(dataBlocks, topology.getNumOfRacks());
 
@@ -202,7 +206,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
   for (DatanodeStorageInfo storage : blockInfo.storages) {
 if (storage != null) {
   DatanodeDescriptor dn = storage.getDatanodeDescriptor();
-  Assert.assertEquals(0, dn.getNumberOfBlocksToBeErasureCoded());
+  Assert.assertEquals("Block to be erasure coded is wrong for 
datanode:"
+  + dn, 0, dn.getNumberOfBlocksToBeErasureCoded());
   if (dn.getNumberOfBlocksToBeReplicated() == 1) {
 scheduled = true;
   }
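
The substance of the fix is replacing one-shot assertions on state that the
NameNode updates asynchronously with a polling wait. A small, hypothetical
sketch of the same GenericTestUtils.waitFor pattern applied to an arbitrary
asynchronous condition (the worker thread and the timeouts are illustrative):

    import java.util.concurrent.atomic.AtomicBoolean;
    import org.apache.hadoop.test.GenericTestUtils;
    import org.junit.Test;

    public class WaitForPatternExample {
      @Test
      public void testWaitsForAsyncWork() throws Exception {
        AtomicBoolean done = new AtomicBoolean(false);
        new Thread(() -> done.set(true)).start();
        // Re-check every 100 ms, give up after 30 seconds, instead of
        // asserting immediately and failing intermittently.
        GenericTestUtils.waitFor(done::get, 100, 30000);
      }
    }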





hadoop git commit: HDFS-14021. TestReconstructStripedBlocksWithRackAwareness#testReconstructForNotEnoughRacks fails intermittently. Contributed by Xiao Chen.

2018-10-24 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 e402791a5 -> 6aef51479


HDFS-14021. 
TestReconstructStripedBlocksWithRackAwareness#testReconstructForNotEnoughRacks 
fails intermittently. Contributed by Xiao Chen.

(cherry picked from commit c1874046e2f9275ba330090fcf12c5611c6d6fc4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6aef5147
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6aef5147
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6aef5147

Branch: refs/heads/branch-3.0
Commit: 6aef514795f9da64a0255ebaa5154f7ca20d265f
Parents: e402791
Author: Inigo Goiri 
Authored: Wed Oct 24 09:20:38 2018 -0700
Committer: Xiao Chen 
Committed: Wed Oct 24 09:38:48 2018 -0700

--
 .../TestReconstructStripedBlocksWithRackAwareness.java   | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6aef5147/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
index 7d16017c..555673c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
@@ -162,7 +162,9 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 // the file's block is in 9 dn but 5 racks
 DFSTestUtil.createFile(fs, file,
 cellSize * dataBlocks * 2, (short) 1, 0L);
-Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
+GenericTestUtils.waitFor(() ->
+bm.numOfUnderReplicatedBlocks() == 0, 100, 3);
+LOG.info("Created file {}", file);
 
 final INodeFile fileNode = fsn.getFSDirectory()
 .getINode4Write(file.toString()).asFile();
@@ -173,7 +175,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 for (DatanodeStorageInfo storage : blockInfo.storages) {
   rackSet.add(storage.getDatanodeDescriptor().getNetworkLocation());
 }
-Assert.assertEquals(dataBlocks - 1, rackSet.size());
+Assert.assertEquals("rackSet size is wrong: " + rackSet, dataBlocks - 1,
+rackSet.size());
 
 // restart the stopped datanode
 cluster.restartDataNode(lastHost);
@@ -181,6 +184,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 
 // make sure we have 6 racks again
 NetworkTopology topology = bm.getDatanodeManager().getNetworkTopology();
+LOG.info("topology is: {}", topology);
 Assert.assertEquals(hosts.length, topology.getNumOfLeaves());
 Assert.assertEquals(dataBlocks, topology.getNumOfRacks());
 
@@ -202,7 +206,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
   for (DatanodeStorageInfo storage : blockInfo.storages) {
 if (storage != null) {
   DatanodeDescriptor dn = storage.getDatanodeDescriptor();
-  Assert.assertEquals(0, dn.getNumberOfBlocksToBeErasureCoded());
+  Assert.assertEquals("Block to be erasure coded is wrong for 
datanode:"
+  + dn, 0, dn.getNumberOfBlocksToBeErasureCoded());
   if (dn.getNumberOfBlocksToBeReplicated() == 1) {
 scheduled = true;
   }





hadoop git commit: HDFS-14021. TestReconstructStripedBlocksWithRackAwareness#testReconstructForNotEnoughRacks fails intermittently. Contributed by Xiao Chen.

2018-10-24 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 080e9d0f9 -> 2e43966b5


HDFS-14021. 
TestReconstructStripedBlocksWithRackAwareness#testReconstructForNotEnoughRacks 
fails intermittently. Contributed by Xiao Chen.

(cherry picked from commit c1874046e2f9275ba330090fcf12c5611c6d6fc4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e43966b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e43966b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e43966b

Branch: refs/heads/branch-3.1
Commit: 2e43966b56819feb95dfc810f4362a5b1f024a83
Parents: 080e9d0
Author: Inigo Goiri 
Authored: Wed Oct 24 09:20:38 2018 -0700
Committer: Xiao Chen 
Committed: Wed Oct 24 09:38:40 2018 -0700

--
 .../TestReconstructStripedBlocksWithRackAwareness.java   | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e43966b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
index 7d16017c..555673c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
@@ -162,7 +162,9 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 // the file's block is in 9 dn but 5 racks
 DFSTestUtil.createFile(fs, file,
 cellSize * dataBlocks * 2, (short) 1, 0L);
-Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
+GenericTestUtils.waitFor(() ->
+bm.numOfUnderReplicatedBlocks() == 0, 100, 3);
+LOG.info("Created file {}", file);
 
 final INodeFile fileNode = fsn.getFSDirectory()
 .getINode4Write(file.toString()).asFile();
@@ -173,7 +175,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 for (DatanodeStorageInfo storage : blockInfo.storages) {
   rackSet.add(storage.getDatanodeDescriptor().getNetworkLocation());
 }
-Assert.assertEquals(dataBlocks - 1, rackSet.size());
+Assert.assertEquals("rackSet size is wrong: " + rackSet, dataBlocks - 1,
+rackSet.size());
 
 // restart the stopped datanode
 cluster.restartDataNode(lastHost);
@@ -181,6 +184,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 
 // make sure we have 6 racks again
 NetworkTopology topology = bm.getDatanodeManager().getNetworkTopology();
+LOG.info("topology is: {}", topology);
 Assert.assertEquals(hosts.length, topology.getNumOfLeaves());
 Assert.assertEquals(dataBlocks, topology.getNumOfRacks());
 
@@ -202,7 +206,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
   for (DatanodeStorageInfo storage : blockInfo.storages) {
 if (storage != null) {
   DatanodeDescriptor dn = storage.getDatanodeDescriptor();
-  Assert.assertEquals(0, dn.getNumberOfBlocksToBeErasureCoded());
+  Assert.assertEquals("Block to be erasure coded is wrong for 
datanode:"
+  + dn, 0, dn.getNumberOfBlocksToBeErasureCoded());
   if (dn.getNumberOfBlocksToBeReplicated() == 1) {
 scheduled = true;
   }





hadoop git commit: HADOOP-15873. Add JavaBeans Activation Framework API to LICENSE.txt. Contributed by Akira Ajisaka.

2018-10-23 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk b61846392 -> 296818a2c


HADOOP-15873. Add JavaBeans Activation Framework API to LICENSE.txt.
Contributed by Akira Ajisaka.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/296818a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/296818a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/296818a2

Branch: refs/heads/trunk
Commit: 296818a2c8578cf9ea619292efec16fd35f828a2
Parents: b618463
Author: Akira Ajisaka 
Authored: Tue Oct 23 08:45:38 2018 -0700
Committer: Xiao Chen 
Committed: Tue Oct 23 08:47:00 2018 -0700

--
 LICENSE.txt | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/296818a2/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 393ed0e..94c9065 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1275,6 +1275,7 @@ grizzly-http 2.2.21
 grizzly-http-server 2.2.21
 grizzly-http-servlet 2.2.21
 grizzly-rcm 2.2.21
+JavaBeans Activation Framework 1.2.0
 

 (CDDL 1.1)
 COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)Version 1.1





hadoop git commit: HADOOP-11100. Support to configure ftpClient.setControlKeepAliveTimeout. Contributed by Adam Antal.

2018-10-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk d54f5598f -> 24dc068a3


HADOOP-11100. Support to configure ftpClient.setControlKeepAliveTimeout.
Contributed by Adam Antal.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24dc068a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24dc068a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24dc068a

Branch: refs/heads/trunk
Commit: 24dc068a361648b4e59e1807b07ff2239f41c740
Parents: d54f559
Author: Adam Antal 
Authored: Wed Oct 17 11:32:17 2018 -0700
Committer: Xiao Chen 
Committed: Wed Oct 17 11:34:50 2018 -0700

--
 .../java/org/apache/hadoop/fs/ftp/FTPFileSystem.java | 13 +
 .../src/main/resources/core-default.xml  |  8 
 .../hadoop/conf/TestCommonConfigurationFields.java   |  1 +
 .../org/apache/hadoop/fs/ftp/TestFTPFileSystem.java  | 15 +++
 4 files changed, 37 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24dc068a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 676c207..4b144bf 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -62,6 +62,7 @@ public class FTPFileSystem extends FileSystem {
   public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024;
 
   public static final int DEFAULT_BLOCK_SIZE = 4 * 1024;
+  public static final long DEFAULT_TIMEOUT = 0;
   public static final String FS_FTP_USER_PREFIX = "fs.ftp.user.";
   public static final String FS_FTP_HOST = "fs.ftp.host";
   public static final String FS_FTP_HOST_PORT = "fs.ftp.host.port";
@@ -71,6 +72,7 @@ public class FTPFileSystem extends FileSystem {
   public static final String FS_FTP_TRANSFER_MODE = "fs.ftp.transfer.mode";
   public static final String E_SAME_DIRECTORY_ONLY =
   "only same directory renames are supported";
+  public static final String FS_FTP_TIMEOUT = "fs.ftp.timeout";
 
   private URI uri;
 
@@ -150,6 +152,7 @@ public class FTPFileSystem extends FileSystem {
   client.setFileTransferMode(getTransferMode(conf));
   client.setFileType(FTP.BINARY_FILE_TYPE);
   client.setBufferSize(DEFAULT_BUFFER_SIZE);
+  setTimeout(client, conf);
   setDataConnectionMode(client, conf);
 } else {
   throw new IOException("Login failed on server - " + host + ", port - "
@@ -160,6 +163,16 @@ public class FTPFileSystem extends FileSystem {
   }
 
   /**
+   * Set the FTPClient's timeout based on configuration.
+   * FS_FTP_TIMEOUT is set as timeout (defaults to DEFAULT_TIMEOUT).
+   */
+  @VisibleForTesting
+  void setTimeout(FTPClient client, Configuration conf) {
+long timeout = conf.getLong(FS_FTP_TIMEOUT, DEFAULT_TIMEOUT);
+client.setControlKeepAliveTimeout(timeout);
+  }
+
+  /**
* Set FTP's transfer mode based on configuration. Valid values are
* STREAM_TRANSFER_MODE, BLOCK_TRANSFER_MODE and COMPRESSED_TRANSFER_MODE.
* 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24dc068a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 32dd622..599396f 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -926,6 +926,14 @@
 
 
 
+<property>
+  <name>fs.ftp.timeout</name>
+  <value>0</value>
+  <description>
+    FTP filesystem's timeout in seconds.
+  </description>
+</property>
+
   fs.df.interval
   6
   Disk usage statistics refresh interval in msec.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24dc068a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index e10617d..2766b56 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.j
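
With the new key, the control-channel keep-alive can be tuned per client
without editing core-site.xml. A hedged sketch of a client opting in (the
helper class and the 30-second value are illustrative only):

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class FtpKeepAliveExample {
      // Hedged sketch: send keep-alive probes on the FTP control connection
      // every 30 seconds so long transfers do not let it idle out.
      public static FileSystem openFtp(URI ftpUri) throws IOException {
        Configuration conf = new Configuration();
        conf.setLong("fs.ftp.timeout", 30);   // seconds; 0 (the default) disables it
        return FileSystem.get(ftpUri, conf);
      }
    }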

hadoop git commit: HDFS-13662. TestBlockReaderLocal#testStatisticsForErasureCodingRead is flaky

2018-10-16 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 83769b63d -> 08b415d17


HDFS-13662. TestBlockReaderLocal#testStatisticsForErasureCodingRead is flaky

(cherry picked from commit 533138718cc05b78e0afe583d7a9bd30e8a48fdc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08b415d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08b415d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08b415d1

Branch: refs/heads/branch-3.2
Commit: 08b415d17028dc7c8e4784dd84ef6326c91c2fcf
Parents: 83769b6
Author: Xiao Chen 
Authored: Tue Oct 16 19:32:12 2018 -0700
Committer: Xiao Chen 
Committed: Tue Oct 16 19:34:41 2018 -0700

--
 .../hadoop/hdfs/client/impl/TestBlockReaderLocal.java | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08b415d1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
index ace21c0..95fb67a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
@@ -28,6 +28,7 @@ import java.nio.ByteBuffer;
 import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -827,9 +828,12 @@ public class TestBlockReaderLocal {
 
   Path ecFile = new Path(ecDir, "file2");
   DFSTestUtil.createFile(fs, ecFile, length, repl, randomSeed);
-  // Shutdown one DataNode so that erasure coding decoding process can kick
-  // in.
-  cluster.shutdownDataNode(0);
+
+  // Shutdown a DataNode that holds a data block, to trigger EC decoding.
+  final BlockLocation[] locs = fs.getFileBlockLocations(ecFile, 0, length);
+  final String[] nodes = locs[0].getNames();
+  cluster.stopDataNode(nodes[0]);
+
   try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(ecFile)) {
 IOUtils.readFully(in, buf, 0, length);
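
The flakiness came from always stopping DataNode 0: erasure-coding decoding is
only exercised when the stopped node holds a data block, and with the default
RS(6,3) policy it may just as well hold a parity block, in which case the read
needs no reconstruction and the statistics under test never change. A short,
hypothetical sketch of inspecting block locations first, as the fix does,
before choosing a node to stop:

    import java.io.IOException;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PickDataNodeToStop {
      // Hedged sketch: the first reported location of the first block group is
      // assumed (as in the patch) to hold a data block rather than parity.
      public static String firstLocation(FileSystem fs, Path file, long length)
          throws IOException {
        BlockLocation[] locs = fs.getFileBlockLocations(file, 0, length);
        return locs[0].getNames()[0];   // host:port of the DataNode to stop
      }
    }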
 





hadoop git commit: HDFS-13662. TestBlockReaderLocal#testStatisticsForErasureCodingRead is flaky

2018-10-16 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk e3342a1ab -> 533138718


HDFS-13662. TestBlockReaderLocal#testStatisticsForErasureCodingRead is flaky


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53313871
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53313871
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53313871

Branch: refs/heads/trunk
Commit: 533138718cc05b78e0afe583d7a9bd30e8a48fdc
Parents: e3342a1
Author: Xiao Chen 
Authored: Tue Oct 16 19:32:12 2018 -0700
Committer: Xiao Chen 
Committed: Tue Oct 16 19:33:55 2018 -0700

--
 .../hadoop/hdfs/client/impl/TestBlockReaderLocal.java | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53313871/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
index ace21c0..95fb67a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
@@ -28,6 +28,7 @@ import java.nio.ByteBuffer;
 import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -827,9 +828,12 @@ public class TestBlockReaderLocal {
 
   Path ecFile = new Path(ecDir, "file2");
   DFSTestUtil.createFile(fs, ecFile, length, repl, randomSeed);
-  // Shutdown one DataNode so that erasure coding decoding process can kick
-  // in.
-  cluster.shutdownDataNode(0);
+
+  // Shutdown a DataNode that holds a data block, to trigger EC decoding.
+  final BlockLocation[] locs = fs.getFileBlockLocations(ecFile, 0, length);
+  final String[] nodes = locs[0].getNames();
+  cluster.stopDataNode(nodes[0]);
+
   try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(ecFile)) {
 IOUtils.readFully(in, buf, 0, length);
 





hadoop git commit: MAPREDUCE-7132. JobSplitWriter prints unnecessary warnings if EC(RS10, 4) is used. Contributed by Peter Bacsko.

2018-10-16 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 b9f65d717 -> 99b447f61


MAPREDUCE-7132. JobSplitWriter prints unnecessary warnings if EC(RS10,4) is 
used. Contributed by Peter Bacsko.

(cherry picked from commit 25f8fcb06476938826cdc92858a61124b18cd98d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99b447f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99b447f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99b447f6

Branch: refs/heads/branch-3.0
Commit: 99b447f6129a47a18d93d66868e70943e777baf6
Parents: b9f65d7
Author: Xiao Chen 
Authored: Tue Oct 16 10:22:47 2018 -0700
Committer: Xiao Chen 
Committed: Tue Oct 16 10:24:33 2018 -0700

--
 .../org/apache/hadoop/mapreduce/MRConfig.java   |   2 +-
 .../src/main/resources/mapred-default.xml   |   2 +-
 .../split/TestJobSplitWriterWithEC.java | 128 +++
 3 files changed, 130 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b447f6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
index e85c893..b4d9149 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
@@ -78,7 +78,7 @@ public interface MRConfig {
 "mapreduce.task.max.status.length";
   public static final int PROGRESS_STATUS_LEN_LIMIT_DEFAULT = 512;
 
-  public static final int MAX_BLOCK_LOCATIONS_DEFAULT = 10;
+  public static final int MAX_BLOCK_LOCATIONS_DEFAULT = 15;
   public static final String MAX_BLOCK_LOCATIONS_KEY =
 "mapreduce.job.max.split.locations";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b447f6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 16ecd90..cc89e6c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -127,7 +127,7 @@
 
   
     <name>mapreduce.job.max.split.locations</name>
-    <value>10</value>
+    <value>15</value>
     <description>The max number of block locations to store for each split for
     locality calculation.
     </description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b447f6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
new file mode 100644
index 000..23f8a40
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific languag

hadoop git commit: MAPREDUCE-7132. JobSplitWriter prints unnecessary warnings if EC(RS10, 4) is used. Contributed by Peter Bacsko.

2018-10-16 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 a439aa79d -> 424c99681


MAPREDUCE-7132. JobSplitWriter prints unnecessary warnings if EC(RS10,4) is 
used. Contributed by Peter Bacsko.

(cherry picked from commit 25f8fcb06476938826cdc92858a61124b18cd98d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/424c9968
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/424c9968
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/424c9968

Branch: refs/heads/branch-3.2
Commit: 424c99681bfa4a9846fc3e5a69007c0f84a67840
Parents: a439aa7
Author: Xiao Chen 
Authored: Tue Oct 16 10:22:47 2018 -0700
Committer: Xiao Chen 
Committed: Tue Oct 16 10:24:15 2018 -0700

--
 .../org/apache/hadoop/mapreduce/MRConfig.java   |   2 +-
 .../src/main/resources/mapred-default.xml   |   2 +-
 .../split/TestJobSplitWriterWithEC.java | 128 +++
 3 files changed, 130 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/424c9968/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
index e85c893..b4d9149 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
@@ -78,7 +78,7 @@ public interface MRConfig {
 "mapreduce.task.max.status.length";
   public static final int PROGRESS_STATUS_LEN_LIMIT_DEFAULT = 512;
 
-  public static final int MAX_BLOCK_LOCATIONS_DEFAULT = 10;
+  public static final int MAX_BLOCK_LOCATIONS_DEFAULT = 15;
   public static final String MAX_BLOCK_LOCATIONS_KEY =
 "mapreduce.job.max.split.locations";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/424c9968/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 9f33d65..e5da41f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -149,7 +149,7 @@
 
   
     <name>mapreduce.job.max.split.locations</name>
-    <value>10</value>
+    <value>15</value>
     <description>The max number of block locations to store for each split for
     locality calculation.
     </description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/424c9968/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
new file mode 100644
index 000..23f8a40
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific languag

hadoop git commit: MAPREDUCE-7132. JobSplitWriter prints unnecessary warnings if EC(RS10, 4) is used. Contributed by Peter Bacsko.

2018-10-16 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 d1749fbaa -> 30526f2c5


MAPREDUCE-7132. JobSplitWriter prints unnecessary warnings if EC(RS10,4) is 
used. Contributed by Peter Bacsko.

(cherry picked from commit 25f8fcb06476938826cdc92858a61124b18cd98d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30526f2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30526f2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30526f2c

Branch: refs/heads/branch-3.1
Commit: 30526f2c5c61b61d86fd6d65a4959116d7d92234
Parents: d1749fb
Author: Xiao Chen 
Authored: Tue Oct 16 10:22:47 2018 -0700
Committer: Xiao Chen 
Committed: Tue Oct 16 10:24:24 2018 -0700

--
 .../org/apache/hadoop/mapreduce/MRConfig.java   |   2 +-
 .../src/main/resources/mapred-default.xml   |   2 +-
 .../split/TestJobSplitWriterWithEC.java | 128 +++
 3 files changed, 130 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30526f2c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
index e85c893..b4d9149 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
@@ -78,7 +78,7 @@ public interface MRConfig {
 "mapreduce.task.max.status.length";
   public static final int PROGRESS_STATUS_LEN_LIMIT_DEFAULT = 512;
 
-  public static final int MAX_BLOCK_LOCATIONS_DEFAULT = 10;
+  public static final int MAX_BLOCK_LOCATIONS_DEFAULT = 15;
   public static final String MAX_BLOCK_LOCATIONS_KEY =
 "mapreduce.job.max.split.locations";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30526f2c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 13f4d34..c7d6cd2 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -149,7 +149,7 @@
 
   
     <name>mapreduce.job.max.split.locations</name>
-    <value>10</value>
+    <value>15</value>
     <description>The max number of block locations to store for each split for
     locality calculation.
     </description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30526f2c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
new file mode 100644
index 000..23f8a40
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific languag

hadoop git commit: MAPREDUCE-7132. JobSplitWriter prints unnecessary warnings if EC(RS10, 4) is used. Contributed by Peter Bacsko.

2018-10-16 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 753f149fd -> 25f8fcb06


MAPREDUCE-7132. JobSplitWriter prints unnecessary warnings if EC(RS10,4) is 
used. Contributed by Peter Bacsko.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25f8fcb0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25f8fcb0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25f8fcb0

Branch: refs/heads/trunk
Commit: 25f8fcb06476938826cdc92858a61124b18cd98d
Parents: 753f149
Author: Xiao Chen 
Authored: Tue Oct 16 10:22:47 2018 -0700
Committer: Xiao Chen 
Committed: Tue Oct 16 10:23:31 2018 -0700

--
 .../org/apache/hadoop/mapreduce/MRConfig.java   |   2 +-
 .../src/main/resources/mapred-default.xml   |   2 +-
 .../split/TestJobSplitWriterWithEC.java | 128 +++
 3 files changed, 130 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f8fcb0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
index e85c893..b4d9149 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
@@ -78,7 +78,7 @@ public interface MRConfig {
 "mapreduce.task.max.status.length";
   public static final int PROGRESS_STATUS_LEN_LIMIT_DEFAULT = 512;
 
-  public static final int MAX_BLOCK_LOCATIONS_DEFAULT = 10;
+  public static final int MAX_BLOCK_LOCATIONS_DEFAULT = 15;
   public static final String MAX_BLOCK_LOCATIONS_KEY =
 "mapreduce.job.max.split.locations";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f8fcb0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 9f33d65..e5da41f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -149,7 +149,7 @@
 
   
     <name>mapreduce.job.max.split.locations</name>
-    <value>10</value>
+    <value>15</value>
     <description>The max number of block locations to store for each split for
       locality calculation.
     </description>
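
For context: the 10 -> 15 bump is easiest to see with a little arithmetic. An RS(10,4) block group is spread across 10 data plus 4 parity blocks, i.e. 14 block locations per split, which exceeded the old default of 10 and triggered JobSplitWriter's truncation warning. A minimal sketch of that arithmetic (illustrative only; the class name and printed strings are made up, the two defaults mirror the MRConfig hunk above):

public class Rs104SplitLocations {
  public static void main(String[] args) {
    int dataUnits = 10;                              // RS(10,4): 10 data blocks per group
    int parityUnits = 4;                             // plus 4 parity blocks
    int locationsPerSplit = dataUnits + parityUnits; // 14 block locations
    int oldDefault = 10;                             // previous MAX_BLOCK_LOCATIONS_DEFAULT
    int newDefault = 15;                             // value introduced by this change
    System.out.println("locations per split:    " + locationsPerSplit);
    System.out.println("warns with old default: " + (locationsPerSplit > oldDefault));
    System.out.println("warns with new default: " + (locationsPerSplit > newDefault));
  }
}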

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f8fcb0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
new file mode 100644
index 000..23f8a40
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriterWithEC.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package o

hadoop git commit: HADOOP-14445. Addendum: Use DelegationTokenIssuer to create KMS delegation tokens that can authenticate to all KMS instances.

2018-10-15 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 ff7ca472d -> b9f65d717


HADOOP-14445. Addendum: Use DelegationTokenIssuer to create KMS delegation 
tokens that can authenticate to all KMS instances.

(cherry picked from commit b6fc72a0250ac3f2341ebe8a14d19b073e6224c8)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9f65d71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9f65d71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9f65d71

Branch: refs/heads/branch-3.0
Commit: b9f65d717c9ef8aa7f71814d3b1a5229957d205e
Parents: ff7ca47
Author: Xiao Chen 
Authored: Mon Oct 15 10:50:27 2018 -0700
Committer: Xiao Chen 
Committed: Mon Oct 15 11:00:39 2018 -0700

--
 .../KeyProviderDelegationTokenExtension.java|  2 +-
 .../crypto/key/KeyProviderTokenIssuer.java  |  2 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  2 +-
 .../security/token/DelegationTokenIssuer.java   |  3 +-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |  2 +-
 .../org/apache/hadoop/fs/TestHarFileSystem.java |  2 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  2 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 11 ++-
 .../apache/hadoop/hdfs/TestEncryptionZones.java | 32 +++-
 9 files changed, 34 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f65d71/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
index 280ee86..92853ab 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f65d71/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
index 81caff4..187bee6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
@@ -22,7 +22,7 @@ import java.net.URI;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 /**
  * File systems that support Encryption Zones have to implement this interface.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f65d71/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index d210462..c346b4b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -63,7 +63,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-imp

hadoop git commit: HADOOP-14445. Addendum: Use DelegationTokenIssuer to create KMS delegation tokens that can authenticate to all KMS instances.

2018-10-15 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 1ca5f974f -> ced259609


HADOOP-14445. Addendum: Use DelegationTokenIssuer to create KMS delegation 
tokens that can authenticate to all KMS instances.

(cherry picked from commit b6fc72a0250ac3f2341ebe8a14d19b073e6224c8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ced25960
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ced25960
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ced25960

Branch: refs/heads/branch-3.2
Commit: ced2596094dbada4a2bb00164e1dd6a972732c54
Parents: 1ca5f97
Author: Xiao Chen 
Authored: Mon Oct 15 10:50:27 2018 -0700
Committer: Xiao Chen 
Committed: Mon Oct 15 11:00:21 2018 -0700

--
 .../KeyProviderDelegationTokenExtension.java|  2 +-
 .../crypto/key/KeyProviderTokenIssuer.java  |  2 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  2 +-
 .../security/token/DelegationTokenIssuer.java   |  3 +-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |  2 +-
 .../org/apache/hadoop/fs/TestHarFileSystem.java |  2 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  2 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 11 ++-
 .../apache/hadoop/hdfs/TestEncryptionZones.java | 32 +++-
 9 files changed, 34 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ced25960/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
index 280ee86..92853ab 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ced25960/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
index 81caff4..187bee6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
@@ -22,7 +22,7 @@ import java.net.URI;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 /**
  * File systems that support Encryption Zones have to implement this interface.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ced25960/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 75588c4..35d653a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 import org.apache.hadoop.util.ClassUtil;
 import org.apache.hadoop.util.DataChecksum;
 imp

hadoop git commit: HADOOP-14445. Addendum: Use DelegationTokenIssuer to create KMS delegation tokens that can authenticate to all KMS instances.

2018-10-15 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 38e187384 -> d1749fbaa


HADOOP-14445. Addendum: Use DelegationTokenIssuer to create KMS delegation 
tokens that can authenticate to all KMS instances.

(cherry picked from commit b6fc72a0250ac3f2341ebe8a14d19b073e6224c8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1749fba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1749fba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1749fba

Branch: refs/heads/branch-3.1
Commit: d1749fbaae933411e252edf5c5f72bc8a76f8e1c
Parents: 38e1873
Author: Xiao Chen 
Authored: Mon Oct 15 10:50:27 2018 -0700
Committer: Xiao Chen 
Committed: Mon Oct 15 11:00:30 2018 -0700

--
 .../KeyProviderDelegationTokenExtension.java|  2 +-
 .../crypto/key/KeyProviderTokenIssuer.java  |  2 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  2 +-
 .../security/token/DelegationTokenIssuer.java   |  3 +-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |  2 +-
 .../org/apache/hadoop/fs/TestHarFileSystem.java |  2 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  2 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 11 ++-
 .../apache/hadoop/hdfs/TestEncryptionZones.java | 32 +++-
 9 files changed, 34 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1749fba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
index 280ee86..92853ab 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1749fba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
index 81caff4..187bee6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
@@ -22,7 +22,7 @@ import java.net.URI;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 /**
  * File systems that support Encryption Zones have to implement this interface.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1749fba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index ef5bcfc..043d36b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 import org.apache.hadoop.util.ClassUtil;
 import org.apache.hadoop.util.DataChecksum;
 imp

hadoop git commit: HADOOP-14445. Addendum: Use DelegationTokenIssuer to create KMS delegation tokens that can authenticate to all KMS instances.

2018-10-15 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk ee1c80ea3 -> b6fc72a02


HADOOP-14445. Addendum: Use DelegationTokenIssuer to create KMS delegation 
tokens that can authenticate to all KMS instances.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6fc72a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6fc72a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6fc72a0

Branch: refs/heads/trunk
Commit: b6fc72a0250ac3f2341ebe8a14d19b073e6224c8
Parents: ee1c80e
Author: Xiao Chen 
Authored: Mon Oct 15 10:50:27 2018 -0700
Committer: Xiao Chen 
Committed: Mon Oct 15 10:51:55 2018 -0700

--
 .../KeyProviderDelegationTokenExtension.java|  2 +-
 .../crypto/key/KeyProviderTokenIssuer.java  |  2 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  2 +-
 .../security/token/DelegationTokenIssuer.java   |  3 +-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |  2 +-
 .../org/apache/hadoop/fs/TestHarFileSystem.java |  2 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  2 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 11 ++-
 .../apache/hadoop/hdfs/TestEncryptionZones.java | 32 +++-
 9 files changed, 34 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc72a0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
index 29c5bcd..05d99ed 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc72a0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
index 81caff4..187bee6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
@@ -22,7 +22,7 @@ import java.net.URI;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 /**
  * File systems that support Encryption Zones have to implement this interface.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc72a0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 3d40b6a..fe4159b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
 import org.apache.hadoop.util.ClassUtil;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc72a0/had

hadoop git commit: HADOOP-15849. Upgrade netty version to 3.10.6.

2018-10-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 603649d3a -> 8853fc8a5


HADOOP-15849. Upgrade netty version to 3.10.6.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8853fc8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8853fc8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8853fc8a

Branch: refs/heads/trunk
Commit: 8853fc8a55b07ecdc5ce8d85278b822e5675d97a
Parents: 603649d
Author: Xiao Chen 
Authored: Sat Oct 13 20:21:36 2018 -0700
Committer: Xiao Chen 
Committed: Sat Oct 13 20:22:02 2018 -0700

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8853fc8a/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index b850c7c..4cdbcfb 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -961,7 +961,7 @@
       <dependency>
         <groupId>io.netty</groupId>
         <artifactId>netty</artifactId>
-        <version>3.10.5.Final</version>
+        <version>3.10.6.Final</version>
       </dependency>
 
   





[2/2] hadoop git commit: HADOOP-14445. Use DelegationTokenIssuer to create KMS delegation tokens that can authenticate to all KMS instances. Contributed by Daryn Sharp, Xiao Chen, Rushabh S Shah.

2018-10-12 Thread xiao
HADOOP-14445. Use DelegationTokenIssuer to create KMS delegation tokens that 
can authenticate to all KMS instances.
Contributed by Daryn Sharp, Xiao Chen, Rushabh S Shah.

(cherry picked from commit 5ec86b445cc492f52c33639efb6a09a0d2f27475)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9cb0654f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9cb0654f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9cb0654f

Branch: refs/heads/branch-3.2
Commit: 9cb0654fbb3dff15fd4ef4a9ecc81626caebfbb8
Parents: 65c1469
Author: Xiao Chen 
Authored: Fri Oct 12 09:32:21 2018 -0700
Committer: Xiao Chen 
Committed: Fri Oct 12 12:08:08 2018 -0700

--
 .../KeyProviderDelegationTokenExtension.java|  71 ++--
 .../crypto/key/KeyProviderTokenIssuer.java  |   4 +-
 .../crypto/key/kms/KMSClientProvider.java   | 220 
 .../key/kms/LoadBalancingKMSClientProvider.java |  75 +++-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  75 +---
 .../web/DelegationTokenAuthenticatedURL.java|  25 +-
 .../security/token/DelegationTokenIssuer.java   | 112 ++
 .../java/org/apache/hadoop/util/KMSUtil.java|  13 +-
 ...TestKeyProviderDelegationTokenExtension.java |  20 +-
 .../crypto/key/kms/TestKMSClientProvider.java   | 138 
 .../kms/TestLoadBalancingKMSClientProvider.java |  63 +++-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |   3 +
 .../org/apache/hadoop/fs/TestHarFileSystem.java |   3 +
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 349 ---
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  11 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  14 +-
 .../org/apache/hadoop/hdfs/HdfsKMSUtil.java |  60 ++--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  20 +-
 18 files changed, 963 insertions(+), 313 deletions(-)
--
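
For readers who only skim the diff: the caller-facing entry point for collecting tokens is unchanged; what changes is what ends up in the Credentials. A hedged usage sketch of that pre-existing FileSystem API (the "yarn" renewer string and the printed output are illustrative assumptions, not part of this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class CollectDelegationTokens {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Credentials creds = new Credentials();
    try (FileSystem fs = FileSystem.get(conf)) {
      // With this change the KMS providers behind an encrypted cluster are
      // reached through the DelegationTokenIssuer chain, so this call can put
      // one KMS delegation token per KMS instance into creds rather than a
      // single shared token.
      Token<?>[] tokens = fs.addDelegationTokens("yarn", creds);
      for (Token<?> token : tokens) {
        System.out.println("token service: " + token.getService());
      }
    }
  }
}

How many KMS tokens actually appear depends on the cluster's KMS HA setup; the point of the change is that each reachable KMS instance can be issued its own token.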


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cb0654f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
index 9212cbc..280ee86 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
@@ -17,8 +17,12 @@
  */
 package org.apache.hadoop.crypto.key;
 
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
+import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 import java.io.IOException;
 
@@ -28,7 +32,8 @@ import java.io.IOException;
  */
 public class KeyProviderDelegationTokenExtension extends
 KeyProviderExtension
- {
+
+implements DelegationTokenIssuer {
   
   private static DelegationTokenExtension DEFAULT_EXTENSION = 
   new DefaultDelegationTokenExtension();
@@ -36,22 +41,9 @@ public class KeyProviderDelegationTokenExtension extends
   /**
* DelegationTokenExtension is a type of Extension that exposes methods
* needed to work with Delegation Tokens.
-   */  
-  public interface DelegationTokenExtension extends 
-KeyProviderExtension.Extension {
-
-/**
- * The implementer of this class will take a renewer and add all
- * delegation tokens associated with the renewer to the 
- * Credentials object if it is not already present, 
- * @param renewer the user allowed to renew the delegation tokens
- * @param credentials cache in which to add new delegation tokens
- * @return list of new delegation tokens
- * @throws IOException thrown if IOException if an IO error occurs.
- */
-Token[] addDelegationTokens(final String renewer,
-Credentials credentials) throws IOException;
-
+   */
+  public interface DelegationTokenExtension
+  extends KeyProviderExtension.Extension, DelegationTokenIssuer {
 /**
  * Renews the given token.
  * @param token The token to be renewed.
@@ -66,6 +58,12 @@ public class KeyProviderDelegationTokenExtension extends
  * @throws IOException
  */
 Void cancelDelegationToken(final Token token) throws IOException;
+
+// Do NOT call this. Only intended for internal use.
+@VisibleForTesting
+@InterfaceAudience.Private

[2/2] hadoop git commit: HADOOP-14445. Use DelegationTokenIssuer to create KMS delegation tokens that can authenticate to all KMS instances. Contributed by Daryn Sharp, Xiao Chen, Rushabh S Shah.

2018-10-12 Thread xiao
HADOOP-14445. Use DelegationTokenIssuer to create KMS delegation tokens that 
can authenticate to all KMS instances.
Contributed by Daryn Sharp, Xiao Chen, Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff7ca472
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff7ca472
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff7ca472

Branch: refs/heads/branch-3.0
Commit: ff7ca472d220c3f19d3a8ca5c76ce87ddf201659
Parents: 53b522a
Author: Xiao Chen 
Authored: Fri Oct 12 11:50:54 2018 -0700
Committer: Xiao Chen 
Committed: Fri Oct 12 12:08:38 2018 -0700

--
 .../KeyProviderDelegationTokenExtension.java|  71 ++--
 .../crypto/key/KeyProviderTokenIssuer.java  |   4 +-
 .../crypto/key/kms/KMSClientProvider.java   | 220 
 .../key/kms/LoadBalancingKMSClientProvider.java |  75 +++-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  75 +---
 .../web/DelegationTokenAuthenticatedURL.java|  25 +-
 .../security/token/DelegationTokenIssuer.java   | 112 ++
 .../java/org/apache/hadoop/util/KMSUtil.java|  13 +-
 ...TestKeyProviderDelegationTokenExtension.java |  20 +-
 .../crypto/key/kms/TestKMSClientProvider.java   | 138 
 .../kms/TestLoadBalancingKMSClientProvider.java |  63 +++-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |   3 +
 .../org/apache/hadoop/fs/TestHarFileSystem.java |   3 +
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 349 ---
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  11 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  14 +-
 .../org/apache/hadoop/hdfs/HdfsKMSUtil.java |  60 ++--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  20 +-
 18 files changed, 963 insertions(+), 313 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff7ca472/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
index 9212cbc..280ee86 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
@@ -17,8 +17,12 @@
  */
 package org.apache.hadoop.crypto.key;
 
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
+import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 import java.io.IOException;
 
@@ -28,7 +32,8 @@ import java.io.IOException;
  */
 public class KeyProviderDelegationTokenExtension extends
 KeyProviderExtension
- {
+
+implements DelegationTokenIssuer {
   
   private static DelegationTokenExtension DEFAULT_EXTENSION = 
   new DefaultDelegationTokenExtension();
@@ -36,22 +41,9 @@ public class KeyProviderDelegationTokenExtension extends
   /**
* DelegationTokenExtension is a type of Extension that exposes methods
* needed to work with Delegation Tokens.
-   */  
-  public interface DelegationTokenExtension extends 
-KeyProviderExtension.Extension {
-
-/**
- * The implementer of this class will take a renewer and add all
- * delegation tokens associated with the renewer to the 
- * Credentials object if it is not already present, 
- * @param renewer the user allowed to renew the delegation tokens
- * @param credentials cache in which to add new delegation tokens
- * @return list of new delegation tokens
- * @throws IOException thrown if IOException if an IO error occurs.
- */
-Token[] addDelegationTokens(final String renewer,
-Credentials credentials) throws IOException;
-
+   */
+  public interface DelegationTokenExtension
+  extends KeyProviderExtension.Extension, DelegationTokenIssuer {
 /**
  * Renews the given token.
  * @param token The token to be renewed.
@@ -66,6 +58,12 @@ public class KeyProviderDelegationTokenExtension extends
  * @throws IOException
  */
 Void cancelDelegationToken(final Token token) throws IOException;
+
+// Do NOT call this. Only intended for internal use.
+@VisibleForTesting
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+Token selectDelegationToken(Credentials creds

[1/2] hadoop git commit: HADOOP-14445. Use DelegationTokenIssuer to create KMS delegation tokens that can authenticate to all KMS instances. Contributed by Daryn Sharp, Xiao Chen, Rushabh S Shah.

2018-10-12 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 53b522af6 -> ff7ca472d


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff7ca472/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
index de27f7e..30e8aa7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
@@ -35,14 +35,12 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
-import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.KMSUtil;
 
 /**
@@ -71,32 +69,6 @@ public final class HdfsKMSUtil {
 return KMSUtil.createKeyProvider(conf, keyProviderUriKeyName);
   }
 
-  public static Token[] addDelegationTokensForKeyProvider(
-  KeyProviderTokenIssuer kpTokenIssuer, final String renewer,
-  Credentials credentials, URI namenodeUri, Token[] tokens)
-  throws IOException {
-KeyProvider keyProvider = kpTokenIssuer.getKeyProvider();
-if (keyProvider != null) {
-  KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension
-  = KeyProviderDelegationTokenExtension.
-  createKeyProviderDelegationTokenExtension(keyProvider);
-  Token[] kpTokens = keyProviderDelegationTokenExtension.
-  addDelegationTokens(renewer, credentials);
-  credentials.addSecretKey(getKeyProviderMapKey(namenodeUri),
-  DFSUtilClient.string2Bytes(
-  kpTokenIssuer.getKeyProviderUri().toString()));
-  if (tokens != null && kpTokens != null) {
-Token[] all = new Token[tokens.length + kpTokens.length];
-System.arraycopy(tokens, 0, all, 0, tokens.length);
-System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length);
-tokens = all;
-  } else {
-tokens = (tokens != null) ? tokens : kpTokens;
-  }
-}
-return tokens;
-  }
-
   /**
* Obtain the crypto protocol version from the provided FileEncryptionInfo,
* checking to see if this version is supported by.
@@ -161,28 +133,36 @@ public final class HdfsKMSUtil {
 URI keyProviderUri = null;
 // Lookup the secret in credentials object for namenodeuri.
 Credentials credentials = ugi.getCredentials();
+Text credsKey = getKeyProviderMapKey(namenodeUri);
 byte[] keyProviderUriBytes =
-credentials.getSecretKey(getKeyProviderMapKey(namenodeUri));
+credentials.getSecretKey(credsKey);
 if(keyProviderUriBytes != null) {
   keyProviderUri =
   URI.create(DFSUtilClient.bytes2String(keyProviderUriBytes));
-  return keyProviderUri;
 }
-
-if (keyProviderUriStr != null) {
-  if (!keyProviderUriStr.isEmpty()) {
+if (keyProviderUri == null) {
+  // NN is old and doesn't report provider, so use conf.
+  if (keyProviderUriStr == null) {
+keyProviderUri = KMSUtil.getKeyProviderUri(conf, 
keyProviderUriKeyName);
+  } else if (!keyProviderUriStr.isEmpty()) {
 keyProviderUri = URI.create(keyProviderUriStr);
   }
-  return keyProviderUri;
+  if (keyProviderUri != null) {
+credentials.addSecretKey(
+credsKey, DFSUtilClient.string2Bytes(keyProviderUri.toString()));
+  }
 }
+return keyProviderUri;
+  }
 
-// Last thing is to trust its own conf to be backwards compatible.
-String keyProviderUriFromConf = conf.getTrimmed(
-CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
-if (keyProviderUriFromConf != null && !keyProviderUriFromConf.isEmpty()) {
-  keyProviderUri = URI.create(keyProviderUriFromConf);
+  public static KeyProvider getKeyProvider(KeyProviderTokenIssuer issuer,
+   Configuration conf)
+  throws IOException {
+URI keyProviderUri = issuer.getKeyProviderUri();
+if (keyProviderUri != null) {
+  return KMSUtil.createKeyProviderFromUri(conf, keyProviderUri);
 }
-return keyProviderUri;
+return null;
   }
 
   /**

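The getKeyProviderUri hunk above reads more easily as a small sketch of the new resolution order (a paraphrase, not code from the patch; the class, method, and parameter names below are hypothetical):

import java.net.URI;
import java.nio.charset.StandardCharsets;

final class KeyProviderUriSketch {
  // Order: (1) URI cached in the UGI credentials under the per-NameNode key,
  //        (2) URI reported by the NameNode,
  //        (3) client configuration when an old NameNode reports nothing.
  // The patch also caches the resolved URI back into the credentials so tasks
  // shipping those credentials resolve the same provider.
  static URI resolve(byte[] cachedBytes, String nnReported, URI fromConf) {
    if (cachedBytes != null) {
      return URI.create(new String(cachedBytes, StandardCharsets.UTF_8));
    }
    if (nnReported == null) {
      return fromConf;                      // old NameNode: fall back to conf
    }
    return nnReported.isEmpty() ? null : URI.create(nnReported);
  }
}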

[2/2] hadoop git commit: HADOOP-14445. Use DelegationTokenIssuer to create KMS delegation tokens that can authenticate to all KMS instances. Contributed by Daryn Sharp, Xiao Chen, Rushabh S Shah.

2018-10-12 Thread xiao
HADOOP-14445. Use DelegationTokenIssuer to create KMS delegation tokens that 
can authenticate to all KMS instances.
Contributed by Daryn Sharp, Xiao Chen, Rushabh S Shah.

(cherry picked from commit 5ec86b445cc492f52c33639efb6a09a0d2f27475)
(cherry picked from commit e93e401f492c7fc112da3d0c63b3b186c1b196ec)

 Conflicts:

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a1ce74f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a1ce74f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a1ce74f

Branch: refs/heads/branch-3.1
Commit: 6a1ce74fb19226daca1a04f04576c10451c62cd6
Parents: 6342a7c
Author: Xiao Chen 
Authored: Fri Oct 12 09:32:21 2018 -0700
Committer: Xiao Chen 
Committed: Fri Oct 12 12:08:26 2018 -0700

--
 .../KeyProviderDelegationTokenExtension.java|  71 ++--
 .../crypto/key/KeyProviderTokenIssuer.java  |   4 +-
 .../crypto/key/kms/KMSClientProvider.java   | 220 
 .../key/kms/LoadBalancingKMSClientProvider.java |  75 +++-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  75 +---
 .../web/DelegationTokenAuthenticatedURL.java|  25 +-
 .../security/token/DelegationTokenIssuer.java   | 112 ++
 .../java/org/apache/hadoop/util/KMSUtil.java|  13 +-
 ...TestKeyProviderDelegationTokenExtension.java |  20 +-
 .../crypto/key/kms/TestKMSClientProvider.java   | 138 
 .../kms/TestLoadBalancingKMSClientProvider.java |  63 +++-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |   3 +
 .../org/apache/hadoop/fs/TestHarFileSystem.java |   3 +
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 349 ---
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  11 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  14 +-
 .../org/apache/hadoop/hdfs/HdfsKMSUtil.java |  60 ++--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  20 +-
 18 files changed, 963 insertions(+), 313 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a1ce74f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
index 9212cbc..280ee86 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
@@ -17,8 +17,12 @@
  */
 package org.apache.hadoop.crypto.key;
 
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
+import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 import java.io.IOException;
 
@@ -28,7 +32,8 @@ import java.io.IOException;
  */
 public class KeyProviderDelegationTokenExtension extends
 KeyProviderExtension
- {
+
+implements DelegationTokenIssuer {
   
   private static DelegationTokenExtension DEFAULT_EXTENSION = 
   new DefaultDelegationTokenExtension();
@@ -36,22 +41,9 @@ public class KeyProviderDelegationTokenExtension extends
   /**
* DelegationTokenExtension is a type of Extension that exposes methods
* needed to work with Delegation Tokens.
-   */  
-  public interface DelegationTokenExtension extends 
-KeyProviderExtension.Extension {
-
-/**
- * The implementer of this class will take a renewer and add all
- * delegation tokens associated with the renewer to the 
- * Credentials object if it is not already present, 
- * @param renewer the user allowed to renew the delegation tokens
- * @param credentials cache in which to add new delegation tokens
- * @return list of new delegation tokens
- * @throws IOException thrown if IOException if an IO error occurs.
- */
-Token[] addDelegationTokens(final String renewer,
-Credentials credentials) throws IOException;
-
+   */
+  public interface DelegationTokenExtension
+  extends KeyProviderExtension.Extension, DelegationTokenIssuer {
 /**
  * Renews the given token.
  * @param token The token to be renewed.
@@ -66,6 +58,12 @@ public class KeyProviderDelegationTokenExtension extends
  * @throws IOException

[1/2] hadoop git commit: HADOOP-14445. Use DelegationTokenIssuer to create KMS delegation tokens that can authenticate to all KMS instances. Contributed by Daryn Sharp, Xiao Chen, Rushabh S Shah.

2018-10-12 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 65c1469b1 -> 9cb0654fb


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cb0654f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
index de27f7e..30e8aa7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
@@ -35,14 +35,12 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
-import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.KMSUtil;
 
 /**
@@ -71,32 +69,6 @@ public final class HdfsKMSUtil {
 return KMSUtil.createKeyProvider(conf, keyProviderUriKeyName);
   }
 
-  public static Token[] addDelegationTokensForKeyProvider(
-  KeyProviderTokenIssuer kpTokenIssuer, final String renewer,
-  Credentials credentials, URI namenodeUri, Token[] tokens)
-  throws IOException {
-KeyProvider keyProvider = kpTokenIssuer.getKeyProvider();
-if (keyProvider != null) {
-  KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension
-  = KeyProviderDelegationTokenExtension.
-  createKeyProviderDelegationTokenExtension(keyProvider);
-  Token[] kpTokens = keyProviderDelegationTokenExtension.
-  addDelegationTokens(renewer, credentials);
-  credentials.addSecretKey(getKeyProviderMapKey(namenodeUri),
-  DFSUtilClient.string2Bytes(
-  kpTokenIssuer.getKeyProviderUri().toString()));
-  if (tokens != null && kpTokens != null) {
-Token[] all = new Token[tokens.length + kpTokens.length];
-System.arraycopy(tokens, 0, all, 0, tokens.length);
-System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length);
-tokens = all;
-  } else {
-tokens = (tokens != null) ? tokens : kpTokens;
-  }
-}
-return tokens;
-  }
-
   /**
* Obtain the crypto protocol version from the provided FileEncryptionInfo,
* checking to see if this version is supported by.
@@ -161,28 +133,36 @@ public final class HdfsKMSUtil {
 URI keyProviderUri = null;
 // Lookup the secret in credentials object for namenodeuri.
 Credentials credentials = ugi.getCredentials();
+Text credsKey = getKeyProviderMapKey(namenodeUri);
 byte[] keyProviderUriBytes =
-credentials.getSecretKey(getKeyProviderMapKey(namenodeUri));
+credentials.getSecretKey(credsKey);
 if(keyProviderUriBytes != null) {
   keyProviderUri =
   URI.create(DFSUtilClient.bytes2String(keyProviderUriBytes));
-  return keyProviderUri;
 }
-
-if (keyProviderUriStr != null) {
-  if (!keyProviderUriStr.isEmpty()) {
+if (keyProviderUri == null) {
+  // NN is old and doesn't report provider, so use conf.
+  if (keyProviderUriStr == null) {
+keyProviderUri = KMSUtil.getKeyProviderUri(conf, 
keyProviderUriKeyName);
+  } else if (!keyProviderUriStr.isEmpty()) {
 keyProviderUri = URI.create(keyProviderUriStr);
   }
-  return keyProviderUri;
+  if (keyProviderUri != null) {
+credentials.addSecretKey(
+credsKey, DFSUtilClient.string2Bytes(keyProviderUri.toString()));
+  }
 }
+return keyProviderUri;
+  }
 
-// Last thing is to trust its own conf to be backwards compatible.
-String keyProviderUriFromConf = conf.getTrimmed(
-CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
-if (keyProviderUriFromConf != null && !keyProviderUriFromConf.isEmpty()) {
-  keyProviderUri = URI.create(keyProviderUriFromConf);
+  public static KeyProvider getKeyProvider(KeyProviderTokenIssuer issuer,
+   Configuration conf)
+  throws IOException {
+URI keyProviderUri = issuer.getKeyProviderUri();
+if (keyProviderUri != null) {
+  return KMSUtil.createKeyProviderFromUri(conf, keyProviderUri);
 }
-return keyProviderUri;
+return null;
   }
 
   /**


[1/2] hadoop git commit: HADOOP-14445. Use DelegationTokenIssuer to create KMS delegation tokens that can authenticate to all KMS instances. Contributed by Daryn Sharp, Xiao Chen, Rushabh S Shah.

2018-10-12 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 6342a7cb9 -> 6a1ce74fb


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a1ce74f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
index de27f7e..30e8aa7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
@@ -35,14 +35,12 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
-import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.KMSUtil;
 
 /**
@@ -71,32 +69,6 @@ public final class HdfsKMSUtil {
 return KMSUtil.createKeyProvider(conf, keyProviderUriKeyName);
   }
 
-  public static Token[] addDelegationTokensForKeyProvider(
-  KeyProviderTokenIssuer kpTokenIssuer, final String renewer,
-  Credentials credentials, URI namenodeUri, Token[] tokens)
-  throws IOException {
-KeyProvider keyProvider = kpTokenIssuer.getKeyProvider();
-if (keyProvider != null) {
-  KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension
-  = KeyProviderDelegationTokenExtension.
-  createKeyProviderDelegationTokenExtension(keyProvider);
-  Token[] kpTokens = keyProviderDelegationTokenExtension.
-  addDelegationTokens(renewer, credentials);
-  credentials.addSecretKey(getKeyProviderMapKey(namenodeUri),
-  DFSUtilClient.string2Bytes(
-  kpTokenIssuer.getKeyProviderUri().toString()));
-  if (tokens != null && kpTokens != null) {
-Token[] all = new Token[tokens.length + kpTokens.length];
-System.arraycopy(tokens, 0, all, 0, tokens.length);
-System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length);
-tokens = all;
-  } else {
-tokens = (tokens != null) ? tokens : kpTokens;
-  }
-}
-return tokens;
-  }
-
   /**
* Obtain the crypto protocol version from the provided FileEncryptionInfo,
* checking to see if this version is supported by.
@@ -161,28 +133,36 @@ public final class HdfsKMSUtil {
 URI keyProviderUri = null;
 // Lookup the secret in credentials object for namenodeuri.
 Credentials credentials = ugi.getCredentials();
+Text credsKey = getKeyProviderMapKey(namenodeUri);
 byte[] keyProviderUriBytes =
-credentials.getSecretKey(getKeyProviderMapKey(namenodeUri));
+credentials.getSecretKey(credsKey);
 if(keyProviderUriBytes != null) {
   keyProviderUri =
   URI.create(DFSUtilClient.bytes2String(keyProviderUriBytes));
-  return keyProviderUri;
 }
-
-if (keyProviderUriStr != null) {
-  if (!keyProviderUriStr.isEmpty()) {
+if (keyProviderUri == null) {
+  // NN is old and doesn't report provider, so use conf.
+  if (keyProviderUriStr == null) {
+keyProviderUri = KMSUtil.getKeyProviderUri(conf, 
keyProviderUriKeyName);
+  } else if (!keyProviderUriStr.isEmpty()) {
 keyProviderUri = URI.create(keyProviderUriStr);
   }
-  return keyProviderUri;
+  if (keyProviderUri != null) {
+credentials.addSecretKey(
+credsKey, DFSUtilClient.string2Bytes(keyProviderUri.toString()));
+  }
 }
+return keyProviderUri;
+  }
 
-// Last thing is to trust its own conf to be backwards compatible.
-String keyProviderUriFromConf = conf.getTrimmed(
-CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
-if (keyProviderUriFromConf != null && !keyProviderUriFromConf.isEmpty()) {
-  keyProviderUri = URI.create(keyProviderUriFromConf);
+  public static KeyProvider getKeyProvider(KeyProviderTokenIssuer issuer,
+   Configuration conf)
+  throws IOException {
+URI keyProviderUri = issuer.getKeyProviderUri();
+if (keyProviderUri != null) {
+  return KMSUtil.createKeyProviderFromUri(conf, keyProviderUri);
 }
-return keyProviderUri;
+return null;
   }
 
   /**


[2/2] hadoop git commit: HADOOP-14445. Use DelegationTokenIssuer to create KMS delegation tokens that can authenticate to all KMS instances. Contributed by Daryn Sharp, Xiao Chen, Rushabh S Shah.

2018-10-12 Thread xiao
HADOOP-14445. Use DelegationTokenIssuer to create KMS delegation tokens that 
can authenticate to all KMS instances.
Contributed by Daryn Sharp, Xiao Chen, Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ec86b44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ec86b44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ec86b44

Branch: refs/heads/trunk
Commit: 5ec86b445cc492f52c33639efb6a09a0d2f27475
Parents: 6e0e6da
Author: Xiao Chen 
Authored: Fri Oct 12 09:32:21 2018 -0700
Committer: Xiao Chen 
Committed: Fri Oct 12 09:35:52 2018 -0700

--
 .../KeyProviderDelegationTokenExtension.java|  71 ++--
 .../crypto/key/KeyProviderTokenIssuer.java  |   4 +-
 .../crypto/key/kms/KMSClientProvider.java   | 220 
 .../key/kms/LoadBalancingKMSClientProvider.java |  75 +++-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  75 +---
 .../web/DelegationTokenAuthenticatedURL.java|  25 +-
 .../security/token/DelegationTokenIssuer.java   | 112 ++
 .../java/org/apache/hadoop/util/KMSUtil.java|  13 +-
 ...TestKeyProviderDelegationTokenExtension.java |  20 +-
 .../crypto/key/kms/TestKMSClientProvider.java   | 138 
 .../kms/TestLoadBalancingKMSClientProvider.java |  63 +++-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |   3 +
 .../org/apache/hadoop/fs/TestHarFileSystem.java |   3 +
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 349 ---
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  11 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  14 +-
 .../org/apache/hadoop/hdfs/HdfsKMSUtil.java |  60 ++--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  20 +-
 18 files changed, 963 insertions(+), 313 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec86b44/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
index a63b7d5..29c5bcd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
@@ -17,8 +17,12 @@
  */
 package org.apache.hadoop.crypto.key;
 
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
+import 
org.apache.hadoop.security.token.org.apache.hadoop.security.token.DelegationTokenIssuer;
 
 import java.io.IOException;
 
@@ -28,7 +32,8 @@ import java.io.IOException;
  */
 public class KeyProviderDelegationTokenExtension extends
 KeyProviderExtension
- {
+
+implements DelegationTokenIssuer {
   
   private static DelegationTokenExtension DEFAULT_EXTENSION = 
   new DefaultDelegationTokenExtension();
@@ -36,22 +41,9 @@ public class KeyProviderDelegationTokenExtension extends
   /**
* DelegationTokenExtension is a type of Extension that exposes methods
* needed to work with Delegation Tokens.
-   */  
-  public interface DelegationTokenExtension extends 
-KeyProviderExtension.Extension {
-
-/**
- * The implementer of this class will take a renewer and add all
- * delegation tokens associated with the renewer to the 
- * Credentials object if it is not already present, 
- * @param renewer the user allowed to renew the delegation tokens
- * @param credentials cache in which to add new delegation tokens
- * @return list of new delegation tokens
- * @throws IOException thrown if IOException if an IO error occurs.
- */
-Token[] addDelegationTokens(final String renewer,
-Credentials credentials) throws IOException;
-
+   */
+  public interface DelegationTokenExtension
+  extends KeyProviderExtension.Extension, DelegationTokenIssuer {
 /**
  * Renews the given token.
  * @param token The token to be renewed.
@@ -66,6 +58,12 @@ public class KeyProviderDelegationTokenExtension extends
  * @throws IOException
  */
 Void cancelDelegationToken(final Token token) throws IOException;
+
+// Do NOT call this. Only intended for internal use.
+@VisibleForTesting
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+Token selectDelegationToken(Credentials creds

[1/2] hadoop git commit: HADOOP-14445. Use DelegationTokenIssuer to create KMS delegation tokens that can authenticate to all KMS instances. Contributed by Daryn Sharp, Xiao Chen, Rushabh S Shah.

2018-10-12 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6e0e6daaf -> 5ec86b445


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec86b44/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
index de27f7e..30e8aa7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
@@ -35,14 +35,12 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
-import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.KMSUtil;
 
 /**
@@ -71,32 +69,6 @@ public final class HdfsKMSUtil {
 return KMSUtil.createKeyProvider(conf, keyProviderUriKeyName);
   }
 
-  public static Token[] addDelegationTokensForKeyProvider(
-  KeyProviderTokenIssuer kpTokenIssuer, final String renewer,
-  Credentials credentials, URI namenodeUri, Token[] tokens)
-  throws IOException {
-KeyProvider keyProvider = kpTokenIssuer.getKeyProvider();
-if (keyProvider != null) {
-  KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension
-  = KeyProviderDelegationTokenExtension.
-  createKeyProviderDelegationTokenExtension(keyProvider);
-  Token[] kpTokens = keyProviderDelegationTokenExtension.
-  addDelegationTokens(renewer, credentials);
-  credentials.addSecretKey(getKeyProviderMapKey(namenodeUri),
-  DFSUtilClient.string2Bytes(
-  kpTokenIssuer.getKeyProviderUri().toString()));
-  if (tokens != null && kpTokens != null) {
-Token[] all = new Token[tokens.length + kpTokens.length];
-System.arraycopy(tokens, 0, all, 0, tokens.length);
-System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length);
-tokens = all;
-  } else {
-tokens = (tokens != null) ? tokens : kpTokens;
-  }
-}
-return tokens;
-  }
-
   /**
* Obtain the crypto protocol version from the provided FileEncryptionInfo,
* checking to see if this version is supported by.
@@ -161,28 +133,36 @@ public final class HdfsKMSUtil {
 URI keyProviderUri = null;
 // Lookup the secret in credentials object for namenodeuri.
 Credentials credentials = ugi.getCredentials();
+Text credsKey = getKeyProviderMapKey(namenodeUri);
 byte[] keyProviderUriBytes =
-credentials.getSecretKey(getKeyProviderMapKey(namenodeUri));
+credentials.getSecretKey(credsKey);
 if(keyProviderUriBytes != null) {
   keyProviderUri =
   URI.create(DFSUtilClient.bytes2String(keyProviderUriBytes));
-  return keyProviderUri;
 }
-
-if (keyProviderUriStr != null) {
-  if (!keyProviderUriStr.isEmpty()) {
+if (keyProviderUri == null) {
+  // NN is old and doesn't report provider, so use conf.
+  if (keyProviderUriStr == null) {
+keyProviderUri = KMSUtil.getKeyProviderUri(conf, 
keyProviderUriKeyName);
+  } else if (!keyProviderUriStr.isEmpty()) {
 keyProviderUri = URI.create(keyProviderUriStr);
   }
-  return keyProviderUri;
+  if (keyProviderUri != null) {
+credentials.addSecretKey(
+credsKey, DFSUtilClient.string2Bytes(keyProviderUri.toString()));
+  }
 }
+return keyProviderUri;
+  }
 
-// Last thing is to trust its own conf to be backwards compatible.
-String keyProviderUriFromConf = conf.getTrimmed(
-CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
-if (keyProviderUriFromConf != null && !keyProviderUriFromConf.isEmpty()) {
-  keyProviderUri = URI.create(keyProviderUriFromConf);
+  public static KeyProvider getKeyProvider(KeyProviderTokenIssuer issuer,
+   Configuration conf)
+  throws IOException {
+URI keyProviderUri = issuer.getKeyProviderUri();
+if (keyProviderUri != null) {
+  return KMSUtil.createKeyProviderFromUri(conf, keyProviderUri);
 }
-return keyProviderUri;
+return null;
   }
 
   /**
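
[Editorial sketch] The reworked getKeyProviderUri above resolves the provider URI in a fixed order -- the per-NameNode entry cached in the Credentials, then the URI the NameNode reports, then the client configuration for NameNodes too old to report one -- and caches whatever it finds. The standalone sketch below mirrors only that order; the string map stands in for the Hadoop Credentials object and all names are illustrative.

import java.net.URI;
import java.util.HashMap;
import java.util.Map;

public final class KeyProviderUriLookupSketch {

  static URI resolve(Map<String, String> credentialsCache, String credsKey,
      String nnReportedUri, String confUri) {
    String cached = credentialsCache.get(credsKey);
    if (cached != null) {
      return URI.create(cached);              // 1. already cached for this NameNode
    }
    URI resolved = null;
    if (nnReportedUri == null) {
      // 3. NameNode too old to report a provider: trust the client conf.
      if (confUri != null && !confUri.isEmpty()) {
        resolved = URI.create(confUri);
      }
    } else if (!nnReportedUri.isEmpty()) {
      resolved = URI.create(nnReportedUri);   // 2. URI reported by the NameNode
    }
    if (resolved != null) {
      credentialsCache.put(credsKey, resolved.toString());  // remember for next time
    }
    return resolved;
  }

  public static void main(String[] args) {
    Map<String, String> creds = new HashMap<>();
    URI first = resolve(creds, "keyProvider-for-nn1", null, "kms://http@kms:9600/kms");
    URI second = resolve(creds, "keyProvider-for-nn1", "kms://http@other:9600/kms", null);
    System.out.println(first);   // resolved from conf and cached
    System.out.println(second);  // served from the cache; the NN value is not consulted
  }
}
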


hadoop git commit: HDFS-13973. getErasureCodingPolicy should log path in audit event. Contributed by Shweta.

2018-10-11 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk a3edfddcf -> c0118105b


HDFS-13973. getErasureCodingPolicy should log path in audit event. Contributed 
by Shweta.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0118105
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0118105
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0118105

Branch: refs/heads/trunk
Commit: c0118105b7486793bcaca48786d5f9d3e0ee2ff3
Parents: a3edfdd
Author: Shweta 
Authored: Thu Oct 11 20:38:19 2018 -0700
Committer: Xiao Chen 
Committed: Thu Oct 11 20:39:41 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0118105/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 137dfb9..cc38036 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7728,7 +7728,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   return ret;
 } finally {
   readUnlock(operationName);
-  logAuditEvent(success, operationName, null);
+  logAuditEvent(success, operationName, src);
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15676. Cleanup TestSSLHttpServer. Contributed by Szilard Nemeth.

2018-10-11 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk c05b26006 -> 64f2b32d5


HADOOP-15676. Cleanup TestSSLHttpServer. Contributed by Szilard Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64f2b32d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64f2b32d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64f2b32d

Branch: refs/heads/trunk
Commit: 64f2b32d57f35864b5c47b7e80f02e9c939f592a
Parents: c05b260
Author: Xiao Chen 
Authored: Thu Oct 11 15:08:22 2018 -0700
Committer: Xiao Chen 
Committed: Thu Oct 11 15:12:36 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 204 ++-
 1 file changed, 103 insertions(+), 101 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64f2b32d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 2166464..3f6ee7b 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -24,8 +24,9 @@ import java.io.InputStream;
 import java.net.InetAddress;
 import java.net.Socket;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.net.URL;
-import java.net.UnknownHostException;
+import java.security.GeneralSecurityException;
 
 import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.SSLHandshakeException;
@@ -34,6 +35,7 @@ import javax.net.ssl.SSLSocketFactory;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.http.TestHttpServer.EchoServlet;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
@@ -60,23 +62,34 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
   LoggerFactory.getLogger(TestSSLHttpServer.class);
   private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
   private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
-  private static Configuration conf;
+  private static final String SSL_SERVER_KEYSTORE_PROP_PREFIX = "ssl.server" +
+  ".keystore";
+  private static final String SSL_SERVER_TRUSTSTORE_PROP_PREFIX = "ssl.server" +
+  ".truststore";
+
+  private static final String SERVLET_NAME_LONGHEADER = "longheader";
+  private static final String SERVLET_PATH_LONGHEADER =
+  "/" + SERVLET_NAME_LONGHEADER;
+  private static final String SERVLET_NAME_ECHO = "echo";
+  private static final String SERVLET_PATH_ECHO = "/" + SERVLET_NAME_ECHO;
+
   private static HttpServer2 server;
-  private static String keystoresDir;
+  private static String keystoreDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
   private static String cipherSuitesPropertyValue;
   private static String sslDebugPropertyValue;
-  private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
+  private static final String EXCLUDED_CIPHERS =
+  "TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
   + "SSL_DHE_RSA_WITH_DES_CBC_SHA,  "
   + "SSL_RSA_EXPORT_WITH_RC4_40_MD5,\t \n"
   + "SSL_RSA_EXPORT_WITH_DES40_CBC_SHA,"
   + "SSL_RSA_WITH_RC4_128_MD5 \t";
-  private static final String oneEnabledCiphers = excludeCiphers
+  private static final String ONE_ENABLED_CIPHERS = EXCLUDED_CIPHERS
   + ",TLS_RSA_WITH_AES_128_CBC_SHA";
-  private static final String exclusiveEnabledCiphers
+  private static final String EXCLUSIVE_ENABLED_CIPHERS
   = "\tTLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, \n"
   + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,"
   + "TLS_RSA_WITH_AES_128_CBC_SHA,"
@@ -90,49 +103,54 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 turnOnSSLDebugLogging();
 storeHttpsCipherSuites();
 
-conf = new Configuration();
+Configuration conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
 
 File base = new File(BASEDIR);
 FileUtil.fullyDelete(base);
 base.mkdirs();
-keystoresDir = new File(BASEDIR).getAbsolutePath();
+keystoreDir =

hadoop git commit: HDFS-13882. Set a maximum delay for retrying locateFollowingBlock. Contributed by Kitti Nanasi.

2018-10-10 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 90552b1ea -> 10185d9a7


HDFS-13882. Set a maximum delay for retrying locateFollowingBlock. Contributed 
by Kitti Nanasi.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10185d9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10185d9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10185d9a

Branch: refs/heads/trunk
Commit: 10185d9a77ce07080588f3c77399a07cd7ccf427
Parents: 90552b1
Author: Kitti Nanasi 
Authored: Wed Oct 10 08:54:50 2018 -0700
Committer: Xiao Chen 
Committed: Wed Oct 10 08:55:32 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 21 -
 .../hdfs/client/HdfsClientConfigKeys.java   |  3 ++
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  8 
 .../src/main/resources/hdfs-default.xml | 13 +-
 .../hadoop/hdfs/TestDFSClientRetries.java   | 47 +++-
 .../hadoop/tools/TestHdfsConfigFields.java  |  1 +
 6 files changed, 80 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10185d9a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index e977054..b8aae97 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -942,6 +942,7 @@ public class DFSOutputStream extends FSOutputSummer
 long localstart = Time.monotonicNow();
 final DfsClientConf conf = dfsClient.getConf();
 long sleeptime = conf.getBlockWriteLocateFollowingInitialDelayMs();
+long maxSleepTime = conf.getBlockWriteLocateFollowingMaxDelayMs();
 boolean fileComplete = false;
 int retries = conf.getNumBlockWriteLocateFollowingRetry();
 while (!fileComplete) {
@@ -965,7 +966,7 @@ public class DFSOutputStream extends FSOutputSummer
   }
   retries--;
   Thread.sleep(sleeptime);
-  sleeptime *= 2;
+  sleeptime = calculateDelayForNextRetry(sleeptime, maxSleepTime);
   if (Time.monotonicNow() - localstart > 5000) {
 DFSClient.LOG.info("Could not complete " + src + " retrying...");
   }
@@ -1075,6 +1076,7 @@ public class DFSOutputStream extends FSOutputSummer
 final DfsClientConf conf = dfsClient.getConf();
 int retries = conf.getNumBlockWriteLocateFollowingRetry();
 long sleeptime = conf.getBlockWriteLocateFollowingInitialDelayMs();
+long maxSleepTime = conf.getBlockWriteLocateFollowingMaxDelayMs();
 long localstart = Time.monotonicNow();
 while (true) {
   try {
@@ -1106,7 +1108,7 @@ public class DFSOutputStream extends FSOutputSummer
   LOG.warn("NotReplicatedYetException sleeping " + src
   + " retries left " + retries);
   Thread.sleep(sleeptime);
-  sleeptime *= 2;
+  sleeptime = calculateDelayForNextRetry(sleeptime, maxSleepTime);
 } catch (InterruptedException ie) {
   LOG.warn("Caught exception", ie);
 }
@@ -1117,4 +1119,19 @@ public class DFSOutputStream extends FSOutputSummer
   }
 }
   }
+
+  /**
+   * Calculates the delay for the next retry.
+   *
+   * The delay is increased exponentially until the maximum delay is reached.
+   *
+   * @param previousDelay delay for the previous retry
+   * @param maxDelay maximum delay
+   * @return the minimum of the double of the previous delay
+   * and the maximum delay
+   */
+  private static long calculateDelayForNextRetry(long previousDelay,
+ long maxDelay) {
+return Math.min(previousDelay * 2, maxDelay);
+  }
 }
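
[Editorial sketch] calculateDelayForNextRetry above replaces the unbounded sleeptime *= 2 doubling with a delay that doubles but never exceeds a configured maximum. A minimal standalone sketch of that behaviour, using made-up example values rather than the hdfs-default.xml defaults:

public final class CappedBackoffSketch {

  /** Same shape as calculateDelayForNextRetry: double, but never exceed maxDelay. */
  static long nextDelay(long previousDelay, long maxDelay) {
    return Math.min(previousDelay * 2, maxDelay);
  }

  public static void main(String[] args) {
    long delay = 400;       // assumed initial delay in ms
    long maxDelay = 8000;   // assumed cap in ms
    for (int retry = 1; retry <= 8; retry++) {
      System.out.println("retry " + retry + ": sleep " + delay + " ms");
      delay = nextDelay(delay, maxDelay);
    }
    // prints 400, 800, 1600, 3200, 6400, 8000, 8000, 8000 -- growth stops at the cap
  }
}
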

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10185d9a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index a812670..b1ce78d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop

hadoop git commit: HDFS-13926. ThreadLocal aggregations for FileSystem.Statistics are incorrect with striped reads. Contributed by Xiao Chen, Hrishikesh Gadre.

2018-10-09 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 af85ce6ae -> b170de8be


HDFS-13926. ThreadLocal aggregations for FileSystem.Statistics are incorrect 
with striped reads.
Contributed by Xiao Chen, Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 323b76bccfa153ef5ba52dc14876283d05618739)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b170de8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b170de8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b170de8b

Branch: refs/heads/branch-3.0
Commit: b170de8be5234015794d1e9eb09f5e69c7a2af25
Parents: af85ce6
Author: Hrishikesh Gadre 
Authored: Tue Oct 9 16:42:22 2018 -0700
Committer: Xiao Chen 
Committed: Tue Oct 9 19:54:56 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  5 ++
 .../hadoop/hdfs/DFSStripedInputStream.java  | 20 ++
 .../org/apache/hadoop/hdfs/ReaderStrategy.java  |  7 ---
 .../org/apache/hadoop/hdfs/StripeReader.java| 23 ---
 .../apache/hadoop/hdfs/util/IOUtilsClient.java  | 10 ++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 65 ++--
 .../erasurecode/ErasureCodingWorker.java|  3 +-
 .../erasurecode/StripedBlockReader.java | 14 +++--
 .../datanode/erasurecode/StripedReader.java | 17 ++---
 .../erasurecode/StripedReconstructor.java   |  3 +-
 .../TestDistributedFileSystemWithECFile.java| 38 
 11 files changed, 167 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b170de8b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index ae24572..827db47 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -89,6 +89,8 @@ import com.google.common.annotations.VisibleForTesting;
 
 import javax.annotation.Nonnull;
 
+import static org.apache.hadoop.hdfs.util.IOUtilsClient.updateReadStatistics;
+
 /
  * DFSInputStream provides bytes from a named file.  It handles
  * negotiation of the namenode and various datanodes as necessary.
@@ -768,6 +770,9 @@ public class DFSInputStream extends FSInputStream
 // got a EOS from reader though we expect more data on it.
 throw new IOException("Unexpected EOS from the reader");
   }
+  updateReadStatistics(readStatistics, result, blockReader);
+  dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(),
+  result);
   return result;
 } catch (ChecksumException ce) {
   throw ce;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b170de8b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 190ba8e..9ec3e0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -53,6 +53,8 @@ import java.util.Collection;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadPoolExecutor;
 
+import static org.apache.hadoop.hdfs.util.IOUtilsClient.updateReadStatistics;
+
 /**
  * DFSStripedInputStream reads from striped block groups.
  */
@@ -327,6 +329,24 @@ public class DFSStripedInputStream extends DFSInputStream {
   }
 
   /**
+   * Update read statistics. Note that this has to be done on the thread that
+   * initiates the read, rather than inside each async thread, for
+   * {@link org.apache.hadoop.fs.FileSystem.Statistics} to work correctly with
+   * its ThreadLocal.
+   *
+   * @param stats striped read stats
+   */
+  void updateReadStats(final StripedBlockUtil.BlockReadStats stats) {
+if (stats == null) {
+  return;
+}
+updateReadStatistics(readStatistics, stats.getBytesRead(),
+stats.isShortCircuit(), stats.getNetworkDistance());
+dfsClient.updateFileSystemReadStats(stats.getNetworkDistance(),
+stats
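
[Editorial sketch] The point of this change is that FileSystem.Statistics keeps its counters in a ThreadLocal, so bytes read by the async striped-read threads are invisible unless they are folded back in on the thread that initiated the read. The sketch below is a self-contained illustration of that pitfall, not the Hadoop classes: workers return their byte counts and the caller records them against its own thread-local counter.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public final class ThreadLocalStatsSketch {

  // Stand-in for FileSystem.Statistics: one mutable counter per thread.
  private static final ThreadLocal<long[]> BYTES_READ =
      ThreadLocal.withInitial(() -> new long[1]);

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(3);
    List<Future<Long>> parts = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
      parts.add(pool.submit(() -> {
        long read = 1024;  // pretend this worker read one 1 KB cell of a stripe
        // Updating BYTES_READ here would only touch the worker's own ThreadLocal,
        // which nothing ever reads -- so report the count back to the caller.
        return read;
      }));
    }
    for (Future<Long> part : parts) {
      // The initiating thread folds the workers' counts into *its* counter.
      BYTES_READ.get()[0] += part.get();
    }
    pool.shutdown();
    System.out.println("bytes seen by the reading thread: " + BYTES_READ.get()[0]); // 3072
  }
}
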

hadoop git commit: HDFS-13926. ThreadLocal aggregations for FileSystem.Statistics are incorrect with striped reads. Contributed by Xiao Chen, Hrishikesh Gadre.

2018-10-09 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 3968ce107 -> 323b76bcc


HDFS-13926. ThreadLocal aggregations for FileSystem.Statistics are incorrect 
with striped reads.
Contributed by Xiao Chen, Hrishikesh Gadre.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/323b76bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/323b76bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/323b76bc

Branch: refs/heads/branch-3.1
Commit: 323b76bccfa153ef5ba52dc14876283d05618739
Parents: 3968ce1
Author: Hrishikesh Gadre 
Authored: Tue Oct 9 16:42:22 2018 -0700
Committer: Xiao Chen 
Committed: Tue Oct 9 19:54:34 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  5 ++
 .../hadoop/hdfs/DFSStripedInputStream.java  | 20 ++
 .../org/apache/hadoop/hdfs/ReaderStrategy.java  |  7 ---
 .../org/apache/hadoop/hdfs/StripeReader.java| 23 ---
 .../apache/hadoop/hdfs/util/IOUtilsClient.java  | 10 ++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 65 ++--
 .../erasurecode/ErasureCodingWorker.java|  3 +-
 .../erasurecode/StripedBlockReader.java | 14 +++--
 .../datanode/erasurecode/StripedReader.java | 17 ++---
 .../erasurecode/StripedReconstructor.java   |  3 +-
 .../TestDistributedFileSystemWithECFile.java| 38 
 11 files changed, 167 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/323b76bc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 75eb2ea..98c2c9e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -89,6 +89,8 @@ import com.google.common.annotations.VisibleForTesting;
 
 import javax.annotation.Nonnull;
 
+import static org.apache.hadoop.hdfs.util.IOUtilsClient.updateReadStatistics;
+
 /
  * DFSInputStream provides bytes from a named file.  It handles
  * negotiation of the namenode and various datanodes as necessary.
@@ -768,6 +770,9 @@ public class DFSInputStream extends FSInputStream
 // got a EOS from reader though we expect more data on it.
 throw new IOException("Unexpected EOS from the reader");
   }
+  updateReadStatistics(readStatistics, result, blockReader);
+  dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(),
+  result);
   return result;
 } catch (ChecksumException ce) {
   throw ce;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/323b76bc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 190ba8e..9ec3e0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -53,6 +53,8 @@ import java.util.Collection;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadPoolExecutor;
 
+import static org.apache.hadoop.hdfs.util.IOUtilsClient.updateReadStatistics;
+
 /**
  * DFSStripedInputStream reads from striped block groups.
  */
@@ -327,6 +329,24 @@ public class DFSStripedInputStream extends DFSInputStream {
   }
 
   /**
+   * Update read statistics. Note that this has to be done on the thread that
+   * initiates the read, rather than inside each async thread, for
+   * {@link org.apache.hadoop.fs.FileSystem.Statistics} to work correctly with
+   * its ThreadLocal.
+   *
+   * @param stats striped read stats
+   */
+  void updateReadStats(final StripedBlockUtil.BlockReadStats stats) {
+if (stats == null) {
+  return;
+}
+updateReadStatistics(readStatistics, stats.getBytesRead(),
+stats.isShortCircuit(), stats.getNetworkDistance());
+dfsClient.updateFileSystemReadStats(stats.getNetworkDistance(),
+stats.getBytesRead());
+  }
+
+  /**
* Seek to a new arbitrary location.
*/
   @O

hadoop git commit: HDFS-13926. ThreadLocal aggregations for FileSystem.Statistics are incorrect with striped reads. Contributed by Xiao Chen, Hrishikesh Gadre.

2018-10-08 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 b6698e2a8 -> a99658cd8


HDFS-13926. ThreadLocal aggregations for FileSystem.Statistics are incorrect 
with striped reads.
Contributed by Xiao Chen, Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 08bb6c49a5aec32b7d9f29238560f947420405d6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a99658cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a99658cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a99658cd

Branch: refs/heads/branch-3.2
Commit: a99658cd8504edfe7b80f979eb25795c16726dcd
Parents: b6698e2
Author: Hrishikesh Gadre 
Authored: Mon Oct 8 20:30:53 2018 -0700
Committer: Xiao Chen 
Committed: Mon Oct 8 20:39:48 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  8 +++
 .../hadoop/hdfs/DFSStripedInputStream.java  | 22 +++
 .../org/apache/hadoop/hdfs/ReaderStrategy.java  | 15 -
 .../org/apache/hadoop/hdfs/StripeReader.java| 23 ---
 .../apache/hadoop/hdfs/util/IOUtilsClient.java  | 10 ++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 65 ++--
 .../erasurecode/ErasureCodingWorker.java|  3 +-
 .../erasurecode/StripedBlockReader.java | 14 +++--
 .../datanode/erasurecode/StripedReader.java | 17 ++---
 .../erasurecode/StripedReconstructor.java   |  3 +-
 .../TestDistributedFileSystemWithECFile.java| 44 +
 11 files changed, 178 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a99658cd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index e5640d2..52ed1d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -90,6 +90,8 @@ import com.google.common.annotations.VisibleForTesting;
 
 import javax.annotation.Nonnull;
 
+import static org.apache.hadoop.hdfs.util.IOUtilsClient.updateReadStatistics;
+
 /
  * DFSInputStream provides bytes from a named file.  It handles
  * negotiation of the namenode and various datanodes as necessary.
@@ -769,6 +771,12 @@ public class DFSInputStream extends FSInputStream
 // got a EOS from reader though we expect more data on it.
 throw new IOException("Unexpected EOS from the reader");
   }
+  updateReadStatistics(readStatistics, result, blockReader);
+  dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(),
+  result);
+  if (readStatistics.getBlockType() == BlockType.STRIPED) {
+dfsClient.updateFileSystemECReadStats(result);
+  }
   return result;
 } catch (ChecksumException ce) {
   throw ce;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a99658cd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 5557a50..3f688d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -54,6 +54,8 @@ import java.util.Collection;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadPoolExecutor;
 
+import static org.apache.hadoop.hdfs.util.IOUtilsClient.updateReadStatistics;
+
 /**
  * DFSStripedInputStream reads from striped block groups.
  */
@@ -329,6 +331,26 @@ public class DFSStripedInputStream extends DFSInputStream {
   }
 
   /**
+   * Update read statistics. Note that this has to be done on the thread that
+   * initiates the read, rather than inside each async thread, for
+   * {@link org.apache.hadoop.fs.FileSystem.Statistics} to work correctly with
+   * its ThreadLocal.
+   *
+   * @param stats striped read stats
+   */
+  void updateReadStats(final StripedBlockUtil.BlockReadStats stats) {
+if (stats == null) {
+  return;
+}
+updateReadStatistics(readStatistics, stats

hadoop git commit: HDFS-13926. ThreadLocal aggregations for FileSystem.Statistics are incorrect with striped reads. Contributed by Xiao Chen, Hrishikesh Gadre.

2018-10-08 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1043795f7 -> 08bb6c49a


HDFS-13926. ThreadLocal aggregations for FileSystem.Statistics are incorrect 
with striped reads.
Contributed by Xiao Chen, Hrishikesh Gadre.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08bb6c49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08bb6c49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08bb6c49

Branch: refs/heads/trunk
Commit: 08bb6c49a5aec32b7d9f29238560f947420405d6
Parents: 1043795
Author: Hrishikesh Gadre 
Authored: Mon Oct 8 20:30:53 2018 -0700
Committer: Xiao Chen 
Committed: Mon Oct 8 20:31:57 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  8 +++
 .../hadoop/hdfs/DFSStripedInputStream.java  | 22 +++
 .../org/apache/hadoop/hdfs/ReaderStrategy.java  | 15 -
 .../org/apache/hadoop/hdfs/StripeReader.java| 23 ---
 .../apache/hadoop/hdfs/util/IOUtilsClient.java  | 10 ++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 65 ++--
 .../erasurecode/ErasureCodingWorker.java|  3 +-
 .../erasurecode/StripedBlockReader.java | 14 +++--
 .../datanode/erasurecode/StripedReader.java | 17 ++---
 .../erasurecode/StripedReconstructor.java   |  3 +-
 .../TestDistributedFileSystemWithECFile.java| 44 +
 11 files changed, 178 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08bb6c49/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index e5640d2..52ed1d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -90,6 +90,8 @@ import com.google.common.annotations.VisibleForTesting;
 
 import javax.annotation.Nonnull;
 
+import static org.apache.hadoop.hdfs.util.IOUtilsClient.updateReadStatistics;
+
 /
  * DFSInputStream provides bytes from a named file.  It handles
  * negotiation of the namenode and various datanodes as necessary.
@@ -769,6 +771,12 @@ public class DFSInputStream extends FSInputStream
 // got a EOS from reader though we expect more data on it.
 throw new IOException("Unexpected EOS from the reader");
   }
+  updateReadStatistics(readStatistics, result, blockReader);
+  dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(),
+  result);
+  if (readStatistics.getBlockType() == BlockType.STRIPED) {
+dfsClient.updateFileSystemECReadStats(result);
+  }
   return result;
 } catch (ChecksumException ce) {
   throw ce;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08bb6c49/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 5557a50..3f688d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -54,6 +54,8 @@ import java.util.Collection;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadPoolExecutor;
 
+import static org.apache.hadoop.hdfs.util.IOUtilsClient.updateReadStatistics;
+
 /**
  * DFSStripedInputStream reads from striped block groups.
  */
@@ -329,6 +331,26 @@ public class DFSStripedInputStream extends DFSInputStream {
   }
 
   /**
+   * Update read statistics. Note that this has to be done on the thread that
+   * initiates the read, rather than inside each async thread, for
+   * {@link org.apache.hadoop.fs.FileSystem.Statistics} to work correctly with
+   * its ThreadLocal.
+   *
+   * @param stats striped read stats
+   */
+  void updateReadStats(final StripedBlockUtil.BlockReadStats stats) {
+if (stats == null) {
+  return;
+}
+updateReadStatistics(readStatistics, stats.getBytesRead(),
+stats.isShortCircuit(), stats.getNetworkDistance());
+dfsClient.updateFileSy

hadoop git commit: HDFS-13833. Improve BlockPlacementPolicyDefault's consider load logic. Contributed by Shweta.

2018-09-18 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 392965370 -> 27978bcb6


HDFS-13833. Improve BlockPlacementPolicyDefault's consider load logic. 
Contributed by Shweta.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27978bcb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27978bcb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27978bcb

Branch: refs/heads/trunk
Commit: 27978bcb66a9130cbf26d37ec454c0b7fcdc2530
Parents: 3929653
Author: Shweta 
Authored: Tue Sep 18 20:22:25 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 18 20:23:50 2018 -0700

--
 .../BlockPlacementPolicyDefault.java| 29 ++--
 .../blockmanagement/TestReplicationPolicy.java  | 28 +++
 2 files changed, 49 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27978bcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d00f961..d396845 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -913,6 +913,24 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
   }
 
   /**
+   * Determine if a datanode should be chosen based on current workload.
+   *
+   * @param node The target datanode
+   * @return Return true if the datanode should be excluded, otherwise false
+   */
+  boolean excludeNodeByLoad(DatanodeDescriptor node){
+final double maxLoad = considerLoadFactor *
+stats.getInServiceXceiverAverage();
+final int nodeLoad = node.getXceiverCount();
+if ((nodeLoad > maxLoad) && (maxLoad > 0)) {
+  logNodeIsNotChosen(node, NodeNotChosenReason.NODE_TOO_BUSY,
+  "(load: " + nodeLoad + " > " + maxLoad + ")");
+  return true;
+}
+return false;
+  }
+
+  /**
* Determine if a datanode is good for placing block.
*
* @param node The target datanode
@@ -923,7 +941,7 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
* @param results A list containing currently chosen nodes. Used to check if
*too many nodes has been chosen in the target rack.
* @param avoidStaleNodes Whether or not to avoid choosing stale nodes
-   * @return Reture true if the datanode is good candidate, otherwise false
+   * @return Return true if the datanode is good candidate, otherwise false
*/
   boolean isGoodDatanode(DatanodeDescriptor node,
  int maxTargetPerRack, boolean considerLoad,
@@ -943,13 +961,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 }
 
 // check the communication traffic of the target machine
-if (considerLoad) {
-  final double maxLoad = considerLoadFactor *
-  stats.getInServiceXceiverAverage();
-  final int nodeLoad = node.getXceiverCount();
-  if (nodeLoad > maxLoad) {
-logNodeIsNotChosen(node, NodeNotChosenReason.NODE_TOO_BUSY,
-"(load: " + nodeLoad + " > " + maxLoad + ")");
+if(considerLoad){
+  if(excludeNodeByLoad(node)){
 return false;
   }
 }
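
[Editorial sketch] excludeNodeByLoad factors out the existing rule: a datanode is skipped when its xceiver count exceeds considerLoadFactor times the average in-service xceiver count, provided that threshold is positive. A small standalone sketch of the same arithmetic, with made-up numbers:

public final class LoadExclusionSketch {

  static boolean excludeByLoad(int nodeXceivers, double avgInServiceXceivers,
      double considerLoadFactor) {
    double maxLoad = considerLoadFactor * avgInServiceXceivers;
    return maxLoad > 0 && nodeXceivers > maxLoad;
  }

  public static void main(String[] args) {
    double clusterAvg = 10.0;  // assumed average xceivers per in-service datanode
    double factor = 2.0;       // example load factor
    System.out.println(excludeByLoad(15, clusterAvg, factor)); // false: 15 <= 20
    System.out.println(excludeByLoad(25, clusterAvg, factor)); // true:  25 > 20
    System.out.println(excludeByLoad(25, 0.0, factor));        // false: no threshold yet
  }
}
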

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27978bcb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 27dcbf1..f08fa13 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.ne

hadoop git commit: HDFS-13051. Fix dead lock during async editlog rolling if edit queue is full. Contributed by Daryn Sharp.

2018-09-11 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 1670879d6 -> ebc312ec9


HDFS-13051. Fix dead lock during async editlog rolling if edit queue is full. 
Contributed by Daryn Sharp.

(cherry picked from commit 8e54da1511e78477c1d4655d5ff0a69d0330869f)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java

(cherry picked from commit 2dd27c999b22c550058de0e6eca7209b346cd143)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java

(cherry picked from commit a5da4df73c525d68c76487509660a6b13f7fe99e)
(cherry picked from commit 5bf7f68eb790797aca7451b9c96470771dcd78a2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebc312ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebc312ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebc312ec

Branch: refs/heads/branch-2.8
Commit: ebc312ec990b52d9f92a753fe2a976b316882589
Parents: 1670879
Author: Xiao Chen 
Authored: Mon Sep 10 22:14:02 2018 -0700
Committer: Xiao Chen 
Committed: Mon Sep 10 23:13:31 2018 -0700

--
 .../hdfs/server/namenode/FSEditLogAsync.java|  61 ++-
 .../hdfs/server/namenode/TestEditLogRace.java   | 158 ++-
 2 files changed, 215 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebc312ec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index 5990c22..1604872 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -24,7 +24,9 @@ import java.util.Deque;
 import java.util.List;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.TimeUnit;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -145,15 +147,68 @@ class FSEditLogAsync extends FSEditLog implements 
Runnable {
 edit.logSyncWait();
   }
 
+  // draining permits is intended to provide a high priority reservation.
+  // however, release of outstanding permits must be postponed until
+  // drained permits are restored to avoid starvation.  logic has some races
+  // but is good enough to serve its purpose.
+  private Semaphore overflowMutex = new Semaphore(8){
+private AtomicBoolean draining = new AtomicBoolean();
+private AtomicInteger pendingReleases = new AtomicInteger();
+@Override
+public int drainPermits() {
+  draining.set(true);
+  return super.drainPermits();
+}
+// while draining, count the releases until release(int)
+private void tryRelease(int permits) {
+  pendingReleases.getAndAdd(permits);
+  if (!draining.get()) {
+super.release(pendingReleases.getAndSet(0));
+  }
+}
+@Override
+public void release() {
+  tryRelease(1);
+}
+@Override
+public void release(int permits) {
+  draining.set(false);
+  tryRelease(permits);
+}
+  };
+
   private void enqueueEdit(Edit edit) {
 if (LOG.isDebugEnabled()) {
   LOG.debug("logEdit " + edit);
 }
 try {
-  if (!editPendingQ.offer(edit, 1, TimeUnit.SECONDS)) {
+  // not checking for overflow yet to avoid penalizing performance of
+  // the common case.  if there is persistent overflow, a mutex will be
+  // use to throttle contention on the queue.
+  if (!editPendingQ.offer(edit)) {
 Preconditions.checkState(
 isSyncThreadAlive(), "sync thread is not alive");
-editPendingQ.put(edit);
+if (Thread.holdsLock(this)) {
+  // if queue is full, synchronized caller must immediately relinquish
+  // the monitor before re-offering to avoid deadlock with sync thread
+  // which needs the monitor to write transactions.
+  int permits = overflowMutex.drainPermits();
+  try {
+do {
+  this.wait(1000); // will be notified by next logSync.
+} while (!editPendingQ.offer(edit));
+  } finally {
+overflowMutex.release(permits);
+  }
+} else {
+ 
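
[Editorial sketch] The heart of this fix is that a caller which already holds the FSEditLog monitor must not block on the full edit queue, because the sync thread needs that same monitor to drain it; instead the caller wait()s, which releases the monitor, and then re-offers. The sketch below shows that wait-and-re-offer pattern on a plain ArrayBlockingQueue; the class and method names are illustrative, not the FSEditLogAsync internals, and the overflow-throttling semaphore is omitted.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public final class FullQueueProducerSketch {
  private final BlockingQueue<String> queue = new ArrayBlockingQueue<>(2);

  /** Called while synchronized on this, like logEdit() under the FSEditLog lock. */
  synchronized void enqueueWhileHoldingMonitor(String edit) throws InterruptedException {
    while (!queue.offer(edit)) {
      // Give up the monitor so the consumer can drain the queue and notify us.
      wait(1000);
    }
  }

  /** Consumer needs the monitor briefly for each item, like the edit sync thread. */
  void drainOne() throws InterruptedException {
    String edit = queue.take();
    synchronized (this) {
      // write the transaction, then wake any producer parked in wait()
      notifyAll();
    }
    System.out.println("synced " + edit);
  }

  public static void main(String[] args) throws Exception {
    FullQueueProducerSketch log = new FullQueueProducerSketch();
    Thread consumer = new Thread(() -> {
      try {
        for (int i = 0; i < 5; i++) {
          log.drainOne();
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });
    consumer.start();
    for (int i = 0; i < 5; i++) {
      log.enqueueWhileHoldingMonitor("edit-" + i);  // never blocks the queue while holding the lock
    }
    consumer.join();
  }
}
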

hadoop git commit: HDFS-13051. Fix dead lock during async editlog rolling if edit queue is full. Contributed by Daryn Sharp.

2018-09-11 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 3dffcd61a -> aa0f58df1


HDFS-13051. Fix dead lock during async editlog rolling if edit queue is full. 
Contributed by Daryn Sharp.

(cherry picked from commit 8e54da1511e78477c1d4655d5ff0a69d0330869f)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java

(cherry picked from commit 2dd27c999b22c550058de0e6eca7209b346cd143)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java

(cherry picked from commit a5da4df73c525d68c76487509660a6b13f7fe99e)
(cherry picked from commit 5bf7f68eb790797aca7451b9c96470771dcd78a2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa0f58df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa0f58df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa0f58df

Branch: refs/heads/branch-2.9
Commit: aa0f58df1d5892d751e32cebb3d08b04621dfd8b
Parents: 3dffcd6
Author: Xiao Chen 
Authored: Mon Sep 10 22:14:02 2018 -0700
Committer: Xiao Chen 
Committed: Mon Sep 10 23:13:22 2018 -0700

--
 .../hdfs/server/namenode/FSEditLogAsync.java|  61 ++-
 .../hdfs/server/namenode/TestEditLogRace.java   | 158 ++-
 2 files changed, 215 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa0f58df/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index 5990c22..1604872 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -24,7 +24,9 @@ import java.util.Deque;
 import java.util.List;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.TimeUnit;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -145,15 +147,68 @@ class FSEditLogAsync extends FSEditLog implements 
Runnable {
 edit.logSyncWait();
   }
 
+  // draining permits is intended to provide a high priority reservation.
+  // however, release of outstanding permits must be postponed until
+  // drained permits are restored to avoid starvation.  logic has some races
+  // but is good enough to serve its purpose.
+  private Semaphore overflowMutex = new Semaphore(8){
+private AtomicBoolean draining = new AtomicBoolean();
+private AtomicInteger pendingReleases = new AtomicInteger();
+@Override
+public int drainPermits() {
+  draining.set(true);
+  return super.drainPermits();
+}
+// while draining, count the releases until release(int)
+private void tryRelease(int permits) {
+  pendingReleases.getAndAdd(permits);
+  if (!draining.get()) {
+super.release(pendingReleases.getAndSet(0));
+  }
+}
+@Override
+public void release() {
+  tryRelease(1);
+}
+@Override
+public void release(int permits) {
+  draining.set(false);
+  tryRelease(permits);
+}
+  };
+
   private void enqueueEdit(Edit edit) {
 if (LOG.isDebugEnabled()) {
   LOG.debug("logEdit " + edit);
 }
 try {
-  if (!editPendingQ.offer(edit, 1, TimeUnit.SECONDS)) {
+  // not checking for overflow yet to avoid penalizing performance of
+  // the common case.  if there is persistent overflow, a mutex will be
+  // use to throttle contention on the queue.
+  if (!editPendingQ.offer(edit)) {
 Preconditions.checkState(
 isSyncThreadAlive(), "sync thread is not alive");
-editPendingQ.put(edit);
+if (Thread.holdsLock(this)) {
+  // if queue is full, synchronized caller must immediately relinquish
+  // the monitor before re-offering to avoid deadlock with sync thread
+  // which needs the monitor to write transactions.
+  int permits = overflowMutex.drainPermits();
+  try {
+do {
+  this.wait(1000); // will be notified by next logSync.
+} while (!editPendingQ.offer(edit));
+  } finally {
+overflowMutex.release(permits);
+  }
+} else {
+ 

hadoop git commit: HDFS-13051. Fix dead lock during async editlog rolling if edit queue is full. Contributed by Daryn Sharp.

2018-09-11 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 121cefd47 -> a7b9e7d85


HDFS-13051. Fix dead lock during async editlog rolling if edit queue is full. 
Contributed by Daryn Sharp.

(cherry picked from commit 8e54da1511e78477c1d4655d5ff0a69d0330869f)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java

(cherry picked from commit 2dd27c999b22c550058de0e6eca7209b346cd143)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java

(cherry picked from commit a5da4df73c525d68c76487509660a6b13f7fe99e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7b9e7d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7b9e7d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7b9e7d8

Branch: refs/heads/branch-2
Commit: a7b9e7d858435793dec1afe6d93f6b3d9ede3347
Parents: 121cefd
Author: Xiao Chen 
Authored: Mon Sep 10 22:14:02 2018 -0700
Committer: Xiao Chen 
Committed: Mon Sep 10 23:13:14 2018 -0700

--
 .../hdfs/server/namenode/FSEditLogAsync.java|  61 ++-
 .../hdfs/server/namenode/TestEditLogRace.java   | 158 ++-
 2 files changed, 215 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7b9e7d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index c14a310..3eb75b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -24,7 +24,9 @@ import java.util.Deque;
 import java.util.List;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.TimeUnit;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -145,15 +147,68 @@ class FSEditLogAsync extends FSEditLog implements 
Runnable {
 edit.logSyncWait();
   }
 
+  // draining permits is intended to provide a high priority reservation.
+  // however, release of outstanding permits must be postponed until
+  // drained permits are restored to avoid starvation.  logic has some races
+  // but is good enough to serve its purpose.
+  private Semaphore overflowMutex = new Semaphore(8){
+private AtomicBoolean draining = new AtomicBoolean();
+private AtomicInteger pendingReleases = new AtomicInteger();
+@Override
+public int drainPermits() {
+  draining.set(true);
+  return super.drainPermits();
+}
+// while draining, count the releases until release(int)
+private void tryRelease(int permits) {
+  pendingReleases.getAndAdd(permits);
+  if (!draining.get()) {
+super.release(pendingReleases.getAndSet(0));
+  }
+}
+@Override
+public void release() {
+  tryRelease(1);
+}
+@Override
+public void release(int permits) {
+  draining.set(false);
+  tryRelease(permits);
+}
+  };
+
   private void enqueueEdit(Edit edit) {
 if (LOG.isDebugEnabled()) {
   LOG.debug("logEdit " + edit);
 }
 try {
-  if (!editPendingQ.offer(edit, 1, TimeUnit.SECONDS)) {
+  // not checking for overflow yet to avoid penalizing performance of
+  // the common case.  if there is persistent overflow, a mutex will be
+  // use to throttle contention on the queue.
+  if (!editPendingQ.offer(edit)) {
 Preconditions.checkState(
 isSyncThreadAlive(), "sync thread is not alive");
-editPendingQ.put(edit);
+if (Thread.holdsLock(this)) {
+  // if queue is full, synchronized caller must immediately relinquish
+  // the monitor before re-offering to avoid deadlock with sync thread
+  // which needs the monitor to write transactions.
+  int permits = overflowMutex.drainPermits();
+  try {
+do {
+  this.wait(1000); // will be notified by next logSync.
+} while (!editPendingQ.offer(edit));
+  } finally {
+overflowMutex.release(permits);
+  }
+} else {
+  // mutex will throttle contention during persistent overflow.
+

hadoop git commit: HDFS-13051. Fix dead lock during async editlog rolling if edit queue is full. Contributed by Daryn Sharp.

2018-09-11 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 48dc8de28 -> a5da4df73


HDFS-13051. Fix dead lock during async editlog rolling if edit queue is full. 
Contributed by Daryn Sharp.

(cherry picked from commit 8e54da1511e78477c1d4655d5ff0a69d0330869f)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java

(cherry picked from commit 2dd27c999b22c550058de0e6eca7209b346cd143)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5da4df7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5da4df7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5da4df7

Branch: refs/heads/branch-3.0
Commit: a5da4df73c525d68c76487509660a6b13f7fe99e
Parents: 48dc8de
Author: Xiao Chen 
Authored: Mon Sep 10 22:14:02 2018 -0700
Committer: Xiao Chen 
Committed: Mon Sep 10 23:00:41 2018 -0700

--
 .../hdfs/server/namenode/FSEditLogAsync.java|  61 ++-
 .../hdfs/server/namenode/TestEditLogRace.java   | 158 ++-
 2 files changed, 215 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5da4df7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index 5990c22..1604872 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -24,7 +24,9 @@ import java.util.Deque;
 import java.util.List;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.TimeUnit;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -145,15 +147,68 @@ class FSEditLogAsync extends FSEditLog implements 
Runnable {
 edit.logSyncWait();
   }
 
+  // draining permits is intended to provide a high priority reservation.
+  // however, release of outstanding permits must be postponed until
+  // drained permits are restored to avoid starvation.  logic has some races
+  // but is good enough to serve its purpose.
+  private Semaphore overflowMutex = new Semaphore(8){
+private AtomicBoolean draining = new AtomicBoolean();
+private AtomicInteger pendingReleases = new AtomicInteger();
+@Override
+public int drainPermits() {
+  draining.set(true);
+  return super.drainPermits();
+}
+// while draining, count the releases until release(int)
+private void tryRelease(int permits) {
+  pendingReleases.getAndAdd(permits);
+  if (!draining.get()) {
+super.release(pendingReleases.getAndSet(0));
+  }
+}
+@Override
+public void release() {
+  tryRelease(1);
+}
+@Override
+public void release(int permits) {
+  draining.set(false);
+  tryRelease(permits);
+}
+  };
+
   private void enqueueEdit(Edit edit) {
 if (LOG.isDebugEnabled()) {
   LOG.debug("logEdit " + edit);
 }
 try {
-  if (!editPendingQ.offer(edit, 1, TimeUnit.SECONDS)) {
+  // not checking for overflow yet to avoid penalizing performance of
+  // the common case.  if there is persistent overflow, a mutex will be
+  // use to throttle contention on the queue.
+  if (!editPendingQ.offer(edit)) {
 Preconditions.checkState(
 isSyncThreadAlive(), "sync thread is not alive");
-editPendingQ.put(edit);
+if (Thread.holdsLock(this)) {
+  // if queue is full, synchronized caller must immediately relinquish
+  // the monitor before re-offering to avoid deadlock with sync thread
+  // which needs the monitor to write transactions.
+  int permits = overflowMutex.drainPermits();
+  try {
+do {
+  this.wait(1000); // will be notified by next logSync.
+} while (!editPendingQ.offer(edit));
+  } finally {
+overflowMutex.release(permits);
+  }
+} else {
+  // mutex will throttle contention during persistent overflow.
+  overflowMutex.acquire();
+  try {
+  

hadoop git commit: HDFS-13051. Fix dead lock during async editlog rolling if edit queue is full. Contributed by Daryn Sharp.

2018-09-10 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 77dd45646 -> 2dd27c999


HDFS-13051. Fix dead lock during async editlog rolling if edit queue is full. 
Contributed by Daryn Sharp.

(cherry picked from commit 8e54da1511e78477c1d4655d5ff0a69d0330869f)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2dd27c99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2dd27c99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2dd27c99

Branch: refs/heads/branch-3.1
Commit: 2dd27c999b22c550058de0e6eca7209b346cd143
Parents: 77dd456
Author: Xiao Chen 
Authored: Mon Sep 10 22:14:02 2018 -0700
Committer: Xiao Chen 
Committed: Mon Sep 10 22:44:48 2018 -0700

--
 .../hdfs/server/namenode/FSEditLogAsync.java|  61 ++-
 .../hdfs/server/namenode/TestEditLogRace.java   | 158 ++-
 2 files changed, 215 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dd27c99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index 5990c22..1604872 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -24,7 +24,9 @@ import java.util.Deque;
 import java.util.List;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.TimeUnit;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -145,15 +147,68 @@ class FSEditLogAsync extends FSEditLog implements 
Runnable {
 edit.logSyncWait();
   }
 
+  // draining permits is intended to provide a high priority reservation.
+  // however, release of outstanding permits must be postponed until
+  // drained permits are restored to avoid starvation.  logic has some races
+  // but is good enough to serve its purpose.
+  private Semaphore overflowMutex = new Semaphore(8){
+private AtomicBoolean draining = new AtomicBoolean();
+private AtomicInteger pendingReleases = new AtomicInteger();
+@Override
+public int drainPermits() {
+  draining.set(true);
+  return super.drainPermits();
+}
+// while draining, count the releases until release(int)
+private void tryRelease(int permits) {
+  pendingReleases.getAndAdd(permits);
+  if (!draining.get()) {
+super.release(pendingReleases.getAndSet(0));
+  }
+}
+@Override
+public void release() {
+  tryRelease(1);
+}
+@Override
+public void release(int permits) {
+  draining.set(false);
+  tryRelease(permits);
+}
+  };
+
   private void enqueueEdit(Edit edit) {
 if (LOG.isDebugEnabled()) {
   LOG.debug("logEdit " + edit);
 }
 try {
-  if (!editPendingQ.offer(edit, 1, TimeUnit.SECONDS)) {
+  // not checking for overflow yet to avoid penalizing performance of
+  // the common case.  if there is persistent overflow, a mutex will be
+  // used to throttle contention on the queue.
+  if (!editPendingQ.offer(edit)) {
 Preconditions.checkState(
 isSyncThreadAlive(), "sync thread is not alive");
-editPendingQ.put(edit);
+if (Thread.holdsLock(this)) {
+  // if queue is full, synchronized caller must immediately relinquish
+  // the monitor before re-offering to avoid deadlock with sync thread
+  // which needs the monitor to write transactions.
+  int permits = overflowMutex.drainPermits();
+  try {
+do {
+  this.wait(1000); // will be notified by next logSync.
+} while (!editPendingQ.offer(edit));
+  } finally {
+overflowMutex.release(permits);
+  }
+} else {
+  // mutex will throttle contention during persistent overflow.
+  overflowMutex.acquire();
+  try {
+editPendingQ.put(edit);
+  } finally {
+overflowMutex.release();
+  }
+}
   }
 } catch (Throwable t) {
   // should never happen!  failure to enqueue an edit i
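
The core of the deadlock fixed here: a caller that already holds the FSEditLogAsync monitor must not block on a full queue, because the sync thread needs that same monitor to drain it. The standalone sketch below is not the committed code; the class name, queue capacity and item type are made up for illustration. It shows the wait-and-re-offer pattern in isolation: the lock-holding producer parks in wait() (which releases the monitor) and retries a non-blocking offer(), while lock-free producers may still use the blocking put().

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    public class ReofferSketch {
      private final BlockingQueue<String> queue = new ArrayBlockingQueue<>(2);

      // Producer: may be called while the caller already holds this monitor.
      void enqueue(String item) throws InterruptedException {
        if (queue.offer(item)) {
          return;                          // common case, queue has room
        }
        if (Thread.holdsLock(this)) {
          synchronized (this) {            // re-entrant, we already own it
            // Never block on put() here: wait() releases the monitor so the
            // consumer can drain, then we retry the non-blocking offer.
            while (!queue.offer(item)) {
              wait(1000);
            }
          }
        } else {
          queue.put(item);                 // safe to block, monitor not held
        }
      }

      // Consumer: takes an item, then grabs the monitor to process it and
      // wake any producer parked in wait() above.
      void drainOne() throws InterruptedException {
        String item = queue.take();
        synchronized (this) {
          System.out.println("processed " + item);
          notifyAll();
        }
      }

      public static void main(String[] args) throws Exception {
        ReofferSketch s = new ReofferSketch();
        Thread consumer = new Thread(() -> {
          try {
            for (int i = 0; i < 5; i++) {
              s.drainOne();
            }
          } catch (InterruptedException ignored) {
          }
        });
        consumer.start();
        synchronized (s) {                 // simulate a lock-holding caller
          for (int i = 0; i < 5; i++) {
            s.enqueue("edit-" + i);
          }
        }
        consumer.join();
      }
    }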

hadoop git commit: HDFS-13051. Fix dead lock during async editlog rolling if edit queue is full. Contributed by Daryn Sharp.

2018-09-10 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 96892c469 -> 8e54da151


HDFS-13051. Fix dead lock during async editlog rolling if edit queue is full. 
Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e54da15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e54da15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e54da15

Branch: refs/heads/trunk
Commit: 8e54da1511e78477c1d4655d5ff0a69d0330869f
Parents: 96892c4
Author: Xiao Chen 
Authored: Mon Sep 10 22:14:02 2018 -0700
Committer: Xiao Chen 
Committed: Mon Sep 10 22:34:23 2018 -0700

--
 .../hdfs/server/namenode/FSEditLogAsync.java|  61 ++-
 .../hdfs/server/namenode/TestEditLogRace.java   | 158 ++-
 2 files changed, 215 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e54da15/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index 7f39379..2b47398 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -24,7 +24,9 @@ import java.util.Deque;
 import java.util.List;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.TimeUnit;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -145,15 +147,68 @@ class FSEditLogAsync extends FSEditLog implements 
Runnable {
 edit.logSyncWait();
   }
 
+  // draining permits is intended to provide a high priority reservation.
+  // however, release of outstanding permits must be postponed until
+  // drained permits are restored to avoid starvation.  logic has some races
+  // but is good enough to serve its purpose.
+  private Semaphore overflowMutex = new Semaphore(8){
+private AtomicBoolean draining = new AtomicBoolean();
+private AtomicInteger pendingReleases = new AtomicInteger();
+@Override
+public int drainPermits() {
+  draining.set(true);
+  return super.drainPermits();
+}
+// while draining, count the releases until release(int)
+private void tryRelease(int permits) {
+  pendingReleases.getAndAdd(permits);
+  if (!draining.get()) {
+super.release(pendingReleases.getAndSet(0));
+  }
+}
+@Override
+public void release() {
+  tryRelease(1);
+}
+@Override
+public void release(int permits) {
+  draining.set(false);
+  tryRelease(permits);
+}
+  };
+
   private void enqueueEdit(Edit edit) {
 if (LOG.isDebugEnabled()) {
   LOG.debug("logEdit " + edit);
 }
 try {
-  if (!editPendingQ.offer(edit, 1, TimeUnit.SECONDS)) {
+  // not checking for overflow yet to avoid penalizing performance of
+  // the common case.  if there is persistent overflow, a mutex will be
+  // used to throttle contention on the queue.
+  if (!editPendingQ.offer(edit)) {
 Preconditions.checkState(
 isSyncThreadAlive(), "sync thread is not alive");
-editPendingQ.put(edit);
+if (Thread.holdsLock(this)) {
+  // if queue is full, synchronized caller must immediately relinquish
+  // the monitor before re-offering to avoid deadlock with sync thread
+  // which needs the monitor to write transactions.
+  int permits = overflowMutex.drainPermits();
+  try {
+do {
+  this.wait(1000); // will be notified by next logSync.
+} while (!editPendingQ.offer(edit));
+  } finally {
+overflowMutex.release(permits);
+  }
+} else {
+  // mutex will throttle contention during persistent overflow.
+  overflowMutex.acquire();
+  try {
+editPendingQ.put(edit);
+  } finally {
+overflowMutex.release();
+  }
+}
   }
 } catch (Throwable t) {
   // should never happen!  failure to enqueue an edit is fatal

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e54da15/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
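
The second half of the change is the overflow throttle: once the queue is persistently full, an 8-permit semaphore bounds how many producers pile onto the blocking put(), while drainPermits() lets the lock-holding path reserve priority. Below is a simplified sketch of just the throttling idea using a plain java.util.concurrent.Semaphore; it omits the priority-reservation subclass above, and the class name and permit count are illustrative only.

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.Semaphore;

    public class ThrottledQueue<T> {
      private final BlockingQueue<T> queue;
      // Bounds how many producers may block on put() at once during overflow.
      private final Semaphore overflowThrottle = new Semaphore(8);

      public ThrottledQueue(int capacity) {
        this.queue = new ArrayBlockingQueue<>(capacity);
      }

      public void add(T item) throws InterruptedException {
        if (queue.offer(item)) {
          return;                       // fast path: semaphore never touched
        }
        overflowThrottle.acquire();     // slow path: queue is already full
        try {
          queue.put(item);
        } finally {
          overflowThrottle.release();
        }
      }

      public T take() throws InterruptedException {
        return queue.take();
      }
    }

Keeping the semaphore off the fast path is the point of the design: in the common case the only cost is a single offer(), and semaphore contention is paid only while the queue is already overflowing.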

hadoop git commit: HDFS-13820. Add an ability to disable CacheReplicationMonitor. Contributed by Hrishikesh Gadre.

2018-09-07 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 16333782c -> 335a8139f


HDFS-13820. Add an ability to disable CacheReplicationMonitor. Contributed by 
Hrishikesh Gadre.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/335a8139
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/335a8139
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/335a8139

Branch: refs/heads/trunk
Commit: 335a8139f5b9004414b2942eeac5a008283a6f75
Parents: 1633378
Author: Hrishikesh Gadre 
Authored: Fri Sep 7 14:55:22 2018 -0700
Committer: Xiao Chen 
Committed: Fri Sep 7 14:59:06 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  3 ++
 .../hdfs/server/namenode/CacheManager.java  | 42 -
 .../src/main/resources/hdfs-default.xml | 11 +
 .../site/markdown/CentralizedCacheManagement.md |  5 ++
 .../server/namenode/TestCacheDirectives.java| 49 
 5 files changed, 108 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/335a8139/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index bd88341..a7e7b9b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -390,6 +390,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  
DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS =
   "dfs.namenode.path.based.cache.refresh.interval.ms";
   public static final long
DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT = 30000L;
+  public static final String  DFS_NAMENODE_CACHING_ENABLED_KEY =
+  "dfs.namenode.caching.enabled";
+  public static final boolean DFS_NAMENODE_CACHING_ENABLED_DEFAULT = true;
 
   /** Pending period of block deletion since NameNode startup */
   public static final String  
DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY = 
"dfs.namenode.startup.delay.block.deletion.sec";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/335a8139/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
index ab026f0..8a29492 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
@@ -25,6 +25,8 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_DEFAULT;
 
 import java.io.DataInput;
 import java.io.DataOutputStream;
@@ -172,6 +174,21 @@ public class CacheManager {
   private final SerializerCompat serializerCompat = new SerializerCompat();
 
   /**
+   * Whether caching is enabled.
+   *
+   * If caching is disabled, we will not process cache reports or store
+   * information about what is cached where.  We also do not start the
+   * CacheReplicationMonitor thread.  This will save resources, but provide
+   * less functionality.
+   *
+   * Even when caching is disabled, we still store path-based cache
+   * information.  This information is stored in the edit log and fsimage.  We
+   * don't want to lose it just because a configuration setting was turned off.
+   * However, we will not act on this information if caching is disabled.
+   */
+  private final boolean enabled;
+
+  /**
* The CacheReplicationMonitor.
*/
   private CacheReplicationMonitor monitor;
@@ -194,6 +211,8 @@ public class CacheManager {
 this.namesystem = namesystem;
 this.blockManager = blockManager;
 this.nextDirectiveId = 1;
+ 
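
With this patch applied, the switch is an ordinary boolean in hdfs-site.xml. A minimal sketch of reading it through the new DFSConfigKeys constants follows; note the constants only exist once this change is in, and the class name here is just for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class CachingFlagCheck {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.addResource("hdfs-site.xml");   // picked up from the classpath

        // Defaults to true, so existing deployments keep the
        // CacheReplicationMonitor unless they opt out explicitly.
        boolean cachingEnabled = conf.getBoolean(
            DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY,
            DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_DEFAULT);

        System.out.println("dfs.namenode.caching.enabled = " + cachingEnabled);
      }
    }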

hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 a9d86c526 -> 0a3eefeda


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 6ccb809c2d38a45e716153ba16e135cb76167b2b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a3eefed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a3eefed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a3eefed

Branch: refs/heads/branch-2.7
Commit: 0a3eefeda1bf13d84fc4472027499168ddd8407f
Parents: a9d86c5
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:55:33 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a3eefed/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 72c125d..9447e41 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -243,7 +243,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 54d3189cf -> 25337c045


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 6ccb809c2d38a45e716153ba16e135cb76167b2b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25337c04
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25337c04
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25337c04

Branch: refs/heads/branch-3.0
Commit: 25337c045a44f7f837574d0a91a655bfb2e04048
Parents: 54d3189
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:54:56 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25337c04/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 89ad670..f2de043 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -220,7 +220,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 2e869cbce -> 5cd0b8038


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 6ccb809c2d38a45e716153ba16e135cb76167b2b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5cd0b803
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5cd0b803
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5cd0b803

Branch: refs/heads/branch-2.9
Commit: 5cd0b8038cc81a38ea2dbaa42e50038034b3ecee
Parents: 2e869cb
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:55:16 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cd0b803/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 89ad670..f2de043 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -220,7 +220,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 c7c5d7392 -> 1359e8da7


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 6ccb809c2d38a45e716153ba16e135cb76167b2b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1359e8da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1359e8da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1359e8da

Branch: refs/heads/branch-2.8
Commit: 1359e8da770aaf6fba35320dac352f9a362394d3
Parents: c7c5d73
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:55:25 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1359e8da/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 72c125d..9447e41 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -243,7 +243,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2e744bd31 -> 0468b6e73


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 6ccb809c2d38a45e716153ba16e135cb76167b2b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0468b6e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0468b6e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0468b6e7

Branch: refs/heads/branch-2
Commit: 0468b6e7361fec0882c45358dd83385a1b13e5c7
Parents: 2e744bd
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:55:09 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0468b6e7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 89ad670..f2de043 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -220,7 +220,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 a26565960 -> fde3b5ac2


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 6ccb809c2d38a45e716153ba16e135cb76167b2b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fde3b5ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fde3b5ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fde3b5ac

Branch: refs/heads/branch-3.1
Commit: fde3b5ac227d07572637d39447b0ab833f0f73af
Parents: a265659
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:54:43 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fde3b5ac/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 89ad670..f2de043 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -220,7 +220,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6883fe860 -> 6ccb809c2


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ccb809c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ccb809c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ccb809c

Branch: refs/heads/trunk
Commit: 6ccb809c2d38a45e716153ba16e135cb76167b2b
Parents: 6883fe8
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:53:42 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ccb809c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 89ad670..f2de043 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -220,7 +220,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
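
For completeness, the property the corrected sentence describes can also be tuned programmatically. A small sketch follows; the class name is illustrative and the value shown is simply the documented default of 30000 ms.

    import org.apache.hadoop.conf.Configuration;

    public class CacheRescanInterval {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // 30000 ms (thirty seconds) is the default rescan interval; large
        // namespaces may raise it to reduce the cost of each rescan.
        conf.setLong("dfs.namenode.path.based.cache.refresh.interval.ms",
            30000L);
        System.out.println(
            conf.get("dfs.namenode.path.based.cache.refresh.interval.ms"));
      }
    }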



hadoop git commit: HDFS-13885. Add debug logs in dfsclient around decrypting EDEK. Contributed by Kitti Nanasi.

2018-09-03 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 567b5558d -> 6e5ffb74d


HDFS-13885. Add debug logs in dfsclient around decrypting EDEK. Contributed by 
Kitti Nanasi.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e5ffb74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e5ffb74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e5ffb74

Branch: refs/heads/trunk
Commit: 6e5ffb74dd678ddc3392ae2f251c80fc5cc8c62f
Parents: 567b555
Author: Kitti Nanasi 
Authored: Mon Sep 3 22:32:53 2018 -0700
Committer: Xiao Chen 
Committed: Mon Sep 3 22:37:37 2018 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java  | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e5ffb74/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index adbb133..fb75322 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -953,8 +953,12 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   final CryptoCodec codec = HdfsKMSUtil.getCryptoCodec(conf, feInfo);
   KeyVersion decrypted;
   try (TraceScope ignored = tracer.newScope("decryptEDEK")) {
+LOG.debug("Start decrypting EDEK for file: {}, output stream: 0x{}",
+dfsos.getSrc(), Integer.toHexString(dfsos.hashCode()));
 decrypted = HdfsKMSUtil.decryptEncryptedDataEncryptionKey(feInfo,
   getKeyProvider());
+LOG.debug("Decrypted EDEK for file: {}, output stream: 0x{}",
+dfsos.getSrc(), Integer.toHexString(dfsos.hashCode()));
   }
   final CryptoOutputStream cryptoOut =
   new CryptoOutputStream(dfsos, codec,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
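
The new messages are emitted at DEBUG, so they are silent by default. One way to surface them, assuming the stock log4j 1.x backend that Hadoop ships with, is shown below; the class name is illustrative, and the usual operational route is the log4j.properties entry given in the comment.

    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;

    public class EnableDfsClientDebug {
      public static void main(String[] args) {
        // Equivalent log4j.properties entry:
        //   log4j.logger.org.apache.hadoop.hdfs.DFSClient=DEBUG
        Logger.getLogger("org.apache.hadoop.hdfs.DFSClient")
            .setLevel(Level.DEBUG);
      }
    }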



hadoop git commit: HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by Kitti Nanasi.

2018-08-29 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 6a547856e -> fa32269ce


HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by 
Kitti Nanasi.

(cherry picked from commit 781437c219dc3422797a32dc7ba72cd4f5ee38e2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa32269c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa32269c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa32269c

Branch: refs/heads/branch-3.0
Commit: fa32269cee5ec7125fb7e6d06c49716fdfe00af9
Parents: 6a54785
Author: Kitti Nanasi 
Authored: Wed Aug 29 22:06:36 2018 -0700
Committer: Xiao Chen 
Committed: Wed Aug 29 22:09:13 2018 -0700

--
 .../crypto/key/kms/server/KMSConfiguration.java | 31 
 .../hadoop/crypto/key/kms/server/KMSWebApp.java | 38 +---
 .../crypto/key/kms/server/KMSWebServer.java |  1 +
 3 files changed, 33 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa32269c/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index 18eec19..35ffb42 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -103,6 +104,8 @@ public class KMSConfiguration {
 
   public static final boolean KEY_AUTHORIZATION_ENABLE_DEFAULT = true;
 
+  private static final String LOG4J_PROPERTIES = "kms-log4j.properties";
+
   static {
 Configuration.addDefaultResource(KMS_DEFAULT_XML);
 Configuration.addDefaultResource(KMS_SITE_XML);
@@ -159,4 +162,32 @@ public class KMSConfiguration {
 }
 return newer;
   }
+
+  public static void initLogging() {
+String confDir = System.getProperty(KMS_CONFIG_DIR);
+if (confDir == null) {
+  throw new RuntimeException("System property '" +
+  KMSConfiguration.KMS_CONFIG_DIR + "' not defined");
+}
+if (System.getProperty("log4j.configuration") == null) {
+  System.setProperty("log4j.defaultInitOverride", "true");
+  boolean fromClasspath = true;
+  File log4jConf = new File(confDir, LOG4J_PROPERTIES).getAbsoluteFile();
+  if (log4jConf.exists()) {
+PropertyConfigurator.configureAndWatch(log4jConf.getPath(), 1000);
+fromClasspath = false;
+  } else {
+ClassLoader cl = Thread.currentThread().getContextClassLoader();
+URL log4jUrl = cl.getResource(LOG4J_PROPERTIES);
+if (log4jUrl != null) {
+  PropertyConfigurator.configure(log4jUrl);
+}
+  }
+  LOG.debug("KMS log starting");
+  if (fromClasspath) {
+LOG.warn("Log4j configuration file '{}' not found", LOG4J_PROPERTIES);
+LOG.warn("Logging with INFO level to standard output");
+  }
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa32269c/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index 9a71fa2..571d675 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -17,10 +17,8 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;
 
-import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.net.URL;
 
 import javax.servlet.ServletContextEvent;
 import javax.servlet.ServletContextListener;
@@ -36,14 +34,13 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.security.UserGroupInformation;
 impo

hadoop git commit: HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by Kitti Nanasi.

2018-08-29 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 8a3be0d5a -> 12eb9cc3b


HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by 
Kitti Nanasi.

(cherry picked from commit 781437c219dc3422797a32dc7ba72cd4f5ee38e2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12eb9cc3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12eb9cc3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12eb9cc3

Branch: refs/heads/branch-3.1
Commit: 12eb9cc3bb7c07a7ff2026b704d26e7a68fcd622
Parents: 8a3be0d
Author: Kitti Nanasi 
Authored: Wed Aug 29 22:06:36 2018 -0700
Committer: Xiao Chen 
Committed: Wed Aug 29 22:09:05 2018 -0700

--
 .../crypto/key/kms/server/KMSConfiguration.java | 31 
 .../hadoop/crypto/key/kms/server/KMSWebApp.java | 38 +---
 .../crypto/key/kms/server/KMSWebServer.java |  1 +
 3 files changed, 33 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12eb9cc3/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index 18eec19..35ffb42 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -103,6 +104,8 @@ public class KMSConfiguration {
 
   public static final boolean KEY_AUTHORIZATION_ENABLE_DEFAULT = true;
 
+  private static final String LOG4J_PROPERTIES = "kms-log4j.properties";
+
   static {
 Configuration.addDefaultResource(KMS_DEFAULT_XML);
 Configuration.addDefaultResource(KMS_SITE_XML);
@@ -159,4 +162,32 @@ public class KMSConfiguration {
 }
 return newer;
   }
+
+  public static void initLogging() {
+String confDir = System.getProperty(KMS_CONFIG_DIR);
+if (confDir == null) {
+  throw new RuntimeException("System property '" +
+  KMSConfiguration.KMS_CONFIG_DIR + "' not defined");
+}
+if (System.getProperty("log4j.configuration") == null) {
+  System.setProperty("log4j.defaultInitOverride", "true");
+  boolean fromClasspath = true;
+  File log4jConf = new File(confDir, LOG4J_PROPERTIES).getAbsoluteFile();
+  if (log4jConf.exists()) {
+PropertyConfigurator.configureAndWatch(log4jConf.getPath(), 1000);
+fromClasspath = false;
+  } else {
+ClassLoader cl = Thread.currentThread().getContextClassLoader();
+URL log4jUrl = cl.getResource(LOG4J_PROPERTIES);
+if (log4jUrl != null) {
+  PropertyConfigurator.configure(log4jUrl);
+}
+  }
+  LOG.debug("KMS log starting");
+  if (fromClasspath) {
+LOG.warn("Log4j configuration file '{}' not found", LOG4J_PROPERTIES);
+LOG.warn("Logging with INFO level to standard output");
+  }
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12eb9cc3/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index cb4bf7e..0640e25 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -17,10 +17,8 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;
 
-import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.net.URL;
 
 import javax.servlet.ServletContextEvent;
 import javax.servlet.ServletContextListener;
@@ -37,14 +35,13 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.security.UserGroupInformation;
 impo

hadoop git commit: HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by Kitti Nanasi.

2018-08-29 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 582cb10ec -> 781437c21


HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by 
Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/781437c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/781437c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/781437c2

Branch: refs/heads/trunk
Commit: 781437c219dc3422797a32dc7ba72cd4f5ee38e2
Parents: 582cb10
Author: Kitti Nanasi 
Authored: Wed Aug 29 22:06:36 2018 -0700
Committer: Xiao Chen 
Committed: Wed Aug 29 22:07:49 2018 -0700

--
 .../crypto/key/kms/server/KMSConfiguration.java | 31 
 .../hadoop/crypto/key/kms/server/KMSWebApp.java | 38 +---
 .../crypto/key/kms/server/KMSWebServer.java |  1 +
 3 files changed, 33 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/781437c2/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index 18eec19..35ffb42 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -103,6 +104,8 @@ public class KMSConfiguration {
 
   public static final boolean KEY_AUTHORIZATION_ENABLE_DEFAULT = true;
 
+  private static final String LOG4J_PROPERTIES = "kms-log4j.properties";
+
   static {
 Configuration.addDefaultResource(KMS_DEFAULT_XML);
 Configuration.addDefaultResource(KMS_SITE_XML);
@@ -159,4 +162,32 @@ public class KMSConfiguration {
 }
 return newer;
   }
+
+  public static void initLogging() {
+String confDir = System.getProperty(KMS_CONFIG_DIR);
+if (confDir == null) {
+  throw new RuntimeException("System property '" +
+  KMSConfiguration.KMS_CONFIG_DIR + "' not defined");
+}
+if (System.getProperty("log4j.configuration") == null) {
+  System.setProperty("log4j.defaultInitOverride", "true");
+  boolean fromClasspath = true;
+  File log4jConf = new File(confDir, LOG4J_PROPERTIES).getAbsoluteFile();
+  if (log4jConf.exists()) {
+PropertyConfigurator.configureAndWatch(log4jConf.getPath(), 1000);
+fromClasspath = false;
+  } else {
+ClassLoader cl = Thread.currentThread().getContextClassLoader();
+URL log4jUrl = cl.getResource(LOG4J_PROPERTIES);
+if (log4jUrl != null) {
+  PropertyConfigurator.configure(log4jUrl);
+}
+  }
+  LOG.debug("KMS log starting");
+  if (fromClasspath) {
+LOG.warn("Log4j configuration file '{}' not found", LOG4J_PROPERTIES);
+LOG.warn("Logging with INFO level to standard output");
+  }
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/781437c2/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index cb4bf7e..0640e25 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -17,10 +17,8 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;
 
-import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.net.URL;
 
 import javax.servlet.ServletContextEvent;
 import javax.servlet.ServletContextListener;
@@ -37,14 +35,13 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.VersionInfo;
-import org.apache.log4j.PropertyConfigurator;
 im
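
The initLogging() logic above boils down to: an explicit -Dlog4j.configuration always wins; otherwise prefer the file in the configuration directory (watched so edits take effect live), and fall back to the resource bundled on the classpath. A condensed sketch of that pattern follows; the directory and file name passed in main() are placeholders, not KMS defaults beyond what the patch itself shows.

    import java.io.File;
    import java.net.URL;
    import org.apache.log4j.PropertyConfigurator;

    public class Log4jInitSketch {
      static void initLogging(String confDir, String fileName) {
        if (System.getProperty("log4j.configuration") != null) {
          return;                          // explicit configuration wins
        }
        System.setProperty("log4j.defaultInitOverride", "true");
        File onDisk = new File(confDir, fileName).getAbsoluteFile();
        if (onDisk.exists()) {
          // Watch the file so operators can change levels without a restart.
          PropertyConfigurator.configureAndWatch(onDisk.getPath(), 1000);
        } else {
          URL bundled = Thread.currentThread().getContextClassLoader()
              .getResource(fileName);
          if (bundled != null) {
            PropertyConfigurator.configure(bundled);
          }
        }
      }

      public static void main(String[] args) {
        initLogging("/etc/hadoop/conf", "kms-log4j.properties");
      }
    }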

hadoop git commit: HDFS-13731. ReencryptionUpdater fails with ConcurrentModificationException during processCheckpoints. Contributed by Zsolt Venczel.

2018-08-28 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 d89a485b2 -> 2d1f81997


HDFS-13731. ReencryptionUpdater fails with ConcurrentModificationException 
during processCheckpoints. Contributed by Zsolt Venczel.

(cherry picked from commit 3e18b957ebdf20925224ab9c28e6c2f4b6bbdb24)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d1f8199
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d1f8199
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d1f8199

Branch: refs/heads/branch-3.0
Commit: 2d1f819979d83abf984d2d36e3eee485d27d8714
Parents: d89a485
Author: Zsolt Venczel 
Authored: Tue Aug 28 15:11:58 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 28 15:14:57 2018 -0700

--
 .../server/namenode/ReencryptionHandler.java|  6 +--
 .../server/namenode/ReencryptionUpdater.java| 52 ++--
 2 files changed, 30 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d1f8199/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
index ac40950..2ab395c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -714,10 +714,10 @@ public class ReencryptionHandler implements Runnable {
   zst = new ZoneSubmissionTracker();
   submissions.put(zoneId, zst);
 }
+Future future = batchService.submit(new EDEKReencryptCallable(zoneId,
+currentBatch, reencryptionHandler));
+zst.addTask(future);
   }
-  Future future = batchService.submit(new EDEKReencryptCallable(zoneId,
-  currentBatch, reencryptionHandler));
-  zst.addTask(future);
   LOG.info("Submitted batch (start:{}, size:{}) of zone {} to re-encrypt.",
   currentBatch.getFirstFilePath(), currentBatch.size(), zoneId);
   currentBatch = new ReencryptionBatch(reencryptBatchSize);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d1f8199/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
index a5923a7..15cfa92 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
@@ -383,32 +383,34 @@ public final class ReencryptionUpdater implements 
Runnable {
 final LinkedList tasks = tracker.getTasks();
 final List xAttrs = Lists.newArrayListWithCapacity(1);
 ListIterator iter = tasks.listIterator();
-while (iter.hasNext()) {
-  Future curr = iter.next();
-  if (curr.isCancelled()) {
-break;
-  }
-  if (!curr.isDone() || !curr.get().processed) {
-// still has earlier tasks not completed, skip here.
-break;
-  }
-  ReencryptionTask task = curr.get();
-  LOG.debug("Updating re-encryption checkpoint with completed task."
-  + " last: {} size:{}.", task.lastFile, task.batch.size());
-  assert zoneId == task.zoneId;
-  try {
-final XAttr xattr = FSDirEncryptionZoneOp
-.updateReencryptionProgress(dir, zoneNode, status, task.lastFile,
-task.numFilesUpdated, task.numFailures);
-xAttrs.clear();
-xAttrs.add(xattr);
-  } catch (IOException ie) {
-LOG.warn("Failed to update re-encrypted progress to xattr for zone {}",
-zonePath, ie);
-++task.numFailures;
+synchronized (handler) {
+  while (iter.hasNext()) {
+Future curr = iter.next();
+if (curr.isCancelled()) {
+  break;
+}
+if (!curr.isDone() || !curr.get().processed) {
+  // still has earlier tasks not completed, skip here.
+  break;
+}
+ReencryptionTask task = curr.get();
+LOG.debug("Updating re-encryption checkpoint with completed task."
++ " last: {} size:{}."
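
The ConcurrentModificationException came from the updater walking the tracker's LinkedList of futures while the handler appended to it without a common lock; the fix above puts both sides under the handler's monitor. The pattern in isolation looks like the sketch below, where the names and element types are made up for illustration and are not the ReencryptionUpdater API.

    import java.util.Iterator;
    import java.util.LinkedList;
    import java.util.List;

    public class SharedMonitorSketch {
      private final Object handler = new Object();  // one monitor for both sides
      private final List<String> tasks = new LinkedList<>();

      // Producer side: analogous to the handler queuing a submitted batch.
      void addTask(String task) {
        synchronized (handler) {
          tasks.add(task);
        }
      }

      // Consumer side: analogous to the updater sweeping completed tasks.
      int sweep() {
        synchronized (handler) {
          int removed = 0;
          // Iterating and removing is safe only because additions above take
          // the same monitor; otherwise LinkedList fails fast with
          // ConcurrentModificationException.
          for (Iterator<String> it = tasks.iterator(); it.hasNext(); ) {
            it.next();
            it.remove();
            removed++;
          }
          return removed;
        }
      }
    }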

hadoop git commit: HDFS-13731. ReencryptionUpdater fails with ConcurrentModificationException during processCheckpoints. Contributed by Zsolt Venczel.

2018-08-28 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 450ba6790 -> 5cbb9b1ca


HDFS-13731. ReencryptionUpdater fails with ConcurrentModificationException 
during processCheckpoints. Contributed by Zsolt Venczel.

(cherry picked from commit 3e18b957ebdf20925224ab9c28e6c2f4b6bbdb24)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5cbb9b1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5cbb9b1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5cbb9b1c

Branch: refs/heads/branch-3.1
Commit: 5cbb9b1ca9cc0b2c4fdd66b8f25e64f3c03d67b5
Parents: 450ba67
Author: Zsolt Venczel 
Authored: Tue Aug 28 15:11:58 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 28 15:14:13 2018 -0700

--
 .../server/namenode/ReencryptionHandler.java|  6 +--
 .../server/namenode/ReencryptionUpdater.java| 52 ++--
 2 files changed, 30 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cbb9b1c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
index b92fe9f..12744df 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -713,10 +713,10 @@ public class ReencryptionHandler implements Runnable {
   zst = new ZoneSubmissionTracker();
   submissions.put(zoneId, zst);
 }
+Future future = batchService.submit(new EDEKReencryptCallable(zoneId,
+currentBatch, reencryptionHandler));
+zst.addTask(future);
   }
-  Future future = batchService.submit(new EDEKReencryptCallable(zoneId,
-  currentBatch, reencryptionHandler));
-  zst.addTask(future);
   LOG.info("Submitted batch (start:{}, size:{}) of zone {} to re-encrypt.",
   currentBatch.getFirstFilePath(), currentBatch.size(), zoneId);
   currentBatch = new ReencryptionBatch(reencryptBatchSize);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cbb9b1c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
index a5923a7..15cfa92 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
@@ -383,32 +383,34 @@ public final class ReencryptionUpdater implements 
Runnable {
 final LinkedList tasks = tracker.getTasks();
 final List xAttrs = Lists.newArrayListWithCapacity(1);
 ListIterator iter = tasks.listIterator();
-while (iter.hasNext()) {
-  Future curr = iter.next();
-  if (curr.isCancelled()) {
-break;
-  }
-  if (!curr.isDone() || !curr.get().processed) {
-// still has earlier tasks not completed, skip here.
-break;
-  }
-  ReencryptionTask task = curr.get();
-  LOG.debug("Updating re-encryption checkpoint with completed task."
-  + " last: {} size:{}.", task.lastFile, task.batch.size());
-  assert zoneId == task.zoneId;
-  try {
-final XAttr xattr = FSDirEncryptionZoneOp
-.updateReencryptionProgress(dir, zoneNode, status, task.lastFile,
-task.numFilesUpdated, task.numFailures);
-xAttrs.clear();
-xAttrs.add(xattr);
-  } catch (IOException ie) {
-LOG.warn("Failed to update re-encrypted progress to xattr for zone {}",
-zonePath, ie);
-++task.numFailures;
+synchronized (handler) {
+  while (iter.hasNext()) {
+Future curr = iter.next();
+if (curr.isCancelled()) {
+  break;
+}
+if (!curr.isDone() || !curr.get().processed) {
+  // still has earlier tasks not completed, skip here.
+  break;
+}
+ReencryptionTask task = curr.get();
+LOG.debug("Updating re-encryption checkpoint with completed task."
++ " last: {} size:{}."

hadoop git commit: HDFS-13731. ReencryptionUpdater fails with ConcurrentModificationException during processCheckpoints. Contributed by Zsolt Venczel.

2018-08-28 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk c5629d546 -> 3e18b957e


HDFS-13731. ReencryptionUpdater fails with ConcurrentModificationException 
during processCheckpoints. Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e18b957
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e18b957
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e18b957

Branch: refs/heads/trunk
Commit: 3e18b957ebdf20925224ab9c28e6c2f4b6bbdb24
Parents: c5629d5
Author: Zsolt Venczel 
Authored: Tue Aug 28 15:11:58 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 28 15:13:43 2018 -0700

--
 .../server/namenode/ReencryptionHandler.java|  6 +--
 .../server/namenode/ReencryptionUpdater.java| 52 ++--
 2 files changed, 30 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e18b957/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
index c8c8d68..a8acccd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -714,10 +714,10 @@ public class ReencryptionHandler implements Runnable {
   zst = new ZoneSubmissionTracker();
   submissions.put(zoneId, zst);
 }
+Future future = batchService.submit(new EDEKReencryptCallable(zoneId,
+currentBatch, reencryptionHandler));
+zst.addTask(future);
   }
-  Future future = batchService.submit(new EDEKReencryptCallable(zoneId,
-  currentBatch, reencryptionHandler));
-  zst.addTask(future);
   LOG.info("Submitted batch (start:{}, size:{}) of zone {} to re-encrypt.",
   currentBatch.getFirstFilePath(), currentBatch.size(), zoneId);
   currentBatch = new ReencryptionBatch(reencryptBatchSize);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e18b957/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
index a5923a7..15cfa92 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
@@ -383,32 +383,34 @@ public final class ReencryptionUpdater implements 
Runnable {
 final LinkedList tasks = tracker.getTasks();
 final List xAttrs = Lists.newArrayListWithCapacity(1);
 ListIterator iter = tasks.listIterator();
-while (iter.hasNext()) {
-  Future curr = iter.next();
-  if (curr.isCancelled()) {
-break;
-  }
-  if (!curr.isDone() || !curr.get().processed) {
-// still has earlier tasks not completed, skip here.
-break;
-  }
-  ReencryptionTask task = curr.get();
-  LOG.debug("Updating re-encryption checkpoint with completed task."
-  + " last: {} size:{}.", task.lastFile, task.batch.size());
-  assert zoneId == task.zoneId;
-  try {
-final XAttr xattr = FSDirEncryptionZoneOp
-.updateReencryptionProgress(dir, zoneNode, status, task.lastFile,
-task.numFilesUpdated, task.numFailures);
-xAttrs.clear();
-xAttrs.add(xattr);
-  } catch (IOException ie) {
-LOG.warn("Failed to update re-encrypted progress to xattr for zone {}",
-zonePath, ie);
-++task.numFailures;
+synchronized (handler) {
+  while (iter.hasNext()) {
+Future curr = iter.next();
+if (curr.isCancelled()) {
+  break;
+}
+if (!curr.isDone() || !curr.get().processed) {
+  // still has earlier tasks not completed, skip here.
+  break;
+}
+ReencryptionTask task = curr.get();
+LOG.debug("Updating re-encryption checkpoint with completed task."
++ " last: {} size:{}.", task.lastFile, task.batch.size());
+assert zoneId == task.zoneId;
+   
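
A note on the failure mode behind the diff above: the updater thread walks the
tracker's LinkedList of futures while the handler thread can still append to
it, and LinkedList iterators are fail-fast, so the race surfaces as
ConcurrentModificationException. The patch serializes the two sides by taking
the handler lock around the scan. A minimal, self-contained Java sketch of the
hazard and the locking pattern (class and method names here are illustrative,
not Hadoop classes):

import java.util.LinkedList;
import java.util.List;

public class SharedTrackerDemo {
  // Stand-in for a submission tracker: one thread appends tasks while
  // another scans them; both must hold the same lock for safety.
  private final List<String> tasks = new LinkedList<>();
  private final Object lock = new Object();

  // Called by the "handler" thread when a new batch is submitted.
  public void addTask(String task) {
    synchronized (lock) {
      tasks.add(task);
    }
  }

  // Called by the "updater" thread. Without the synchronized block, an
  // addTask() racing with this loop throws ConcurrentModificationException
  // from the fail-fast iterator.
  public int countCompleted() {
    synchronized (lock) {
      int done = 0;
      for (String t : tasks) {
        if (t.endsWith(":done")) {
          done++;
        }
      }
      return done;
    }
  }
}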

hadoop git commit: HDFS-13837. Enable debug log for LeaseRenewer in TestDistributedFileSystem. Contributed by Shweta.

2018-08-28 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk ac515d22d -> 33f42efc9


HDFS-13837. Enable debug log for LeaseRenewer in TestDistributedFileSystem. 
Contributed by Shweta.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33f42efc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33f42efc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33f42efc

Branch: refs/heads/trunk
Commit: 33f42efc947445b7755da6aad34b5e26b96ad663
Parents: ac515d2
Author: Shweta 
Authored: Tue Aug 28 13:51:04 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 28 13:56:32 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33f42efc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 46323dd..cae0fbf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -100,12 +100,12 @@ import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.InOrder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 
 public class TestDistributedFileSystem {
   private static final Random RAN = new Random();
@@ -113,7 +113,8 @@ public class TestDistributedFileSystem {
   TestDistributedFileSystem.class);
 
   static {
-GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
+GenericTestUtils.setLogLevel(DFSClient.LOG, Level.TRACE);
+GenericTestUtils.setLogLevel(LeaseRenewer.LOG, Level.DEBUG);
   }
 
   private boolean dualPortTesting = false;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-9214. Create a new touch command to allow modifying atime and mtime. Contributed by Hrishikesh Gadre.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk a17eed1b8 -> 60ffec9f7


HADOOP-9214. Create a new touch command to allow modifying atime and mtime. 
Contributed by Hrishikesh Gadre.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60ffec9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60ffec9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60ffec9f

Branch: refs/heads/trunk
Commit: 60ffec9f7921a50aff20434c1042b16fa59240f7
Parents: a17eed1
Author: Xiao Chen 
Authored: Fri Aug 17 10:53:22 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 11:18:09 2018 -0700

--
 .../org/apache/hadoop/fs/shell/FsCommand.java   |   2 +-
 .../java/org/apache/hadoop/fs/shell/Touch.java  |  85 
 .../apache/hadoop/fs/shell/TouchCommands.java   | 198 +++
 .../src/site/markdown/FileSystemShell.md|  32 +++
 .../org/apache/hadoop/fs/TestFsShellTouch.java  | 103 ++
 .../src/test/resources/testConf.xml |  51 +
 6 files changed, 385 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60ffec9f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
index 4a13414..784bbf3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
@@ -66,7 +66,7 @@ abstract public class FsCommand extends Command {
 factory.registerCommands(Tail.class);
 factory.registerCommands(Head.class);
 factory.registerCommands(Test.class);
-factory.registerCommands(Touch.class);
+factory.registerCommands(TouchCommands.class);
 factory.registerCommands(Truncate.class);
 factory.registerCommands(SnapshotCommands.class);
 factory.registerCommands(XAttrCommands.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60ffec9f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
deleted file mode 100644
index a6c751e..000
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.shell;
-
-import java.io.IOException;
-import java.util.LinkedList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.PathIOException;
-import org.apache.hadoop.fs.PathIsDirectoryException;
-import org.apache.hadoop.fs.PathNotFoundException;
-
-/**
- * Unix touch like commands
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-
-class Touch extends FsCommand {
-  public static void registerCommands(CommandFactory factory) {
-factory.addClass(Touchz.class, "-touchz");
-  }
-
-  /**
-   * (Re)create zero-length file at the specified path.
-   * This will be replaced by a more UNIX-like touch when files may be
-   * modified.
-   */
-  public static class Touchz extends Touch {
-public static final String NAME = "touchz";
-public static final String USAGE = " ...";
-public static final String DESCRIPTION =
-  "Creates a file of zero length " +
-  "at  with current time as the timestamp of that . " +
-  "An error is returned if the file exists with non-zero 
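
For background on what the new command does under the hood: a hedged, minimal
sketch of the FileSystem calls a POSIX-style touch boils down to. This is not
the TouchCommands implementation from the patch, just the public API it
ultimately relies on; the class name is made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TouchLikeDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      Path p = new Path(args[0]);
      if (!fs.exists(p)) {
        // Behave like touch on a missing file: create it empty.
        fs.create(p).close();
      }
      long now = System.currentTimeMillis();
      // setTimes(path, mtime, atime); passing -1 for either argument leaves
      // that timestamp unchanged, which is how a command can update only
      // atime or only mtime.
      fs.setTimes(p, now, now);
    }
  }
}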

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 4845464ba -> 2ed1a5d00


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.

(cherry picked from commit 8d7c93186e3090b19aa59006bb6b32ba929bd8e6)

 Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java

(cherry picked from commit 5232653ec0bf8d08187f41930eec073e1b7b1df2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ed1a5d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ed1a5d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ed1a5d0

Branch: refs/heads/branch-2.7
Commit: 2ed1a5d000e3c18003a51dae3d161dcc19f0a307
Parents: 4845464
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:21:10 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 61 ++--
 1 file changed, 57 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ed1a5d0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 0436cc0..cdeb3b5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -32,8 +32,6 @@ import javax.net.ssl.SSLHandshakeException;
 import javax.net.ssl.SSLSocket;
 import javax.net.ssl.SSLSocketFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -43,10 +41,12 @@ import org.apache.hadoop.security.ssl.SSLFactory;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -54,12 +54,17 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
   private static final String BASEDIR = System.getProperty("test.build.dir",
   "target/test-dir") + "/" + TestSSLHttpServer.class.getSimpleName();
 
-  private static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -80,6 +85,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
 
@@ -125,6 +133,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 185c8f2ab -> a41f18098


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.

(cherry picked from commit 8d7c93186e3090b19aa59006bb6b32ba929bd8e6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a41f1809
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a41f1809
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a41f1809

Branch: refs/heads/branch-3.0
Commit: a41f18098b849ba2ccbae824a5a57bfe7b6ad44e
Parents: 185c8f2
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:20:20 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 54 +++-
 1 file changed, 53 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a41f1809/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 5af6d6f..2166464 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -58,11 +58,15 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -83,6 +87,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
 
@@ -127,6 +134,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuites);
+  cipherSuitesPropertyValue = cipherSuites;
+}
+System.clearProperty(HTTPS_CIPHER_SUITES_KEY);
+  }
+
+  private static void restoreHttpsCipherSuites() {
+if (cipherSuitesPropertyValue != null) {
+  LOG.info("Restoring property {} to value: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuitesPropertyValue);
+  System.setProperty(HTTPS_CIPHER_SUITES_KEY, cipherSuitesPropertyValue);
+  cipherSuitesPropertyValue = null;
+}
+  }
+
+  private static void turnOnSSLDebugLogging() {
+String sslDebug = System.getProperty(JAVAX_NET_DEBUG_KEY);
+if (sslDebug != null) {
+  sslDebugPropertyValue = sslDebug;
+}
+System.setProperty(JAVAX_NET_DEBUG_KEY, "all");
+  }
+
+  private static void restoreSSLDebugLogging() {
+if (sslDebugPropertyValue != null) {
+  System.setProperty(JAVAX_NET_DEBUG_KEY, sslDebugPropertyVal

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 50ba2272e -> 42c47971d


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.

(cherry picked from commit 8d7c93186e3090b19aa59006bb6b32ba929bd8e6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42c47971
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42c47971
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42c47971

Branch: refs/heads/branch-2.9
Commit: 42c47971d8bb2ce2ea06b4e94d7c12b4b61870cd
Parents: 50ba227
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:20:37 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 54 +++-
 1 file changed, 53 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42c47971/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 3c68986..38fd926 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -58,11 +58,15 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -83,6 +87,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
 
@@ -127,6 +134,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuites);
+  cipherSuitesPropertyValue = cipherSuites;
+}
+System.clearProperty(HTTPS_CIPHER_SUITES_KEY);
+  }
+
+  private static void restoreHttpsCipherSuites() {
+if (cipherSuitesPropertyValue != null) {
+  LOG.info("Restoring property {} to value: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuitesPropertyValue);
+  System.setProperty(HTTPS_CIPHER_SUITES_KEY, cipherSuitesPropertyValue);
+  cipherSuitesPropertyValue = null;
+}
+  }
+
+  private static void turnOnSSLDebugLogging() {
+String sslDebug = System.getProperty(JAVAX_NET_DEBUG_KEY);
+if (sslDebug != null) {
+  sslDebugPropertyValue = sslDebug;
+}
+System.setProperty(JAVAX_NET_DEBUG_KEY, "all");
+  }
+
+  private static void restoreSSLDebugLogging() {
+if (sslDebugPropertyValue != null) {
+  System.setProperty(JAVAX_NET_DEBUG_KEY, sslDebugPropertyValue);
+  sslDebug

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 2a6b62655 -> 5232653ec


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.

(cherry picked from commit 8d7c93186e3090b19aa59006bb6b32ba929bd8e6)

 Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5232653e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5232653e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5232653e

Branch: refs/heads/branch-2.8
Commit: 5232653ec0bf8d08187f41930eec073e1b7b1df2
Parents: 2a6b626
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:20:48 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 61 ++--
 1 file changed, 57 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5232653e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index f52a055..38fd926 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -32,8 +32,6 @@ import javax.net.ssl.SSLHandshakeException;
 import javax.net.ssl.SSLSocket;
 import javax.net.ssl.SSLSocketFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -45,10 +43,12 @@ import org.apache.hadoop.util.StringUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -56,12 +56,17 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
   private static final String BASEDIR =
   GenericTestUtils.getTempPath(TestSSLHttpServer.class.getSimpleName());
 
-  private static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -82,6 +87,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
 
@@ -126,6 +134,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuites);
+  cipherSuitesPropertyValue = cipherSuites;
+}
+System.clearP

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e2210a517 -> 805647287


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.

(cherry picked from commit 8d7c93186e3090b19aa59006bb6b32ba929bd8e6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80564728
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80564728
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80564728

Branch: refs/heads/branch-2
Commit: 8056472879ff150011887e8f12948ed2ce7534ca
Parents: e2210a5
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:20:29 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 54 +++-
 1 file changed, 53 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80564728/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 3c68986..38fd926 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -58,11 +58,15 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -83,6 +87,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
 
@@ -127,6 +134,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuites);
+  cipherSuitesPropertyValue = cipherSuites;
+}
+System.clearProperty(HTTPS_CIPHER_SUITES_KEY);
+  }
+
+  private static void restoreHttpsCipherSuites() {
+if (cipherSuitesPropertyValue != null) {
+  LOG.info("Restoring property {} to value: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuitesPropertyValue);
+  System.setProperty(HTTPS_CIPHER_SUITES_KEY, cipherSuitesPropertyValue);
+  cipherSuitesPropertyValue = null;
+}
+  }
+
+  private static void turnOnSSLDebugLogging() {
+String sslDebug = System.getProperty(JAVAX_NET_DEBUG_KEY);
+if (sslDebug != null) {
+  sslDebugPropertyValue = sslDebug;
+}
+System.setProperty(JAVAX_NET_DEBUG_KEY, "all");
+  }
+
+  private static void restoreSSLDebugLogging() {
+if (sslDebugPropertyValue != null) {
+  System.setProperty(JAVAX_NET_DEBUG_KEY, sslDebugPropertyValue);
+  sslDebug

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 06f0d5e25 -> 7556b09e9


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.

(cherry picked from commit 8d7c93186e3090b19aa59006bb6b32ba929bd8e6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7556b09e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7556b09e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7556b09e

Branch: refs/heads/branch-3.1
Commit: 7556b09e9a790e5bc697396b0871d99ed3cf1318
Parents: 06f0d5e
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:20:12 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 54 +++-
 1 file changed, 53 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7556b09e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 5af6d6f..2166464 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -58,11 +58,15 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -83,6 +87,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
 
@@ -127,6 +134,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuites);
+  cipherSuitesPropertyValue = cipherSuites;
+}
+System.clearProperty(HTTPS_CIPHER_SUITES_KEY);
+  }
+
+  private static void restoreHttpsCipherSuites() {
+if (cipherSuitesPropertyValue != null) {
+  LOG.info("Restoring property {} to value: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuitesPropertyValue);
+  System.setProperty(HTTPS_CIPHER_SUITES_KEY, cipherSuitesPropertyValue);
+  cipherSuitesPropertyValue = null;
+}
+  }
+
+  private static void turnOnSSLDebugLogging() {
+String sslDebug = System.getProperty(JAVAX_NET_DEBUG_KEY);
+if (sslDebug != null) {
+  sslDebugPropertyValue = sslDebug;
+}
+System.setProperty(JAVAX_NET_DEBUG_KEY, "all");
+  }
+
+  private static void restoreSSLDebugLogging() {
+if (sslDebugPropertyValue != null) {
+  System.setProperty(JAVAX_NET_DEBUG_KEY, sslDebugPropertyVal

hadoop git commit: HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard Nemeth.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk fb5b3dce6 -> 8d7c93186


HADOOP-15674. Test failure TestSSLHttpServer.testExcludedCiphers with 
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suite. Contributed by Szilard 
Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d7c9318
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d7c9318
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d7c9318

Branch: refs/heads/trunk
Commit: 8d7c93186e3090b19aa59006bb6b32ba929bd8e6
Parents: fb5b3dc
Author: Xiao Chen 
Authored: Fri Aug 17 10:08:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Aug 17 10:09:23 2018 -0700

--
 .../apache/hadoop/http/TestSSLHttpServer.java   | 54 +++-
 1 file changed, 53 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d7c9318/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 5af6d6f..2166464 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * This testcase issues SSL certificates configures the HttpServer to serve
- * HTTPS using the created certficates and calls an echo servlet using the
+ * HTTPS using the created certificates and calls an echo servlet using the
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
@@ -58,11 +58,15 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(TestSSLHttpServer.class);
+  private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
+  private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
   private static Configuration conf;
   private static HttpServer2 server;
   private static String keystoresDir;
   private static String sslConfDir;
   private static SSLFactory clientSslFactory;
+  private static String cipherSuitesPropertyValue;
+  private static String sslDebugPropertyValue;
   private static final String excludeCiphers = 
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
   + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
   + "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -83,6 +87,9 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass
   public static void setup() throws Exception {
+turnOnSSLDebugLogging();
+storeHttpsCipherSuites();
+
 conf = new Configuration();
 conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
 
@@ -127,6 +134,51 @@ public class TestSSLHttpServer extends 
HttpServerFunctionalTest {
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
+restoreHttpsCipherSuites();
+restoreSSLDebugLogging();
+  }
+
+  /**
+   * Stores the JVM property value of https.cipherSuites and sets its
+   * value to an empty string.
+   * This ensures that the value https.cipherSuites does
+   * not affect the result of tests.
+   */
+  private static void storeHttpsCipherSuites() {
+String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
+if (cipherSuites != null) {
+  LOG.info(
+  "Found value for property {}: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuites);
+  cipherSuitesPropertyValue = cipherSuites;
+}
+System.clearProperty(HTTPS_CIPHER_SUITES_KEY);
+  }
+
+  private static void restoreHttpsCipherSuites() {
+if (cipherSuitesPropertyValue != null) {
+  LOG.info("Restoring property {} to value: {}", HTTPS_CIPHER_SUITES_KEY,
+  cipherSuitesPropertyValue);
+  System.setProperty(HTTPS_CIPHER_SUITES_KEY, cipherSuitesPropertyValue);
+  cipherSuitesPropertyValue = null;
+}
+  }
+
+  private static void turnOnSSLDebugLogging() {
+String sslDebug = System.getProperty(JAVAX_NET_DEBUG_KEY);
+if (sslDebug != null) {
+  sslDebugPropertyValue = sslDebug;
+}
+System.setProperty(JAVAX_NET_DEBUG_KEY, "all");
+  }
+
+  private static void restoreSSLDebugLogging() {
+if (sslDebugPropertyValue != null) {
+  System.setProperty(JAVAX_NET_DEBUG_KEY, sslDebugPropertyValue);
+  sslDebugPropertyValue = null;
+  
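
The fix is essentially test isolation for JVM-global state: the javadoc added
above spells out the intent that a pre-set https.cipherSuites (and the
javax.net.debug setting) must not affect the test result within the same JVM.
A minimal sketch of the save/clear/restore pattern the patch applies (the
property name below is illustrative):

import org.junit.AfterClass;
import org.junit.BeforeClass;

public class SystemPropertyIsolationExample {
  private static final String KEY = "example.property";
  private static String savedValue;

  @BeforeClass
  public static void saveAndClear() {
    // Remember whatever the JVM was started with, then clear it so the
    // tests run against a known, empty value.
    savedValue = System.getProperty(KEY);
    System.clearProperty(KEY);
  }

  @AfterClass
  public static void restore() {
    // Put the original value back so later tests in the same JVM are not
    // affected by this class.
    if (savedValue != null) {
      System.setProperty(KEY, savedValue);
    }
  }
}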

hadoop git commit: HDFS-13747. Statistic for list_located_status is incremented incorrectly by listStatusIterator. Contributed by Antal Mihalyi.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 3532bd5c8 -> 06f0d5e25


HDFS-13747. Statistic for list_located_status is incremented incorrectly by 
listStatusIterator. Contributed by Antal Mihalyi.

(cherry picked from commit c67b0650ea10896c6289703595faef0d262c00b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06f0d5e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06f0d5e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06f0d5e2

Branch: refs/heads/branch-3.1
Commit: 06f0d5e257a3208f905f59019623a589825e4c8b
Parents: 3532bd5
Author: Xiao Chen 
Authored: Thu Aug 16 23:13:10 2018 -0700
Committer: Xiao Chen 
Committed: Thu Aug 16 23:15:48 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DistributedFileSystem.java| 6 +-
 .../org/apache/hadoop/hdfs/TestDistributedFileSystem.java | 7 +++
 2 files changed, 12 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06f0d5e2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 3519c60..de05f82 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1217,7 +1217,11 @@ public class DistributedFileSystem extends FileSystem
   thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME,
   needLocation);
   statistics.incrementReadOps(1);
-  storageStatistics.incrementOpCounter(OpType.LIST_LOCATED_STATUS);
+  if (needLocation) {
+storageStatistics.incrementOpCounter(OpType.LIST_LOCATED_STATUS);
+  } else {
+storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
+  }
   if (thisListing == null) { // the directory does not exist
 throw new FileNotFoundException("File " + p + " does not exist.");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06f0d5e2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 072ee9f..03e6c8a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -706,6 +706,7 @@ public class TestDistributedFileSystem {
   // Iterative ls test
   long mkdirOp = getOpStatistics(OpType.MKDIRS);
   long listStatusOp = getOpStatistics(OpType.LIST_STATUS);
+  long locatedListStatusOP = getOpStatistics(OpType.LIST_LOCATED_STATUS);
   for (int i = 0; i < 10; i++) {
 Path p = new Path(dir, Integer.toString(i));
 fs.mkdirs(p);
@@ -729,6 +730,12 @@ public class TestDistributedFileSystem {
 checkStatistics(fs, readOps, ++writeOps, largeReadOps);
 checkOpStatistics(OpType.MKDIRS, mkdirOp);
 checkOpStatistics(OpType.LIST_STATUS, listStatusOp);
+
+fs.listLocatedStatus(dir);
+locatedListStatusOP++;
+readOps++;
+checkStatistics(fs, readOps, writeOps, largeReadOps);
+checkOpStatistics(OpType.LIST_LOCATED_STATUS, locatedListStatusOP);
   }
   
   opCount = getOpStatistics(OpType.GET_STATUS);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13747. Statistic for list_located_status is incremented incorrectly by listStatusIterator. Contributed by Antal Mihalyi.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 90bf2d3b5 -> 185c8f2ab


HDFS-13747. Statistic for list_located_status is incremented incorrectly by 
listStatusIterator. Contributed by Antal Mihalyi.

(cherry picked from commit c67b0650ea10896c6289703595faef0d262c00b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/185c8f2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/185c8f2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/185c8f2a

Branch: refs/heads/branch-3.0
Commit: 185c8f2abc364e4941ca4d4522fb61b5b3f5f903
Parents: 90bf2d3
Author: Xiao Chen 
Authored: Thu Aug 16 23:13:10 2018 -0700
Committer: Xiao Chen 
Committed: Thu Aug 16 23:15:58 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DistributedFileSystem.java| 6 +-
 .../org/apache/hadoop/hdfs/TestDistributedFileSystem.java | 7 +++
 2 files changed, 12 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/185c8f2a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 84d840f..9208e66 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1143,7 +1143,11 @@ public class DistributedFileSystem extends FileSystem
   thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME,
   needLocation);
   statistics.incrementReadOps(1);
-  storageStatistics.incrementOpCounter(OpType.LIST_LOCATED_STATUS);
+  if (needLocation) {
+storageStatistics.incrementOpCounter(OpType.LIST_LOCATED_STATUS);
+  } else {
+storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
+  }
   if (thisListing == null) { // the directory does not exist
 throw new FileNotFoundException("File " + p + " does not exist.");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/185c8f2a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 072ee9f..03e6c8a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -706,6 +706,7 @@ public class TestDistributedFileSystem {
   // Iterative ls test
   long mkdirOp = getOpStatistics(OpType.MKDIRS);
   long listStatusOp = getOpStatistics(OpType.LIST_STATUS);
+  long locatedListStatusOP = getOpStatistics(OpType.LIST_LOCATED_STATUS);
   for (int i = 0; i < 10; i++) {
 Path p = new Path(dir, Integer.toString(i));
 fs.mkdirs(p);
@@ -729,6 +730,12 @@ public class TestDistributedFileSystem {
 checkStatistics(fs, readOps, ++writeOps, largeReadOps);
 checkOpStatistics(OpType.MKDIRS, mkdirOp);
 checkOpStatistics(OpType.LIST_STATUS, listStatusOp);
+
+fs.listLocatedStatus(dir);
+locatedListStatusOP++;
+readOps++;
+checkStatistics(fs, readOps, writeOps, largeReadOps);
+checkOpStatistics(OpType.LIST_LOCATED_STATUS, locatedListStatusOP);
   }
   
   opCount = getOpStatistics(OpType.GET_STATUS);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13747. Statistic for list_located_status is incremented incorrectly by listStatusIterator. Contributed by Antal Mihalyi.

2018-08-17 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1697a0230 -> c67b0650e


HDFS-13747. Statistic for list_located_status is incremented incorrectly by 
listStatusIterator. Contributed by Antal Mihalyi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c67b0650
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c67b0650
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c67b0650

Branch: refs/heads/trunk
Commit: c67b0650ea10896c6289703595faef0d262c00b3
Parents: 1697a02
Author: Xiao Chen 
Authored: Thu Aug 16 23:13:10 2018 -0700
Committer: Xiao Chen 
Committed: Thu Aug 16 23:14:21 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DistributedFileSystem.java| 6 +-
 .../org/apache/hadoop/hdfs/TestDistributedFileSystem.java | 7 +++
 2 files changed, 12 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c67b0650/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 70b3679..28c1e27 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1217,7 +1217,11 @@ public class DistributedFileSystem extends FileSystem
   thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME,
   needLocation);
   statistics.incrementReadOps(1);
-  storageStatistics.incrementOpCounter(OpType.LIST_LOCATED_STATUS);
+  if (needLocation) {
+storageStatistics.incrementOpCounter(OpType.LIST_LOCATED_STATUS);
+  } else {
+storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
+  }
   if (thisListing == null) { // the directory does not exist
 throw new FileNotFoundException("File " + p + " does not exist.");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c67b0650/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index f09255e..46323dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -706,6 +706,7 @@ public class TestDistributedFileSystem {
   // Iterative ls test
   long mkdirOp = getOpStatistics(OpType.MKDIRS);
   long listStatusOp = getOpStatistics(OpType.LIST_STATUS);
+  long locatedListStatusOP = getOpStatistics(OpType.LIST_LOCATED_STATUS);
   for (int i = 0; i < 10; i++) {
 Path p = new Path(dir, Integer.toString(i));
 fs.mkdirs(p);
@@ -729,6 +730,12 @@ public class TestDistributedFileSystem {
 checkStatistics(fs, readOps, ++writeOps, largeReadOps);
 checkOpStatistics(OpType.MKDIRS, mkdirOp);
 checkOpStatistics(OpType.LIST_STATUS, listStatusOp);
+
+fs.listLocatedStatus(dir);
+locatedListStatusOP++;
+readOps++;
+checkStatistics(fs, readOps, writeOps, largeReadOps);
+checkOpStatistics(OpType.LIST_LOCATED_STATUS, locatedListStatusOP);
   }
   
   opCount = getOpStatistics(OpType.GET_STATUS);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
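
A hedged sketch of the client-visible effect, assuming fs.defaultFS points at
an HDFS cluster so the counters come from DistributedFileSystem: before this
patch both listings below were accounted as LIST_LOCATED_STATUS; with the fix,
the iterator that does not ask for block locations counts as LIST_STATUS
instead. The dump at the end iterates all registered counters rather than
guessing at key names.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.StorageStatistics;

public class ListingStatsDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path(args[0]);

    // Iterative listing without block locations: with the fix this is
    // accounted as a LIST_STATUS operation.
    RemoteIterator<FileStatus> plain = fs.listStatusIterator(dir);
    while (plain.hasNext()) {
      plain.next();
    }

    // Listing with block locations: accounted as LIST_LOCATED_STATUS.
    RemoteIterator<LocatedFileStatus> located = fs.listLocatedStatus(dir);
    while (located.hasNext()) {
      located.next();
    }

    // Print every per-filesystem counter that is currently registered.
    for (StorageStatistics stats : FileSystem.getGlobalStorageStatistics()) {
      stats.getLongStatistics().forEachRemaining(s ->
          System.out.println(stats.getName() + "." + s.getName()
              + " = " + s.getValue()));
    }
  }
}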



hadoop git commit: HADOOP-15655. Enhance KMS client retry behavior. Contributed by Kitti Nanasi.

2018-08-16 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2d13e410d -> edeb2a356


HADOOP-15655. Enhance KMS client retry behavior. Contributed by Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edeb2a35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edeb2a35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edeb2a35

Branch: refs/heads/trunk
Commit: edeb2a356ad671d962764c6e2aee9f9e7d6f394c
Parents: 2d13e41
Author: Xiao Chen 
Authored: Thu Aug 16 22:32:32 2018 -0700
Committer: Xiao Chen 
Committed: Thu Aug 16 22:42:03 2018 -0700

--
 .../key/kms/LoadBalancingKMSClientProvider.java |  43 ++---
 .../kms/TestLoadBalancingKMSClientProvider.java | 181 ++-
 2 files changed, 193 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/edeb2a35/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index 23cdc50..e68e844 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -113,8 +113,8 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 return providers;
   }
 
-  private  T doOp(ProviderCallable op, int currPos)
-  throws IOException {
+  private  T doOp(ProviderCallable op, int currPos,
+  boolean isIdempotent) throws IOException {
 if (providers.length == 0) {
   throw new IOException("No providers configured !");
 }
@@ -143,7 +143,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 }
 RetryAction action = null;
 try {
-  action = retryPolicy.shouldRetry(ioe, 0, numFailovers, false);
+  action = retryPolicy.shouldRetry(ioe, 0, numFailovers, isIdempotent);
 } catch (Exception e) {
   if (e instanceof IOException) {
 throw (IOException)e;
@@ -201,7 +201,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
   public Token[] call(KMSClientProvider provider) throws IOException {
 return provider.addDelegationTokens(renewer, credentials);
   }
-}, nextIdx());
+}, nextIdx(), false);
   }
 
   @Override
@@ -211,7 +211,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
   public Long call(KMSClientProvider provider) throws IOException {
 return provider.renewDelegationToken(token);
   }
-}, nextIdx());
+}, nextIdx(), false);
   }
 
   @Override
@@ -222,7 +222,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 provider.cancelDelegationToken(token);
 return null;
   }
-}, nextIdx());
+}, nextIdx(), false);
   }
 
   // This request is sent to all providers in the load-balancing group
@@ -275,7 +275,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 throws IOException, GeneralSecurityException {
   return provider.generateEncryptedKey(encryptionKeyName);
 }
-  }, nextIdx());
+  }, nextIdx(), true);
 } catch (WrapperException we) {
   if (we.getCause() instanceof GeneralSecurityException) {
 throw (GeneralSecurityException) we.getCause();
@@ -295,7 +295,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 throws IOException, GeneralSecurityException {
   return provider.decryptEncryptedKey(encryptedKeyVersion);
 }
-  }, nextIdx());
+  }, nextIdx(), true);
 } catch (WrapperException we) {
   if (we.getCause() instanceof GeneralSecurityException) {
 throw (GeneralSecurityException) we.getCause();
@@ -315,7 +315,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 throws IOException, GeneralSecurityException {
   return provider.reencryptEncryptedKey(ekv);
 }
-  }, nextIdx());
+  }, nextIdx(), true);
 } catch (WrapperException we) {
   if (we.getCause() instanceof GeneralSecurityException) {
 throw (GeneralSecurityException) we.getCause();
@@ -335,7 +335,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
   provider.reencryptEncryptedKeys(ekvs);
 

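The diff is cut off by the archive, but the shape of the HADOOP-15655 change is visible above: every KMS operation now tells doOp() whether it is idempotent, and the retry policy only fails over calls that are safe to replay (EDEK generate/decrypt/re-encrypt), while delegation token operations fail fast. A minimal standalone sketch of that pattern follows; ProviderOp, KmsEndpoint and doOp here are illustrative stand-ins, not the Hadoop classes.

import java.io.IOException;
import java.util.List;

public class IdempotentRetryDemo {
  interface ProviderOp<T> { T call(KmsEndpoint endpoint) throws IOException; }

  static class KmsEndpoint {
    private final boolean healthy;
    KmsEndpoint(boolean healthy) { this.healthy = healthy; }
    String generateKey(String name) throws IOException {
      if (!healthy) { throw new IOException("endpoint down"); }
      return name + "-ek";
    }
  }

  // Retry across endpoints only when the operation is idempotent;
  // non-idempotent calls fail fast after the first error.
  static <T> T doOp(List<KmsEndpoint> endpoints, ProviderOp<T> op,
      boolean isIdempotent) throws IOException {
    IOException last = null;
    for (KmsEndpoint e : endpoints) {
      try {
        return op.call(e);
      } catch (IOException ioe) {
        last = ioe;
        if (!isIdempotent) {
          throw ioe;            // do not blindly replay non-idempotent ops
        }
        // otherwise fall through and try the next endpoint
      }
    }
    throw last != null ? last : new IOException("No providers configured !");
  }

  public static void main(String[] args) throws IOException {
    List<KmsEndpoint> endpoints =
        List.of(new KmsEndpoint(false), new KmsEndpoint(true));
    // A generateEncryptedKey-style call is idempotent, so it fails over.
    String ek = doOp(endpoints, e -> e.generateKey("mykey"), true);
    System.out.println("generated " + ek);
  }
}
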
hadoop git commit: HDFS-13732. ECAdmin should print the policy name when an EC policy is set. Contributed by Zsolt Venczel.

2018-08-15 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk d951af22b -> 7dc79a8b5


HDFS-13732. ECAdmin should print the policy name when an EC policy is set. 
Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7dc79a8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7dc79a8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7dc79a8b

Branch: refs/heads/trunk
Commit: 7dc79a8b5b7af0bf37d25a221be8ed446b0edb74
Parents: d951af2
Author: Xiao Chen 
Authored: Wed Aug 15 13:51:14 2018 -0700
Committer: Xiao Chen 
Committed: Wed Aug 15 13:53:47 2018 -0700

--
 .../main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java | 12 ++--
 .../src/test/resources/testErasureCodingConf.xml|  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dc79a8b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 56706b2..56d453b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -357,16 +357,16 @@ public class ECAdmin extends Configured implements Tool {
   final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
   try {
 dfs.setErasureCodingPolicy(p, ecPolicyName);
-if (ecPolicyName == null){
-  ecPolicyName = "default";
-}
-System.out.println("Set " + ecPolicyName + " erasure coding policy on" +
-" " + path);
+
+String actualECPolicyName = dfs.getErasureCodingPolicy(p).getName();
+
+System.out.println("Set " + actualECPolicyName +
+" erasure coding policy on "+ path);
 RemoteIterator dirIt = dfs.listStatusIterator(p);
 if (dirIt.hasNext()) {
   System.out.println("Warning: setting erasure coding policy on a " +
   "non-empty directory will not automatically convert existing " +
-  "files to " + ecPolicyName + " erasure coding policy");
+  "files to " + actualECPolicyName + " erasure coding policy");
 }
   } catch (Exception e) {
 System.err.println(AdminHelper.prettifyException(e));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dc79a8b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 9070367..b47d50f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -710,7 +710,7 @@
   
 
   SubstringComparator
-  Set default erasure coding policy on 
/ecdir
+  Set RS-6-3-1024k erasure coding policy on 
/ecdir
 
   
 
@@ -728,11 +728,11 @@
   
 
   SubstringComparator
-  Set default erasure coding policy on 
/ecdir
+  Set RS-6-3-1024k erasure coding policy on 
/ecdir
 
 
   SubstringComparator
-  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to default 
erasure coding policy
+  Warning: setting erasure coding policy on a 
non-empty directory will not automatically convert existing files to 
RS-6-3-1024k erasure coding policy
 
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
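
The point of the fix above is to report the policy that the NameNode actually applied, as returned by getErasureCodingPolicy(), instead of echoing the requested name (or the literal string "default"). A short sketch of the same query pattern is below; it is only a fragment and assumes an already-initialized DistributedFileSystem handle pointing at a running cluster.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative fragment only: `dfs` must already be connected to a cluster.
public class EffectivePolicyName {
  static void printEffectivePolicy(DistributedFileSystem dfs, Path dir)
      throws java.io.IOException {
    // Passing null requests the cluster-wide default erasure coding policy.
    dfs.setErasureCodingPolicy(dir, null);
    // Ask the NameNode what was actually applied instead of echoing "default".
    String actual = dfs.getErasureCodingPolicy(dir).getName();
    System.out.println("Set " + actual + " erasure coding policy on " + dir);
  }
}
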



hadoop git commit: HDFS-13217. Audit log all EC policy names during addErasureCodingPolicies. Contributed by liaoyuxiangqin.

2018-08-15 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3e3963b03 -> ef02f9b23


HDFS-13217. Audit log all EC policy names during addErasureCodingPolicies. 
Contributed by liaoyuxiangqin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef02f9b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef02f9b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef02f9b2

Branch: refs/heads/trunk
Commit: ef02f9b233116a10cf9274b3753fe0b8dcbe8d92
Parents: 3e3963b
Author: Xiao Chen 
Authored: Wed Aug 15 09:22:24 2018 -0700
Committer: Xiao Chen 
Committed: Wed Aug 15 09:23:05 2018 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef02f9b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ecf7fce..cdd7d48 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7538,9 +7538,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   ErasureCodingPolicy[] policies, final boolean logRetryCache)
   throws IOException {
 final String operationName = "addErasureCodingPolicies";
-String addECPolicyName = "";
+List addECPolicyNames = new ArrayList<>(policies.length);
 checkOperation(OperationCategory.WRITE);
-List responses = new ArrayList<>();
+List responses =
+new ArrayList<>(policies.length);
 boolean success = false;
 writeLock();
 try {
@@ -7551,7 +7552,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   ErasureCodingPolicy newPolicy =
   FSDirErasureCodingOp.addErasureCodingPolicy(this, policy,
   logRetryCache);
-  addECPolicyName = newPolicy.getName();
+  addECPolicyNames.add(newPolicy.getName());
   responses.add(new AddErasureCodingPolicyResponse(newPolicy));
 } catch (HadoopIllegalArgumentException e) {
   responses.add(new AddErasureCodingPolicyResponse(policy, e));
@@ -7564,7 +7565,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   if (success) {
 getEditLog().logSync();
   }
-  logAuditEvent(success, operationName, addECPolicyName, null, null);
+  logAuditEvent(success, operationName, addECPolicyNames.toString(),
+  null, null);
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
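
Before this change only the last successfully added policy name made it into the audit log entry; the patch accumulates every added name and logs the whole list. A tiny standalone illustration of the difference, with made-up policy names:

import java.util.ArrayList;
import java.util.List;

public class AuditAllPolicyNames {
  public static void main(String[] args) {
    String[] added = {"RS-6-3-1024k", "RS-10-4-1024k", "XOR-2-1-1024k"};

    // Old behaviour: a single String keeps only the last added policy name.
    String lastOnly = "";
    for (String name : added) {
      lastOnly = name;
    }

    // New behaviour: collect every name and log the whole list.
    List<String> all = new ArrayList<>(added.length);
    for (String name : added) {
      all.add(name);
    }

    System.out.println("old audit entry: " + lastOnly);  // XOR-2-1-1024k
    System.out.println("new audit entry: " + all);       // all three names
  }
}
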



hadoop git commit: HDFS-13788. Update EC documentation about rack fault tolerance. Contributed by Kitti Nanasi.

2018-08-14 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 6f2a46578 -> 5d16301ad


HDFS-13788. Update EC documentation about rack fault tolerance. Contributed by 
Kitti Nanasi.

(cherry picked from commit cede33997f7ab09fc046017508b680e282289ce3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d16301a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d16301a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d16301a

Branch: refs/heads/branch-3.0
Commit: 5d16301ad9d5133d1a015b032ebe219146d64cd3
Parents: 6f2a465
Author: Xiao Chen 
Authored: Tue Aug 14 11:56:51 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 14 11:59:05 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md   | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d16301a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 4459c94..9ea3caa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -103,10 +103,10 @@ Deployment
   This means that when reading and writing striped files, most operations are 
off-rack.
   Network bisection bandwidth is thus very important.
 
-  For rack fault-tolerance, it is also important to have at least as many 
racks as the configured EC stripe width.
-  For EC policy RS (6,3), this means minimally 9 racks, and ideally 10 or 11 
to handle planned and unplanned outages.
-  For clusters with fewer racks than the stripe width, HDFS cannot maintain 
rack fault-tolerance, but will still attempt
-  to spread a striped file across multiple nodes to preserve node-level 
fault-tolerance.
+  For rack fault-tolerance, it is also important to have enough number of 
racks, so that on average, each rack holds number of blocks no more than the 
number of EC parity blocks. A formula to calculate this would be (data blocks + 
parity blocks) / parity blocks, rounding up.
+  For EC policy RS (6,3), this means minimally 3 racks (calculated by (6 + 3) 
/ 3 = 3), and ideally 9 or more to handle planned and unplanned outages.
+  For clusters with fewer racks than the number of the parity cells, HDFS 
cannot maintain rack fault-tolerance, but will still attempt
+  to spread a striped file across multiple nodes to preserve node-level 
fault-tolerance. For this reason, it is recommended to setup racks with similar 
number of DataNodes.
 
 ### Configuration keys
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
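
The updated guidance replaces the old "at least as many racks as the stripe width" rule with ceil((data blocks + parity blocks) / parity blocks). A small standalone sketch of that arithmetic for a few common policies (plain illustration, not Hadoop code):

public class MinRacksForEc {
  // ceil((data + parity) / parity): the smallest rack count where an average
  // rack holds no more blocks of a group than the number of parity blocks.
  static int minRacks(int dataBlocks, int parityBlocks) {
    return (dataBlocks + parityBlocks + parityBlocks - 1) / parityBlocks;
  }

  public static void main(String[] args) {
    System.out.println("RS(6,3):  " + minRacks(6, 3));   // 3
    System.out.println("RS(10,4): " + minRacks(10, 4));  // 4 (ceil(14/4))
    System.out.println("RS(3,2):  " + minRacks(3, 2));   // 3 (ceil(5/2))
  }
}
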



hadoop git commit: HDFS-13788. Update EC documentation about rack fault tolerance. Contributed by Kitti Nanasi.

2018-08-14 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 7a115e8de -> 83d0d82a6


HDFS-13788. Update EC documentation about rack fault tolerance. Contributed by 
Kitti Nanasi.

(cherry picked from commit cede33997f7ab09fc046017508b680e282289ce3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83d0d82a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83d0d82a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83d0d82a

Branch: refs/heads/branch-3.1
Commit: 83d0d82a69dd35f5cac695eb2a491abdca0852a1
Parents: 7a115e8
Author: Xiao Chen 
Authored: Tue Aug 14 11:56:51 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 14 11:57:50 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md   | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83d0d82a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 60fd3ab..f3b920f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -107,10 +107,10 @@ Deployment
   This means that when reading and writing striped files, most operations are 
off-rack.
   Network bisection bandwidth is thus very important.
 
-  For rack fault-tolerance, it is also important to have at least as many 
racks as the configured EC stripe width.
-  For EC policy RS (6,3), this means minimally 9 racks, and ideally 10 or 11 
to handle planned and unplanned outages.
-  For clusters with fewer racks than the stripe width, HDFS cannot maintain 
rack fault-tolerance, but will still attempt
-  to spread a striped file across multiple nodes to preserve node-level 
fault-tolerance.
+  For rack fault-tolerance, it is also important to have enough number of 
racks, so that on average, each rack holds number of blocks no more than the 
number of EC parity blocks. A formula to calculate this would be (data blocks + 
parity blocks) / parity blocks, rounding up.
+  For EC policy RS (6,3), this means minimally 3 racks (calculated by (6 + 3) 
/ 3 = 3), and ideally 9 or more to handle planned and unplanned outages.
+  For clusters with fewer racks than the number of the parity cells, HDFS 
cannot maintain rack fault-tolerance, but will still attempt
+  to spread a striped file across multiple nodes to preserve node-level 
fault-tolerance. For this reason, it is recommended to setup racks with similar 
number of DataNodes.
 
 ### Configuration keys
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13788. Update EC documentation about rack fault tolerance. Contributed by Kitti Nanasi.

2018-08-14 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7e822ec24 -> cede33997


HDFS-13788. Update EC documentation about rack fault tolerance. Contributed by 
Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cede3399
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cede3399
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cede3399

Branch: refs/heads/trunk
Commit: cede33997f7ab09fc046017508b680e282289ce3
Parents: 7e822ec
Author: Xiao Chen 
Authored: Tue Aug 14 11:56:51 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 14 11:57:22 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md   | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cede3399/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 6ae2086..2e8cbbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -107,10 +107,10 @@ Deployment
   This means that when reading and writing striped files, most operations are 
off-rack.
   Network bisection bandwidth is thus very important.
 
-  For rack fault-tolerance, it is also important to have at least as many 
racks as the configured EC stripe width.
-  For EC policy RS (6,3), this means minimally 9 racks, and ideally 10 or 11 
to handle planned and unplanned outages.
-  For clusters with fewer racks than the stripe width, HDFS cannot maintain 
rack fault-tolerance, but will still attempt
-  to spread a striped file across multiple nodes to preserve node-level 
fault-tolerance.
+  For rack fault-tolerance, it is also important to have enough number of 
racks, so that on average, each rack holds number of blocks no more than the 
number of EC parity blocks. A formula to calculate this would be (data blocks + 
parity blocks) / parity blocks, rounding up.
+  For EC policy RS (6,3), this means minimally 3 racks (calculated by (6 + 3) 
/ 3 = 3), and ideally 9 or more to handle planned and unplanned outages.
+  For clusters with fewer racks than the number of the parity cells, HDFS 
cannot maintain rack fault-tolerance, but will still attempt
+  to spread a striped file across multiple nodes to preserve node-level 
fault-tolerance. For this reason, it is recommended to setup racks with similar 
number of DataNodes.
 
 ### Configuration keys
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13786. EC: Display erasure coding policy for sub-directories is not working. Contributed by Ayush Saxena.

2018-08-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 e05b38f13 -> b3f29e350


HDFS-13786. EC: Display erasure coding policy for sub-directories is not 
working. Contributed by Ayush Saxena.

(cherry picked from commit 2b0f9772417d205e8df16bac6921c2bb8bdcf740)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3f29e35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3f29e35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3f29e35

Branch: refs/heads/branch-3.1
Commit: b3f29e3504ee1f4cd371265bab2810fadf38877e
Parents: e05b38f
Author: Vinayakumar B 
Authored: Wed Aug 8 07:47:10 2018 +0530
Committer: Xiao Chen 
Committed: Mon Aug 13 13:58:27 2018 -0700

--
 .../namenode/ContentSummaryComputationContext.java|  2 ++
 .../apache/hadoop/hdfs/TestErasureCodingPolicies.java | 14 ++
 2 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3f29e35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index c81f82c..95f3fee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -191,6 +191,8 @@ public class ContentSummaryComputationContext {
   .getEnabledPolicyByName(ecPolicyName)
   .getName();
 }
+  } else if (inode.getParent() != null) {
+  return getErasureCodingPolicyName(inode.getParent());
   }
 } catch (IOException ioe) {
   LOG.warn("Encountered error getting ec policy for "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3f29e35/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 0b7d259..5c703c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -154,6 +155,19 @@ public class TestErasureCodingPolicies {
   }
 
   @Test
+  public void testContentSummaryOfECSubdir() throws IOException {
+final Path testDir = new Path("/ec");
+fs.mkdir(testDir, FsPermission.getDirDefault());
+fs.setErasureCodingPolicy(testDir, ecPolicy.getName());
+final Path fPath = new Path("ec/file");
+fs.create(fPath).close();
+final Path subdir = new Path("/ec/sub");
+fs.mkdir(subdir, FsPermission.getDirDefault());
+ContentSummary contentSummary = fs.getContentSummary(subdir);
+assertEquals(ecPolicy.getName(),contentSummary.getErasureCodingPolicy());
+  }
+
+  @Test
   public void testBasicSetECPolicy()
   throws IOException, InterruptedException {
 final Path testDir = new Path("/ec");


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
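
The fix makes the content summary fall back to the nearest ancestor's erasure coding policy when the queried inode has no policy of its own. A minimal standalone sketch of that parent walk; Node here is a hypothetical stand-in for the HDFS INode classes:

public class InheritedPolicyLookup {
  static class Node {
    final Node parent;
    final String ecPolicyName;   // null means "no policy set on this inode"
    Node(Node parent, String ecPolicyName) {
      this.parent = parent;
      this.ecPolicyName = ecPolicyName;
    }
  }

  // Walk up until a policy is found; return "" if no ancestor has one.
  static String effectivePolicy(Node node) {
    for (Node n = node; n != null; n = n.parent) {
      if (n.ecPolicyName != null) {
        return n.ecPolicyName;
      }
    }
    return "";
  }

  public static void main(String[] args) {
    Node root = new Node(null, null);
    Node ecDir = new Node(root, "RS-6-3-1024k");   // /ec with a policy set
    Node sub = new Node(ecDir, null);              // /ec/sub, no explicit policy
    System.out.println(effectivePolicy(sub));      // RS-6-3-1024k
  }
}
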



hadoop git commit: HDFS-13786. EC: Display erasure coding policy for sub-directories is not working. Contributed by Ayush Saxena.

2018-08-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 2d985b6c8 -> eecfbaff3


HDFS-13786. EC: Display erasure coding policy for sub-directories is not 
working. Contributed by Ayush Saxena.

(cherry picked from commit 2b0f9772417d205e8df16bac6921c2bb8bdcf740)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eecfbaff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eecfbaff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eecfbaff

Branch: refs/heads/branch-3.0
Commit: eecfbaff3b907ad94574082b078b23d232bc8cbb
Parents: 2d985b6
Author: Vinayakumar B 
Authored: Wed Aug 8 07:47:10 2018 +0530
Committer: Xiao Chen 
Committed: Mon Aug 13 13:58:37 2018 -0700

--
 .../namenode/ContentSummaryComputationContext.java|  2 ++
 .../apache/hadoop/hdfs/TestErasureCodingPolicies.java | 14 ++
 2 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eecfbaff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index c81f82c..95f3fee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -191,6 +191,8 @@ public class ContentSummaryComputationContext {
   .getEnabledPolicyByName(ecPolicyName)
   .getName();
 }
+  } else if (inode.getParent() != null) {
+  return getErasureCodingPolicyName(inode.getParent());
   }
 } catch (IOException ioe) {
   LOG.warn("Encountered error getting ec policy for "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eecfbaff/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 0b7d259..5c703c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -154,6 +155,19 @@ public class TestErasureCodingPolicies {
   }
 
   @Test
+  public void testContentSummaryOfECSubdir() throws IOException {
+final Path testDir = new Path("/ec");
+fs.mkdir(testDir, FsPermission.getDirDefault());
+fs.setErasureCodingPolicy(testDir, ecPolicy.getName());
+final Path fPath = new Path("ec/file");
+fs.create(fPath).close();
+final Path subdir = new Path("/ec/sub");
+fs.mkdir(subdir, FsPermission.getDirDefault());
+ContentSummary contentSummary = fs.getContentSummary(subdir);
+assertEquals(ecPolicy.getName(),contentSummary.getErasureCodingPolicy());
+  }
+
+  @Test
   public void testBasicSetECPolicy()
   throws IOException, InterruptedException {
 final Path testDir = new Path("/ec");


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15638. KMS Accept Queue Size default changed from 500 to 128 in Hadoop 3.x. Contributed by Wei-Chiu Chuang.

2018-08-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 07054c599 -> 7730a8523


HADOOP-15638. KMS Accept Queue Size default changed from 500 to 128 in Hadoop 
3.x. Contributed by Wei-Chiu Chuang.

(cherry picked from commit b94c8874e2a634637b1ef5d837f05bc5c5e8e6a6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7730a852
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7730a852
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7730a852

Branch: refs/heads/branch-3.0
Commit: 7730a85233bf6a606a111f59a578b5fa0c95ad8c
Parents: 07054c5
Author: Xiao Chen 
Authored: Mon Aug 13 10:40:07 2018 -0700
Committer: Xiao Chen 
Committed: Mon Aug 13 10:41:14 2018 -0700

--
 .../hadoop-kms/src/main/resources/kms-default.xml| 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7730a852/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml 
b/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
index 7055f2d..0c785a2 100644
--- a/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
+++ b/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
@@ -93,6 +93,14 @@
 
   
 
+  
+hadoop.http.socket.backlog.size
+500
+
+  KMS Server accept queue size.
+
+  
+
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15638. KMS Accept Queue Size default changed from 500 to 128 in Hadoop 3.x. Contributed by Wei-Chiu Chuang.

2018-08-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 baca64cc6 -> 1d748f026


HADOOP-15638. KMS Accept Queue Size default changed from 500 to 128 in Hadoop 
3.x. Contributed by Wei-Chiu Chuang.

(cherry picked from commit b94c8874e2a634637b1ef5d837f05bc5c5e8e6a6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d748f02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d748f02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d748f02

Branch: refs/heads/branch-3.1
Commit: 1d748f026174a0af4b347c012078bd67e5f03542
Parents: baca64c
Author: Xiao Chen 
Authored: Mon Aug 13 10:40:07 2018 -0700
Committer: Xiao Chen 
Committed: Mon Aug 13 10:41:07 2018 -0700

--
 .../hadoop-kms/src/main/resources/kms-default.xml| 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d748f02/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml 
b/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
index 7055f2d..0c785a2 100644
--- a/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
+++ b/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
@@ -93,6 +93,14 @@
 
   
 
+  
+hadoop.http.socket.backlog.size
+500
+
+  KMS Server accept queue size.
+
+  
+
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15638. KMS Accept Queue Size default changed from 500 to 128 in Hadoop 3.x. Contributed by Wei-Chiu Chuang.

2018-08-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 11daa010d -> b94c8874e


HADOOP-15638. KMS Accept Queue Size default changed from 500 to 128 in Hadoop 
3.x. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b94c8874
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b94c8874
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b94c8874

Branch: refs/heads/trunk
Commit: b94c8874e2a634637b1ef5d837f05bc5c5e8e6a6
Parents: 11daa01
Author: Xiao Chen 
Authored: Mon Aug 13 10:40:07 2018 -0700
Committer: Xiao Chen 
Committed: Mon Aug 13 10:40:31 2018 -0700

--
 .../hadoop-kms/src/main/resources/kms-default.xml| 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b94c8874/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml 
b/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
index 9f4171b..434adcb 100644
--- a/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
+++ b/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
@@ -93,6 +93,14 @@
 
   
 
+  
+hadoop.http.socket.backlog.size
+500
+
+  KMS Server accept queue size.
+
+  
+
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13658. Expose HighestPriorityLowRedundancy blocks statistics. Contributed by Kitti Nanasi.

2018-08-08 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk ff06bd1be -> 9499df7b8


HDFS-13658. Expose HighestPriorityLowRedundancy blocks statistics. Contributed 
by Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9499df7b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9499df7b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9499df7b

Branch: refs/heads/trunk
Commit: 9499df7b81b55b488a32fd59798a543dafef4ef8
Parents: ff06bd1
Author: Xiao Chen 
Authored: Wed Aug 8 10:36:44 2018 -0700
Committer: Xiao Chen 
Committed: Wed Aug 8 10:40:20 2018 -0700

--
 .../hadoop-common/src/site/markdown/Metrics.md  |  2 +
 .../hadoop/hdfs/protocol/ECBlockGroupStats.java | 27 +++-
 .../hdfs/protocol/ReplicatedBlockStats.java | 28 -
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 21 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  3 ++
 .../federation/metrics/NamenodeBeanMetrics.java | 10 +
 .../server/federation/router/ErasureCoding.java | 13 ++
 .../server/blockmanagement/BlockManager.java|  8 
 .../blockmanagement/LowRedundancyBlocks.java| 28 +
 .../hdfs/server/namenode/FSNamesystem.java  | 20 -
 .../hdfs/server/namenode/NameNodeMXBean.java| 18 
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 10 +
 .../TestLowRedundancyBlockQueues.java   | 43 +---
 .../namenode/metrics/TestNameNodeMetrics.java   | 12 ++
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 32 +++
 15 files changed, 247 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9499df7b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 4313640..83ad40a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -244,6 +244,8 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `StaleDataNodes` | Current number of DataNodes marked stale due to delayed 
heartbeat |
 | `NumStaleStorages` | Number of storages marked as content stale (after 
NameNode restart/failover before first block report is received) |
 | `MissingReplOneBlocks` | Current number of missing blocks with replication 
factor 1 |
+| `HighestPriorityLowRedundancyReplicatedBlocks` | Current number of 
non-corrupt, low redundancy replicated blocks with the highest risk of loss 
(have 0 or 1 replica). Will be recovered with the highest priority. |
+| `HighestPriorityLowRedundancyECBlocks` | Current number of non-corrupt, low 
redundancy EC blocks with the highest risk of loss. Will be recovered with the 
highest priority. |
 | `NumFilesUnderConstruction` | Current number of files under construction |
 | `NumActiveClients` | Current number of active clients holding lease |
 | `HAState` | (HA-only) Current state of the NameNode: initializing or active 
or standby or stopping state |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9499df7b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
index 9a8ad8c..3dde604 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
@@ -34,15 +34,26 @@ public final class ECBlockGroupStats {
   private final long missingBlockGroups;
   private final long bytesInFutureBlockGroups;
   private final long pendingDeletionBlocks;
+  private final Long highestPriorityLowRedundancyBlocks;
 
   public ECBlockGroupStats(long lowRedundancyBlockGroups,
   long corruptBlockGroups, long missingBlockGroups,
   long bytesInFutureBlockGroups, long pendingDeletionBlocks) {
+this(lowRedundancyBlockGroups, corruptBlockGroups, missingBlockGroups,
+bytesInFutureBlockGroups, pendingDeletionBlocks, null);
+  }
+
+  public ECBlockGroupStats(long lowRedundancyBlockGroups,
+  long corruptBlockGroups, long missingBlockGroups,
+  long bytesInFutureBlockGroups, long pendingDeletionBlocks,
+  Long highestPriorityLowRedundancyBlocks) {
 this.lowRedundancyBlockGro

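The hunk above is truncated, but the compatibility pattern is visible: the new highest-priority-low-redundancy counter is added as an optional (nullable) field, and the pre-existing constructor delegates to the new one with null so old callers keep working. A reduced standalone sketch of that pattern, trimmed to a single field; EcStatsSketch is illustrative, not the actual ECBlockGroupStats class:

public final class EcStatsSketch {
  private final long lowRedundancyBlockGroups;
  // New optional statistic; null when the value is not available.
  private final Long highestPriorityLowRedundancyBlocks;

  // Pre-existing constructor keeps compiling for old callers.
  public EcStatsSketch(long lowRedundancyBlockGroups) {
    this(lowRedundancyBlockGroups, null);
  }

  // New constructor carries the extra value when it is available.
  public EcStatsSketch(long lowRedundancyBlockGroups,
      Long highestPriorityLowRedundancyBlocks) {
    this.lowRedundancyBlockGroups = lowRedundancyBlockGroups;
    this.highestPriorityLowRedundancyBlocks = highestPriorityLowRedundancyBlocks;
  }

  public boolean hasHighestPriorityLowRedundancyBlocks() {
    return highestPriorityLowRedundancyBlocks != null;
  }

  public static void main(String[] args) {
    EcStatsSketch legacy = new EcStatsSketch(42L);
    EcStatsSketch current = new EcStatsSketch(42L, 7L);
    System.out.println(legacy.hasHighestPriorityLowRedundancyBlocks());  // false
    System.out.println(current.hasHighestPriorityLowRedundancyBlocks()); // true
  }
}
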
hadoop git commit: HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell.

2018-08-07 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 a3675f382 -> 8e5081569


HDFS-13728. Disk Balancer should not fail if volume usage is greater than 
capacity. Contributed by Stephen O'Donnell.

(cherry picked from commit 6677717c689cc94a15f14c3466242e23652d473b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e508156
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e508156
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e508156

Branch: refs/heads/branch-3.0
Commit: 8e5081569f00cde23e58e234dc22a1dabb20323a
Parents: a3675f3
Author: Xiao Chen 
Authored: Tue Aug 7 22:04:41 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 7 22:05:59 2018 -0700

--
 .../diskbalancer/datamodel/DiskBalancerVolume.java | 17 -
 .../hdfs/server/diskbalancer/TestDataModels.java   | 16 
 2 files changed, 28 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e508156/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
index a9fd7f0..e43b83e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
@@ -21,9 +21,10 @@ import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
-import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.hdfs.web.JsonUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 
@@ -35,6 +36,9 @@ public class DiskBalancerVolume {
   private static final ObjectReader READER =
   new ObjectMapper().readerFor(DiskBalancerVolume.class);
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(DiskBalancerVolume.class);
+
   private String path;
   private long capacity;
   private String storageType;
@@ -269,10 +273,13 @@ public class DiskBalancerVolume {
* @param dfsUsedSpace - dfsUsedSpace for this volume.
*/
   public void setUsed(long dfsUsedSpace) {
-Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(),
-"DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)",
-dfsUsedSpace, getCapacity());
-this.used = dfsUsedSpace;
+if (dfsUsedSpace > this.getCapacity()) {
+  LOG.warn("Volume usage ("+dfsUsedSpace+") is greater than capacity ("+
+this.getCapacity()+"). Setting volume usage to the capacity");
+  this.used = this.getCapacity();
+} else {
+  this.used = dfsUsedSpace;
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e508156/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
index ace8212..12fbcf1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
@@ -224,4 +224,20 @@ public class TestDataModels {
 Assert
 .assertEquals(cluster.getNodes().size(), newCluster.getNodes().size());
   }
+
+  @Test
+  public void testUsageLimitedToCapacity() throws Exception {
+DiskBalancerTestUtil util = new DiskBalancerTestUtil();
+
+// If usage is greater than capacity, then it should be set to capacity
+DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK);
+v1.setCapacity(DiskBalancerTestUtil.GB);
+v1.setUsed(2 * DiskBalancerTestUtil.GB);
+Assert.assertEquals(v1.getUsed(),v1.getCapacity());
+// If usage is less than capacity, usage should be set to the real usage
+DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK);
+v2.setCapacity(2*DiskBalancerTestUtil.GB);
+v2.setUsed(DiskBalancerTestUtil.GB);
+Asse

hadoop git commit: HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell.

2018-08-07 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 f2768eaa3 -> bf03b25f4


HDFS-13728. Disk Balancer should not fail if volume usage is greater than 
capacity. Contributed by Stephen O'Donnell.

(cherry picked from commit 6677717c689cc94a15f14c3466242e23652d473b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf03b25f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf03b25f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf03b25f

Branch: refs/heads/branch-3.1
Commit: bf03b25f4b940d9ee8507795fb85b2b6f36e2cf7
Parents: f2768ea
Author: Xiao Chen 
Authored: Tue Aug 7 22:04:41 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 7 22:05:51 2018 -0700

--
 .../diskbalancer/datamodel/DiskBalancerVolume.java | 17 -
 .../hdfs/server/diskbalancer/TestDataModels.java   | 16 
 2 files changed, 28 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf03b25f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
index a9fd7f0..e43b83e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
@@ -21,9 +21,10 @@ import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
-import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.hdfs.web.JsonUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 
@@ -35,6 +36,9 @@ public class DiskBalancerVolume {
   private static final ObjectReader READER =
   new ObjectMapper().readerFor(DiskBalancerVolume.class);
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(DiskBalancerVolume.class);
+
   private String path;
   private long capacity;
   private String storageType;
@@ -269,10 +273,13 @@ public class DiskBalancerVolume {
* @param dfsUsedSpace - dfsUsedSpace for this volume.
*/
   public void setUsed(long dfsUsedSpace) {
-Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(),
-"DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)",
-dfsUsedSpace, getCapacity());
-this.used = dfsUsedSpace;
+if (dfsUsedSpace > this.getCapacity()) {
+  LOG.warn("Volume usage ("+dfsUsedSpace+") is greater than capacity ("+
+this.getCapacity()+"). Setting volume usage to the capacity");
+  this.used = this.getCapacity();
+} else {
+  this.used = dfsUsedSpace;
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf03b25f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
index ace8212..12fbcf1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
@@ -224,4 +224,20 @@ public class TestDataModels {
 Assert
 .assertEquals(cluster.getNodes().size(), newCluster.getNodes().size());
   }
+
+  @Test
+  public void testUsageLimitedToCapacity() throws Exception {
+DiskBalancerTestUtil util = new DiskBalancerTestUtil();
+
+// If usage is greater than capacity, then it should be set to capacity
+DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK);
+v1.setCapacity(DiskBalancerTestUtil.GB);
+v1.setUsed(2 * DiskBalancerTestUtil.GB);
+Assert.assertEquals(v1.getUsed(),v1.getCapacity());
+// If usage is less than capacity, usage should be set to the real usage
+DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK);
+v2.setCapacity(2*DiskBalancerTestUtil.GB);
+v2.setUsed(DiskBalancerTestUtil.GB);
+Asse

hadoop git commit: HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell.

2018-08-07 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2b0f97724 -> 6677717c6


HDFS-13728. Disk Balancer should not fail if volume usage is greater than 
capacity. Contributed by Stephen O'Donnell.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6677717c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6677717c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6677717c

Branch: refs/heads/trunk
Commit: 6677717c689cc94a15f14c3466242e23652d473b
Parents: 2b0f977
Author: Xiao Chen 
Authored: Tue Aug 7 22:04:41 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 7 22:05:17 2018 -0700

--
 .../diskbalancer/datamodel/DiskBalancerVolume.java | 17 -
 .../hdfs/server/diskbalancer/TestDataModels.java   | 16 
 2 files changed, 28 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6677717c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
index a9fd7f0..e43b83e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
@@ -21,9 +21,10 @@ import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
-import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.hdfs.web.JsonUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 
@@ -35,6 +36,9 @@ public class DiskBalancerVolume {
   private static final ObjectReader READER =
   new ObjectMapper().readerFor(DiskBalancerVolume.class);
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(DiskBalancerVolume.class);
+
   private String path;
   private long capacity;
   private String storageType;
@@ -269,10 +273,13 @@ public class DiskBalancerVolume {
* @param dfsUsedSpace - dfsUsedSpace for this volume.
*/
   public void setUsed(long dfsUsedSpace) {
-Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(),
-"DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)",
-dfsUsedSpace, getCapacity());
-this.used = dfsUsedSpace;
+if (dfsUsedSpace > this.getCapacity()) {
+  LOG.warn("Volume usage ("+dfsUsedSpace+") is greater than capacity ("+
+this.getCapacity()+"). Setting volume usage to the capacity");
+  this.used = this.getCapacity();
+} else {
+  this.used = dfsUsedSpace;
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6677717c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
index ace8212..12fbcf1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
@@ -224,4 +224,20 @@ public class TestDataModels {
 Assert
 .assertEquals(cluster.getNodes().size(), newCluster.getNodes().size());
   }
+
+  @Test
+  public void testUsageLimitedToCapacity() throws Exception {
+DiskBalancerTestUtil util = new DiskBalancerTestUtil();
+
+// If usage is greater than capacity, then it should be set to capacity
+DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK);
+v1.setCapacity(DiskBalancerTestUtil.GB);
+v1.setUsed(2 * DiskBalancerTestUtil.GB);
+Assert.assertEquals(v1.getUsed(),v1.getCapacity());
+// If usage is less than capacity, usage should be set to the real usage
+DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK);
+v2.setCapacity(2*DiskBalancerTestUtil.GB);
+v2.setUsed(DiskBalancerTestUtil.GB);
+Assert.assertEquals(v1.getUsed(),DiskBalancerTestUtil.GB);
+  }
 }
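
The change replaces the hard Preconditions check with a clamp: if a volume reports usage greater than its capacity, the usage is pinned to the capacity and a warning is logged instead of failing the whole disk balancer run. A standalone sketch of that clamp (ClampUsageDemo is illustrative, not the actual DiskBalancerVolume class):

public class ClampUsageDemo {
  static final long GB = 1L << 30;

  private long capacity = GB;
  private long used;

  // Clamp instead of throwing so one odd volume report cannot abort a plan.
  void setUsed(long dfsUsedSpace) {
    if (dfsUsedSpace > capacity) {
      System.err.println("Volume usage (" + dfsUsedSpace
          + ") is greater than capacity (" + capacity
          + "). Setting volume usage to the capacity");
      this.used = capacity;
    } else {
      this.used = dfsUsedSpace;
    }
  }

  public static void main(String[] args) {
    ClampUsageDemo v = new ClampUsageDemo();
    v.setUsed(2 * GB);
    System.out.println(v.used == v.capacity);  // true: clamped to capacity
  }
}
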



hadoop git commit: HDFS-13799. TestEditLogTailer#testTriggersLogRollsForAllStandbyNN fails due to missing synchronization between rollEditsRpcExecutor and tailerThread shutdown. Contributed by Hrishikesh Gadre.

2018-08-07 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk d838179d8 -> 0f8cb127c


HDFS-13799. TestEditLogTailer#testTriggersLogRollsForAllStandbyNN fails due to 
missing synchronization
between rollEditsRpcExecutor and tailerThread shutdown. Contributed 
by Hrishikesh Gadre.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f8cb127
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f8cb127
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f8cb127

Branch: refs/heads/trunk
Commit: 0f8cb127cd759cdc6422d19d8b28f21198ddfd61
Parents: d838179
Author: Xiao Chen 
Authored: Tue Aug 7 16:11:37 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 7 16:13:41 2018 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f8cb127/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 2003f94..b306b8d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -234,7 +234,6 @@ public class EditLogTailer {
   }
   
   public void stop() throws IOException {
-rollEditsRpcExecutor.shutdown();
 tailerThread.setShouldRun(false);
 tailerThread.interrupt();
 try {
@@ -242,6 +241,8 @@ public class EditLogTailer {
 } catch (InterruptedException e) {
   LOG.warn("Edit log tailer thread exited with an exception");
   throw new IOException(e);
+} finally {
+  rollEditsRpcExecutor.shutdown();
 }
   }
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
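
The fix reorders shutdown: the tailer thread is interrupted and joined first, and the roll-edits executor is only shut down afterwards, from a finally block so it is released even when the join is interrupted. A standalone sketch of the same ordering with a plain Thread and ExecutorService; the names are illustrative, not the EditLogTailer fields:

import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class OrderedShutdownDemo {
  private final ExecutorService rpcExecutor = Executors.newSingleThreadExecutor();
  private final Thread worker = new Thread(() -> {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        Thread.sleep(100);   // stand-in for the tail-edits loop
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  });

  void start() { worker.start(); }

  // Join the worker before shutting the executor down, so work submitted by
  // the worker never races a closed executor; the finally block guarantees
  // the executor is released even if join() is interrupted.
  void stop() throws IOException {
    worker.interrupt();
    try {
      worker.join();
    } catch (InterruptedException e) {
      throw new IOException(e);
    } finally {
      rpcExecutor.shutdown();
    }
  }

  public static void main(String[] args) throws IOException {
    OrderedShutdownDemo d = new OrderedShutdownDemo();
    d.start();
    d.stop();
    System.out.println("shut down cleanly");
  }
}
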


