[hadoop] branch trunk updated (9501c69 -> 1f1a1ef)

2021-03-01 Thread hexiaoqiao
This is an automated email from the ASF dual-hosted git repository.

hexiaoqiao pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 9501c69  HDFS-15849. ExpiredHeartbeats metric should be of 
Type.COUNTER. Contributed by Qi Zhu.
 add 1f1a1ef  HDFS-15856: Make write pipeline retry times configurable. 
(#2721). Contributed by Qi Zhu

No new revisions were added by this update.

Summary of changes:
 .../src/main/java/org/apache/hadoop/hdfs/DataStreamer.java | 14 ++
 .../apache/hadoop/hdfs/client/HdfsClientConfigKeys.java|  3 +++
 .../org/apache/hadoop/hdfs/client/impl/DfsClientConf.java  | 12 
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml|  9 +
 4 files changed, 34 insertions(+), 4 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2.10 updated: HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed by Qi Zhu.

2021-03-01 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new cdd3982  HDFS-15849. ExpiredHeartbeats metric should be of 
Type.COUNTER. Contributed by Qi Zhu.
cdd3982 is described below

commit cdd3982db42ede60fe2d5752951dfe95aada19c9
Author: Konstantin V Shvachko 
AuthorDate: Mon Mar 1 16:48:48 2021 -0800

HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed 
by Qi Zhu.

(cherry picked from commit 9501c698f4789188f744f3a1fba7f1a9bb9b8aa4)
---
 .../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java  | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 24cf469..7394c60 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4007,7 +4007,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return blockManager.getMissingReplOneBlocksCount();
   }
   
-  @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
+  @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"},
+  type = Metric.Type.COUNTER)
   public int getExpiredHeartbeats() {
 return datanodeStatistics.getExpiredHeartbeats();
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index fea377f..1c42c70 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -17,7 +17,7 @@
 */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 
@@ -143,7 +143,7 @@ public class TestDatanodeReport {
   assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
 
   Thread.sleep(5000);
-  assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
+  assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
 } finally {
   cluster.shutdown();
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed by Qi Zhu.

2021-03-01 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 78bd68a  HDFS-15849. ExpiredHeartbeats metric should be of 
Type.COUNTER. Contributed by Qi Zhu.
78bd68a is described below

commit 78bd68a0b8c09af8017cccdadaa6dd0edf3a9071
Author: Konstantin V Shvachko 
AuthorDate: Mon Mar 1 16:48:48 2021 -0800

HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed 
by Qi Zhu.

(cherry picked from commit 9501c698f4789188f744f3a1fba7f1a9bb9b8aa4)
---
 .../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java  | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index edcbd0f..b13ba0a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4188,7 +4188,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return blockManager.getMissingReplOneBlocksCount();
   }
   
-  @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
+  @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"},
+  type = Metric.Type.COUNTER)
   public int getExpiredHeartbeats() {
 return datanodeStatistics.getExpiredHeartbeats();
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index fea377f..1c42c70 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -17,7 +17,7 @@
 */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 
@@ -143,7 +143,7 @@ public class TestDatanodeReport {
   assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
 
   Thread.sleep(5000);
-  assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
+  assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
 } finally {
   cluster.shutdown();
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed by Qi Zhu.

2021-03-01 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 1d48f3b  HDFS-15849. ExpiredHeartbeats metric should be of 
Type.COUNTER. Contributed by Qi Zhu.
1d48f3b is described below

commit 1d48f3b44c815193854e754b562dbf00a72f9b32
Author: Konstantin V Shvachko 
AuthorDate: Mon Mar 1 16:48:48 2021 -0800

HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed 
by Qi Zhu.

(cherry picked from commit 9501c698f4789188f744f3a1fba7f1a9bb9b8aa4)
---
 .../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java  | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b543311..319a0c2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4249,7 +4249,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return blockManager.getMissingReplOneBlocksCount();
   }
   
-  @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
+  @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"},
+  type = Metric.Type.COUNTER)
   public int getExpiredHeartbeats() {
 return datanodeStatistics.getExpiredHeartbeats();
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index b4d6fc9..69dbf64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -17,7 +17,7 @@
 */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 
@@ -143,7 +143,7 @@ public class TestDatanodeReport {
   assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
 
   Thread.sleep(5000);
-  assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
+  assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
 } finally {
   cluster.shutdown();
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed by Qi Zhu.

2021-03-01 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 56679e8  HDFS-15849. ExpiredHeartbeats metric should be of 
Type.COUNTER. Contributed by Qi Zhu.
56679e8 is described below

commit 56679e83bf57dbb0c52858f5916d094d1e3853a5
Author: Konstantin V Shvachko 
AuthorDate: Mon Mar 1 16:48:48 2021 -0800

HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed 
by Qi Zhu.

(cherry picked from commit 9501c698f4789188f744f3a1fba7f1a9bb9b8aa4)
---
 .../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java  | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b706e67..6a6405b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4560,7 +4560,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return blockManager.getMissingReplOneBlocksCount();
   }
   
-  @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
+  @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"},
+  type = Metric.Type.COUNTER)
   public int getExpiredHeartbeats() {
 return datanodeStatistics.getExpiredHeartbeats();
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index b4d6fc9..69dbf64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -17,7 +17,7 @@
 */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 
@@ -143,7 +143,7 @@ public class TestDatanodeReport {
   assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
 
   Thread.sleep(5000);
-  assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
+  assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
 } finally {
   cluster.shutdown();
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed by Qi Zhu.

2021-03-01 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9501c69  HDFS-15849. ExpiredHeartbeats metric should be of 
Type.COUNTER. Contributed by Qi Zhu.
9501c69 is described below

commit 9501c698f4789188f744f3a1fba7f1a9bb9b8aa4
Author: Konstantin V Shvachko 
AuthorDate: Mon Mar 1 16:48:48 2021 -0800

HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed 
by Qi Zhu.
---
 .../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java  | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index e98a59d..22b4b92 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4632,7 +4632,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return blockManager.getMissingReplOneBlocksCount();
   }
   
-  @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
+  @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"},
+  type = Metric.Type.COUNTER)
   public int getExpiredHeartbeats() {
 return datanodeStatistics.getExpiredHeartbeats();
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index b4d6fc9..69dbf64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -17,7 +17,7 @@
 */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 
@@ -143,7 +143,7 @@ public class TestDatanodeReport {
   assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
 
   Thread.sleep(5000);
-  assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
+  assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
 } finally {
   cluster.shutdown();
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (c3b3b36 -> 32353eb)

2021-03-01 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from c3b3b36  HDFS-14013. Skip any credentials stored in HDFS when starting 
ZKFC. Contributed by Stephen O'Donnell
 add 32353eb  HDFS-15854. Make some parameters configurable for 
SlowDiskTracker and SlowPeerTracker (#2718)

No new revisions were added by this update.

Summary of changes:
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  8 
 .../hdfs/server/blockmanagement/SlowDiskTracker.java   |  7 +--
 .../hdfs/server/blockmanagement/SlowPeerTracker.java   |  7 +--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 18 ++
 4 files changed, 36 insertions(+), 4 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: HDFS-14013. Skip any credentials stored in HDFS when starting ZKFC. Contributed by Stephen O'Donnell

2021-03-01 Thread sodonnell
This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 53f4621  HDFS-14013. Skip any credentials stored in HDFS when starting 
ZKFC. Contributed by Stephen O'Donnell
53f4621 is described below

commit 53f46214e4726e47aaf4cd6a2dae454f7b4b6438
Author: S O'Donnell 
AuthorDate: Mon Mar 1 11:36:41 2021 +

HDFS-14013. Skip any credentials stored in HDFS when starting ZKFC. 
Contributed by Stephen O'Donnell

(cherry picked from commit c3b3b36dee475e5f37f85946be1a42a1b1e622a5)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java

(cherry picked from commit 707806092f64f709814e18cb0d1b4f58731a5716)
---
 .../org/apache/hadoop/ha/ZKFailoverController.java | 18 --
 .../hdfs/tools/TestDFSZKFailoverController.java| 41 +-
 2 files changed, 48 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 0fb39b6..0373d4e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -31,11 +31,14 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.util.ZKUtil;
 import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
 import org.apache.hadoop.ha.HealthMonitor.State;
@@ -340,8 +343,19 @@ public abstract class ZKFailoverController {
   zkAcls = Ids.CREATOR_ALL_ACL;
 }
 
-// Parse authentication from configuration.
-List zkAuths = SecurityUtil.getZKAuthInfos(conf, ZK_AUTH_KEY);
+// Parse authentication from configuration. Exclude any Credential 
providers
+// using the hdfs scheme to avoid a circular dependency. As HDFS is likely
+// not started when ZKFC is started, we cannot read the credentials from 
it.
+Configuration c = conf;
+try {
+  c = ProviderUtils.excludeIncompatibleCredentialProviders(
+  conf, FileSystem.getFileSystemClass("hdfs", conf));
+} catch (UnsupportedFileSystemException e) {
+  // Should not happen in a real cluster, as the hdfs FS will always be
+  // present. Inside tests, the hdfs filesystem will not be present
+  LOG.debug("No filesystem found for the hdfs scheme", e);
+}
+List zkAuths = SecurityUtil.getZKAuthInfos(c, ZK_AUTH_KEY);
 
 // Sanity check configuration.
 Preconditions.checkArgument(zkQuorum != null,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
index 045ee7c..a2bd075 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
@@ -41,6 +41,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.MockNameNodeResourceChecker;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.net.ServerSocketUtil;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
@@ -86,14 +87,16 @@ public class TestDFSZKFailoverController extends 
ClientBaseWithFixes {
 ServerSocketUtil.getPort(10023, 100));
 conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2",
 ServerSocketUtil.getPort(10024, 100));
+  }
 
+  private void startCluster() throws Exception {
 // prefer non-ephemeral port to avoid port collision on restartNameNode
 MiniDFSNNTopology topology = new MiniDFSNNTopology()
-.addNameservice(new 

[hadoop] branch branch-3.2 updated: HDFS-14013. Skip any credentials stored in HDFS when starting ZKFC. Contributed by Stephen O'Donnell

2021-03-01 Thread sodonnell
This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 7078060  HDFS-14013. Skip any credentials stored in HDFS when starting 
ZKFC. Contributed by Stephen O'Donnell
7078060 is described below

commit 707806092f64f709814e18cb0d1b4f58731a5716
Author: S O'Donnell 
AuthorDate: Mon Mar 1 11:36:41 2021 +

HDFS-14013. Skip any credentials stored in HDFS when starting ZKFC. 
Contributed by Stephen O'Donnell

(cherry picked from commit c3b3b36dee475e5f37f85946be1a42a1b1e622a5)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
---
 .../org/apache/hadoop/ha/ZKFailoverController.java | 18 --
 .../hdfs/tools/TestDFSZKFailoverController.java| 41 +-
 2 files changed, 48 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 0fb39b6..0373d4e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -31,11 +31,14 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.util.ZKUtil;
 import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
 import org.apache.hadoop.ha.HealthMonitor.State;
@@ -340,8 +343,19 @@ public abstract class ZKFailoverController {
   zkAcls = Ids.CREATOR_ALL_ACL;
 }
 
-// Parse authentication from configuration.
-List zkAuths = SecurityUtil.getZKAuthInfos(conf, ZK_AUTH_KEY);
+// Parse authentication from configuration. Exclude any Credential 
providers
+// using the hdfs scheme to avoid a circular dependency. As HDFS is likely
+// not started when ZKFC is started, we cannot read the credentials from 
it.
+Configuration c = conf;
+try {
+  c = ProviderUtils.excludeIncompatibleCredentialProviders(
+  conf, FileSystem.getFileSystemClass("hdfs", conf));
+} catch (UnsupportedFileSystemException e) {
+  // Should not happen in a real cluster, as the hdfs FS will always be
+  // present. Inside tests, the hdfs filesystem will not be present
+  LOG.debug("No filesystem found for the hdfs scheme", e);
+}
+List zkAuths = SecurityUtil.getZKAuthInfos(c, ZK_AUTH_KEY);
 
 // Sanity check configuration.
 Preconditions.checkArgument(zkQuorum != null,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
index 045ee7c..a2bd075 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
@@ -41,6 +41,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.MockNameNodeResourceChecker;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.net.ServerSocketUtil;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
@@ -86,14 +87,16 @@ public class TestDFSZKFailoverController extends 
ClientBaseWithFixes {
 ServerSocketUtil.getPort(10023, 100));
 conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2",
 ServerSocketUtil.getPort(10024, 100));
+  }
 
+  private void startCluster() throws Exception {
 // prefer non-ephemeral port to avoid port collision on restartNameNode
 MiniDFSNNTopology topology = new MiniDFSNNTopology()
-.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-.addNN(new 

[hadoop] branch branch-3.3 updated: HDFS-14013. Skip any credentials stored in HDFS when starting ZKFC. Contributed by Stephen O'Donnell

2021-03-01 Thread sodonnell
This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 24a0304  HDFS-14013. Skip any credentials stored in HDFS when starting 
ZKFC. Contributed by Stephen O'Donnell
24a0304 is described below

commit 24a030415941fbc8b7333f024bf7a095350a5eea
Author: S O'Donnell 
AuthorDate: Mon Mar 1 11:36:41 2021 +

HDFS-14013. Skip any credentials stored in HDFS when starting ZKFC. 
Contributed by Stephen O'Donnell

(cherry picked from commit c3b3b36dee475e5f37f85946be1a42a1b1e622a5)
---
 .../org/apache/hadoop/ha/ZKFailoverController.java | 18 --
 .../hdfs/tools/TestDFSZKFailoverController.java| 41 +-
 2 files changed, 49 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 16d7bf7..1045940 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -31,11 +31,14 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.util.ZKUtil;
 import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
 import org.apache.hadoop.ha.HealthMonitor.State;
@@ -343,8 +346,19 @@ public abstract class ZKFailoverController {
   zkAcls = Ids.CREATOR_ALL_ACL;
 }
 
-// Parse authentication from configuration.
-List zkAuths = SecurityUtil.getZKAuthInfos(conf, ZK_AUTH_KEY);
+// Parse authentication from configuration. Exclude any Credential 
providers
+// using the hdfs scheme to avoid a circular dependency. As HDFS is likely
+// not started when ZKFC is started, we cannot read the credentials from 
it.
+Configuration c = conf;
+try {
+  c = ProviderUtils.excludeIncompatibleCredentialProviders(
+  conf, FileSystem.getFileSystemClass("hdfs", conf));
+} catch (UnsupportedFileSystemException e) {
+  // Should not happen in a real cluster, as the hdfs FS will always be
+  // present. Inside tests, the hdfs filesystem will not be present
+  LOG.debug("No filesystem found for the hdfs scheme", e);
+}
+List zkAuths = SecurityUtil.getZKAuthInfos(c, ZK_AUTH_KEY);
 
 // Sanity check configuration.
 Preconditions.checkArgument(zkQuorum != null,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
index 8f60b1d..0a7a87c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
@@ -93,14 +94,16 @@ public class TestDFSZKFailoverController extends 
ClientBaseWithFixes {
 ServerSocketUtil.getPort(10023, 100));
 conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2",
 ServerSocketUtil.getPort(10024, 100));
+  }
 
+  private void startCluster() throws Exception {
 // prefer non-ephemeral port to avoid port collision on restartNameNode
 MiniDFSNNTopology topology = new MiniDFSNNTopology()
-.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-.addNN(new MiniDFSNNTopology.NNConf("nn1")
-.setIpcPort(ServerSocketUtil.getPort(10021, 100)))
-.addNN(new MiniDFSNNTopology.NNConf("nn2")
-

[hadoop] branch trunk updated: HDFS-14013. Skip any credentials stored in HDFS when starting ZKFC. Contributed by Stephen O'Donnell

2021-03-01 Thread sodonnell
This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c3b3b36  HDFS-14013. Skip any credentials stored in HDFS when starting ZKFC. Contributed by Stephen O'Donnell
c3b3b36 is described below

commit c3b3b36dee475e5f37f85946be1a42a1b1e622a5
Author: S O'Donnell 
AuthorDate: Mon Mar 1 11:36:41 2021 +

HDFS-14013. Skip any credentials stored in HDFS when starting ZKFC. 
Contributed by Stephen O'Donnell
---
 .../org/apache/hadoop/ha/ZKFailoverController.java | 18 --
 .../hdfs/tools/TestDFSZKFailoverController.java| 41 +-
 2 files changed, 49 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 16d7bf7..1045940 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -31,11 +31,14 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException;
 import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.util.ZKUtil;
 import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
 import org.apache.hadoop.ha.HealthMonitor.State;
@@ -343,8 +346,19 @@ public abstract class ZKFailoverController {
   zkAcls = Ids.CREATOR_ALL_ACL;
 }
 
-// Parse authentication from configuration.
-List<ZKAuthInfo> zkAuths = SecurityUtil.getZKAuthInfos(conf, ZK_AUTH_KEY);
+// Parse authentication from configuration. Exclude any Credential providers
+// using the hdfs scheme to avoid a circular dependency. As HDFS is likely
+// not started when ZKFC is started, we cannot read the credentials from it.
+Configuration c = conf;
+try {
+  c = ProviderUtils.excludeIncompatibleCredentialProviders(
+  conf, FileSystem.getFileSystemClass("hdfs", conf));
+} catch (UnsupportedFileSystemException e) {
+  // Should not happen in a real cluster, as the hdfs FS will always be
+  // present. Inside tests, the hdfs filesystem will not be present
+  LOG.debug("No filesystem found for the hdfs scheme", e);
+}
+List<ZKAuthInfo> zkAuths = SecurityUtil.getZKAuthInfos(c, ZK_AUTH_KEY);
 
 // Sanity check configuration.
 Preconditions.checkArgument(zkQuorum != null,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
index 8f60b1d..0a7a87c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
@@ -93,14 +94,16 @@ public class TestDFSZKFailoverController extends 
ClientBaseWithFixes {
 ServerSocketUtil.getPort(10023, 100));
 conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2",
 ServerSocketUtil.getPort(10024, 100));
+  }
 
+  private void startCluster() throws Exception {
 // prefer non-ephemeral port to avoid port collision on restartNameNode
 MiniDFSNNTopology topology = new MiniDFSNNTopology()
-.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-.addNN(new MiniDFSNNTopology.NNConf("nn1")
-.setIpcPort(ServerSocketUtil.getPort(10021, 100)))
-.addNN(new MiniDFSNNTopology.NNConf("nn2")
-.setIpcPort(ServerSocketUtil.getPort(10022, 100;
+.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+