[hadoop] branch trunk updated: HDFS-15480. Ordered snapshot deletion: record snapshot deletion in XAttr (#2163)

2020-07-22 Thread szetszwo
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2d12496  HDFS-15480. Ordered snapshot deletion: record snapshot deletion in XAttr (#2163)
2d12496 is described below

commit 2d12496643b1b7cfa4eb270ec9b2fcdb78a58798
Author: bshashikant 
AuthorDate: Thu Jul 23 04:46:27 2020 +0530

HDFS-15480. Ordered snapshot deletion: record snapshot deletion in XAttr (#2163)
---
 .../hdfs/server/common/HdfsServerConstants.java|   1 +
 .../hdfs/server/namenode/FSDirSnapshotOp.java  |  20 ---
 .../hadoop/hdfs/server/namenode/FSDirXAttrOp.java  |  13 +-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  10 +-
 .../server/namenode/snapshot/SnapshotManager.java  |  50 +--
 .../namenode/TestOrderedSnapshotDeletion.java  | 157 +
 6 files changed, 186 insertions(+), 65 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 78d4289..a55985e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -366,6 +366,7 @@ public interface HdfsServerConstants {
   "security.hdfs.unreadable.by.superuser";
   String XATTR_ERASURECODING_POLICY =
   "system.hdfs.erasurecoding.policy";
+  String SNAPSHOT_XATTR_NAME = "system.hdfs.snapshot.deleted";
 
   String XATTR_SATISFY_STORAGE_POLICY = "user.hdfs.sps";
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
index c2eb401..923c6a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -252,25 +251,6 @@ class FSDirSnapshotOp {
 
 // time of snapshot deletion
 final long now = Time.now();
-if (fsd.isSnapshotDeletionOrdered()) {
-  final INodeDirectory srcRoot = snapshotManager.getSnapshottableRoot(iip);
-  final DirectorySnapshottableFeature snapshottable
-  = srcRoot.getDirectorySnapshottableFeature();
-  final Snapshot snapshot = snapshottable.getSnapshotByName(
-  srcRoot, snapshotName);
-
-  // Diffs must be not empty since a snapshot exists in the list
-  final int earliest = snapshottable.getDiffs().iterator().next()
-  .getSnapshotId();
-  if (snapshot.getId() != earliest) {
-throw new SnapshotException("Failed to delete snapshot " + snapshotName
-+ " from directory " + srcRoot.getFullPathName()
-+ ": " + snapshot + " is not the earliest snapshot id=" + earliest
-+ " (" + DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED
-+ " is " + fsd.isSnapshotDeletionOrdered() + ")");
-  }
-}
-
 final INode.BlocksMapUpdateInfo collectedBlocks = deleteSnapshot(
 fsd, snapshotManager, iip, snapshotName, now);
 fsd.getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
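
Editor's note: the block removed above enforced ordered snapshot deletion inline, failing the delete unless the target snapshot was the earliest one in the directory's diff list; this commit instead records the deletion in the new `system.hdfs.snapshot.deleted` XAttr. As a minimal, hypothetical illustration of the earliest-snapshot rule that the removed check implemented (standalone code, not NameNode internals; it assumes snapshot ids are assigned in creation order):

```java
import java.util.List;

/** Illustrative only: mirrors the earliest-snapshot rule from the removed check. */
public final class OrderedDeletionCheck {
  private OrderedDeletionCheck() {}

  /**
   * @param snapshotIdsInCreationOrder snapshot ids of one snapshottable directory,
   *                                   oldest first (assumed non-empty)
   * @param idToDelete id of the snapshot being deleted
   * @return true if the deletion honours ordered-deletion semantics
   */
  static boolean isOrderedDeletion(List<Integer> snapshotIdsInCreationOrder,
                                   int idToDelete) {
    final int earliest = snapshotIdsInCreationOrder.get(0);
    return idToDelete == earliest;
  }

  public static void main(String[] args) {
    // With snapshots s0(id=1), s1(id=2), s2(id=3), only id=1 may be deleted first.
    System.out.println(isOrderedDeletion(List.of(1, 2, 3), 1)); // true
    System.out.println(isOrderedDeletion(List.of(1, 2, 3), 3)); // false
  }
}
```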
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index ff82610..4f215ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -41,12 +41,13 @@ import java.util.EnumSet;
 import java.util.List;
 import java.util.ListIterator;
 
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO;
+import static 

[hadoop] branch branch-3.3 updated: HADOOP-17092. ABFS: Making AzureADAuthenticator.getToken() throw HttpException

2020-07-22 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 1ae72d2  HADOOP-17092. ABFS: Making AzureADAuthenticator.getToken() throw HttpException
1ae72d2 is described below

commit 1ae72d243819952d270d65ff0eee31e3eda3c1b4
Author: bilaharith <52483117+bilahar...@users.noreply.github.com>
AuthorDate: Tue Jul 21 21:48:54 2020 +0530

HADOOP-17092. ABFS: Making AzureADAuthenticator.getToken() throw HttpException

- Contributed by Bilahari T H

Change-Id: Id9576d9509faaf057bf419ccb1879ac0cef7a07b
---
 .../hadoop/fs/azurebfs/AbfsConfiguration.java  |  27 +
 .../fs/azurebfs/AzureBlobFileSystemStore.java  |   5 +
 .../fs/azurebfs/constants/ConfigurationKeys.java   |   6 ++
 .../constants/FileSystemConfigurations.java|   8 ++
 .../fs/azurebfs/oauth2/AzureADAuthenticator.java   |  42 ++--
 .../azurebfs/services/ExponentialRetryPolicy.java  |  23 
 .../hadoop-azure/src/site/markdown/abfs.md |  20 +++-
 .../services/TestAzureADAuthenticator.java | 120 +
 8 files changed, 244 insertions(+), 7 deletions(-)

diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java
index 7996ec1..74f98a0 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.azurebfs.oauth2.RefreshTokenBasedTokenProvider;
 import org.apache.hadoop.fs.azurebfs.oauth2.UserPasswordTokenProvider;
 import org.apache.hadoop.fs.azurebfs.security.AbfsDelegationTokenManager;
 import org.apache.hadoop.fs.azurebfs.services.AuthType;
+import org.apache.hadoop.fs.azurebfs.services.ExponentialRetryPolicy;
 import org.apache.hadoop.fs.azurebfs.services.KeyProvider;
 import org.apache.hadoop.fs.azurebfs.services.SimpleKeyProvider;
 import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory;
@@ -119,6 +120,26 @@ public class AbfsConfiguration{
   DefaultValue = DEFAULT_CUSTOM_TOKEN_FETCH_RETRY_COUNT)
   private int customTokenFetchRetryCount;
 
+  @IntegerConfigurationValidatorAnnotation(ConfigurationKey = AZURE_OAUTH_TOKEN_FETCH_RETRY_COUNT,
+  MinValue = 0,
+  DefaultValue = DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_ATTEMPTS)
+  private int oauthTokenFetchRetryCount;
+
+  @IntegerConfigurationValidatorAnnotation(ConfigurationKey = AZURE_OAUTH_TOKEN_FETCH_RETRY_MIN_BACKOFF,
+  MinValue = 0,
+  DefaultValue = DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MIN_BACKOFF_INTERVAL)
+  private int oauthTokenFetchRetryMinBackoff;
+
+  @IntegerConfigurationValidatorAnnotation(ConfigurationKey = AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_BACKOFF,
+  MinValue = 0,
+  DefaultValue = DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_BACKOFF_INTERVAL)
+  private int oauthTokenFetchRetryMaxBackoff;
+
+  @IntegerConfigurationValidatorAnnotation(ConfigurationKey = AZURE_OAUTH_TOKEN_FETCH_RETRY_DELTA_BACKOFF,
+  MinValue = 0,
+  DefaultValue = DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_DELTA_BACKOFF)
+  private int oauthTokenFetchRetryDeltaBackoff;
+
   @LongConfigurationValidatorAnnotation(ConfigurationKey = AZURE_BLOCK_SIZE_PROPERTY_NAME,
   MinValue = 0,
   MaxValue = MAX_AZURE_BLOCK_SIZE,
@@ -720,6 +741,12 @@ public class AbfsConfiguration{
 validator.ThrowIfInvalid()).validate(value);
   }
 
+  public ExponentialRetryPolicy getOauthTokenFetchRetryPolicy() {
+return new ExponentialRetryPolicy(oauthTokenFetchRetryCount,
+oauthTokenFetchRetryMinBackoff, oauthTokenFetchRetryMaxBackoff,
+oauthTokenFetchRetryDeltaBackoff);
+  }
+
   @VisibleForTesting
   void setReadBufferSize(int bufferSize) {
 this.readBufferSize = bufferSize;
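
Editor's note: the new getOauthTokenFetchRetryPolicy() above feeds the four new knobs (retry count, min/max backoff, delta backoff) into an ExponentialRetryPolicy used for OAuth token fetches. As a rough, hypothetical sketch of how such an exponential-backoff policy typically behaves (illustrative only, not the actual ABFS ExponentialRetryPolicy implementation), the delay grows with the attempt number and is clamped between the min and max backoff:

```java
import java.util.concurrent.ThreadLocalRandom;

/** Illustrative backoff calculator; field names mirror the configuration values above. */
class SimpleExponentialBackoff {
  private final int maxRetries;
  private final long minBackoffMs;
  private final long maxBackoffMs;
  private final long deltaBackoffMs;

  SimpleExponentialBackoff(int maxRetries, long minBackoffMs,
                           long maxBackoffMs, long deltaBackoffMs) {
    this.maxRetries = maxRetries;
    this.minBackoffMs = minBackoffMs;
    this.maxBackoffMs = maxBackoffMs;
    this.deltaBackoffMs = deltaBackoffMs;
  }

  /** A caller keeps retrying while this returns true. */
  boolean shouldRetry(int retryCount) {
    return retryCount < maxRetries;
  }

  /** Delay grows roughly as (2^retryCount - 1) * jittered delta, clamped to [min, max]. */
  long retryIntervalMs(int retryCount) {
    long jitter = deltaBackoffMs / 2
        + ThreadLocalRandom.current().nextLong(deltaBackoffMs / 2 + 1);
    long exponential = (long) ((Math.pow(2, retryCount) - 1) * jitter);
    return Math.min(maxBackoffMs, minBackoffMs + exponential);
  }
}
```

A caller would loop while shouldRetry(attempt) holds, sleeping retryIntervalMs(attempt) between token-fetch attempts.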
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index 1ed4683..35fd439 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.fs.azurebfs.enums.Trilean;
 import org.apache.hadoop.fs.azurebfs.extensions.SASTokenProvider;
 import org.apache.hadoop.fs.azurebfs.extensions.ExtensionHelper;
 import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider;
+import org.apache.hadoop.fs.azurebfs.oauth2.AzureADAuthenticator;
 import org.apache.hadoop.fs.azurebfs.oauth2.IdentityTransformer;
 import 

[hadoop] branch trunk updated (d5b4766 -> 48a7c5b)

2020-07-22 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 from d5b4766  HADOOP-17147. Dead link in hadoop-kms/index.md.vm. Contributed by Xieming Li.
 add 48a7c5b  HADOOP-17113. Adding ReadAhead Counters in ABFS (#2154)

No new revisions were added by this update.

Summary of changes:
 .../fs/azurebfs/services/AbfsInputStream.java  |  6 ++
 .../services/AbfsInputStreamStatistics.java| 12 +++
 .../services/AbfsInputStreamStatisticsImpl.java| 36 
 .../azurebfs/ITestAbfsInputStreamStatistics.java   | 95 ++
 4 files changed, 149 insertions(+)





[hadoop] branch branch-3.3 updated: HADOOP-17147. Dead link in hadoop-kms/index.md.vm. Contributed by Xieming Li.

2020-07-22 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new d35f7ea  HADOOP-17147. Dead link in hadoop-kms/index.md.vm. Contributed by Xieming Li.
d35f7ea is described below

commit d35f7eaeb9982dcc0c8cf6a57018d47db717b9e0
Author: Akira Ajisaka 
AuthorDate: Thu Jul 23 00:39:11 2020 +0900

HADOOP-17147. Dead link in hadoop-kms/index.md.vm. Contributed by Xieming Li.

(cherry picked from commit d5b476615820a7fa75b41e323db5deb5c2ed3bd5)
---
 hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
index 5490219..656e1cf 100644
--- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
@@ -307,7 +307,7 @@ Configure `etc/hadoop/ssl-server.xml` with proper values, for example:
 ```
 
 The SSL passwords can be secured by a credential provider. See
-[Credential Provider API](../../../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
+[Credential Provider API](../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
 
 You need to create an SSL certificate for the KMS. As the `kms` Unix user, using the Java `keytool` command to create the SSL certificate:
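
Editor's note: the credential provider mentioned in the patched sentence can be populated with the `hadoop credential` CLI or programmatically. Below is a minimal, hypothetical Java sketch that stores an SSL keystore password in a local JCEKS provider; the provider path and alias are placeholders chosen for illustration:

```java
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;

public class StoreSslPassword {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical local provider path; point this at a real JCEKS file in practice.
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        "jceks://file/tmp/kms-ssl.jceks");

    List<CredentialProvider> providers = CredentialProviderFactory.getProviders(conf);
    CredentialProvider provider = providers.get(0);

    // Alias chosen for illustration; ssl-server.xml would reference the same alias.
    provider.createCredentialEntry("ssl.server.keystore.password",
        "changeit".toCharArray());
    provider.flush();
  }
}
```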
 





[hadoop] branch branch-3.2 updated: HADOOP-17147. Dead link in hadoop-kms/index.md.vm. Contributed by Xieming Li.

2020-07-22 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 0fb7c48  HADOOP-17147. Dead link in hadoop-kms/index.md.vm. Contributed by Xieming Li.
0fb7c48 is described below

commit 0fb7c48acb4996913bb7d98446e32ad5479475c9
Author: Akira Ajisaka 
AuthorDate: Thu Jul 23 00:39:11 2020 +0900

HADOOP-17147. Dead link in hadoop-kms/index.md.vm. Contributed by Xieming Li.

(cherry picked from commit d5b476615820a7fa75b41e323db5deb5c2ed3bd5)
---
 hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
index 5490219..656e1cf 100644
--- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
@@ -307,7 +307,7 @@ Configure `etc/hadoop/ssl-server.xml` with proper values, for example:
 ```
 
 The SSL passwords can be secured by a credential provider. See
-[Credential Provider API](../../../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
+[Credential Provider API](../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
 
 You need to create an SSL certificate for the KMS. As the `kms` Unix user, using the Java `keytool` command to create the SSL certificate:
 





[hadoop] branch trunk updated (ac9a07b -> d5b4766)

2020-07-22 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 from ac9a07b  HDFS-15478: When Empty mount points, we are assigning fallback link to self. But it should not use full URI for target fs. (#2160). Contributed by Uma Maheswara Rao G.
 add d5b4766  HADOOP-17147. Dead link in hadoop-kms/index.md.vm. Contributed by Xieming Li.

No new revisions were added by this update.

Summary of changes:
 hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)





[hadoop] branch branch-3.1 updated: HADOOP-17147. Dead link in hadoop-kms/index.md.vm. Contributed by Xieming Li.

2020-07-22 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 416edc5  HADOOP-17147. Dead link in hadoop-kms/index.md.vm. Contributed by Xieming Li.
416edc5 is described below

commit 416edc5a7bf1fd1ea1569bf7b83d04641a4cfefa
Author: Akira Ajisaka 
AuthorDate: Thu Jul 23 00:39:11 2020 +0900

HADOOP-17147. Dead link in hadoop-kms/index.md.vm. Contributed by Xieming Li.

(cherry picked from commit d5b476615820a7fa75b41e323db5deb5c2ed3bd5)
---
 hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
index 5490219..656e1cf 100644
--- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
@@ -307,7 +307,7 @@ Configure `etc/hadoop/ssl-server.xml` with proper values, for example:
 ```
 
 The SSL passwords can be secured by a credential provider. See
-[Credential Provider API](../../../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
+[Credential Provider API](../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
 
 You need to create an SSL certificate for the KMS. As the `kms` Unix user, using the Java `keytool` command to create the SSL certificate:
 





[hadoop] branch branch-3.2 updated: HADOOP-17100. Replace Guava Supplier with Java8+ Supplier in Hadoop. Contributed by Ahmed Hussein.

2020-07-22 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 27a97e4  HADOOP-17100. Replace Guava Supplier with Java8+ Supplier in Hadoop. Contributed by Ahmed Hussein.
27a97e4 is described below

commit 27a97e4f2877b77e86d0d8d9d91cc2dd9ef18652
Author: Ayush Saxena 
AuthorDate: Wed Jul 22 18:39:49 2020 +0530

HADOOP-17100. Replace Guava Supplier with Java8+ Supplier in Hadoop. Contributed by Ahmed Hussein.
---
 .../src/main/resources/checkstyle/checkstyle.xml   |  2 +-
 .../test/java/org/apache/hadoop/conf/TestReconfiguration.java  |  2 +-
 .../test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java   |  2 +-
 .../java/org/apache/hadoop/ha/TestZKFailoverController.java|  2 +-
 .../src/test/java/org/apache/hadoop/ipc/TestIPC.java   |  2 +-
 .../org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java |  2 +-
 .../apache/hadoop/metrics2/lib/TestMutableRollingAverages.java |  2 +-
 .../java/org/apache/hadoop/security/TestGroupsCaching.java |  2 +-
 .../hadoop/security/ssl/TestReloadingX509TrustManager.java |  2 +-
 .../token/delegation/TestZKDelegationTokenSecretManager.java   |  2 +-
 .../src/test/java/org/apache/hadoop/test/GenericTestUtils.java | 10 +-
 .../test/java/org/apache/hadoop/test/TestGenericTestUtils.java |  2 +-
 .../test/java/org/apache/hadoop/tracing/SetSpanReceiver.java   |  2 +-
 .../src/test/java/org/apache/hadoop/util/TestShell.java|  2 +-
 .../java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java  |  2 +-
 .../org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java   |  2 +-
 .../hadoop/hdfs/server/federation/FederationTestUtils.java |  2 +-
 .../hdfs/server/federation/router/TestRouterAdminCLI.java  |  2 +-
 .../hadoop/hdfs/server/federation/router/TestRouterQuota.java  |  2 +-
 .../server/federation/router/TestRouterRPCClientRetries.java   |  2 +-
 .../hadoop/hdfs/server/federation/router/TestRouterRpc.java|  2 +-
 .../org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java |  2 +-
 .../hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java   |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java  |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java   |  2 +-
 .../java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java |  2 +-
 .../hadoop/hdfs/TestClientProtocolForPipelineRecovery.java |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java |  2 +-
 .../java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java |  2 +-
 .../java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java  |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestDecommission.java |  2 +-
 .../java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java |  2 +-
 .../org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java |  2 +-
 .../test/java/org/apache/hadoop/hdfs/TestFileCorruption.java   |  2 +-
 .../test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java   |  2 +-
 .../java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java  |  2 +-
 .../test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestPread.java|  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestQuota.java|  2 +-
 .../org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java   |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestReplication.java  |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java |  2 +-
 .../hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java   |  2 +-
 .../org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java|  2 +-
 .../hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java  |  2 +-
 .../hadoop/hdfs/qjournal/server/TestJournalNodeSync.java   |  2 +-
 .../hdfs/server/blockmanagement/TestBlockManagerSafeMode.java  |  2 +-
 .../server/blockmanagement/TestBlockReportRateLimiting.java|  2 +-
 .../blockmanagement/TestNameNodePrunesMissingStorages.java |  2 +-
 .../server/blockmanagement/TestPendingInvalidateBlock.java |  2 +-
 .../hdfs/server/blockmanagement/TestPendingReconstruction.java |  2 +-
 .../hdfs/server/blockmanagement/TestRBWBlockInvalidation.java  |  2 +-
 .../hdfs/server/blockmanagement/TestSlowDiskTracker.java   |  2 +-
 .../apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java  |  2 +-
 .../apache/hadoop/hdfs/server/datanode/TestBPOfferService.java |  2 +-
 .../apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java  |  2 +-
 .../apache/hadoop/hdfs/server/datanode/TestBlockScanner.java   |  2 +-
 .../hadoop/hdfs/server/datanode/TestCorruptMetadataFile.java   |  2 +-
 .../hadoop/hdfs/server/datanode/TestDataNodeLifeline.java  |  2 +-
 .../apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java |  2 +-
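
Editor's note: the diffstat above (truncated in this digest) touches test code that polls for a condition via GenericTestUtils.waitFor; the change is essentially a one-line import swap per file, from com.google.common.base.Supplier to java.util.function.Supplier. A hedged sketch of the call pattern involved (illustrative only, not an excerpt from any of the listed files):

```java
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;              // previously com.google.common.base.Supplier

import org.apache.hadoop.test.GenericTestUtils;

public class WaitForExample {
  public static void main(String[] args) throws TimeoutException, InterruptedException {
    AtomicBoolean done = new AtomicBoolean(false);
    new Thread(() -> done.set(true)).start();

    // The same lambda/method reference satisfies java.util.function.Supplier<Boolean>
    // exactly as it satisfied the Guava Supplier before the change.
    Supplier<Boolean> check = done::get;
    GenericTestUtils.waitFor(check, 100, 10_000);  // poll every 100 ms, up to 10 s
  }
}
```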
 

[hadoop] branch branch-3.1 updated: HADOOP-17100. Replace Guava Supplier with Java8+ Supplier in Hadoop. Contributed by Ahmed Hussein.

2020-07-22 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 4592af8  HADOOP-17100. Replace Guava Supplier with Java8+ Supplier in Hadoop. Contributed by Ahmed Hussein.
4592af8 is described below

commit 4592af898bf2fa634736b04fab5b2d2499517567
Author: Ayush Saxena 
AuthorDate: Wed Jul 22 19:05:13 2020 +0530

HADOOP-17100. Replace Guava Supplier with Java8+ Supplier in Hadoop. Contributed by Ahmed Hussein.
---
 .../src/main/resources/checkstyle/checkstyle.xml  |  2 +-
 .../java/org/apache/hadoop/conf/TestReconfiguration.java  |  2 +-
 .../java/org/apache/hadoop/fs/FCStatisticsBaseTest.java   |  2 +-
 .../org/apache/hadoop/ha/TestZKFailoverController.java|  2 +-
 .../src/test/java/org/apache/hadoop/ipc/TestIPC.java  |  2 +-
 .../hadoop/metrics2/impl/TestMetricsSystemImpl.java   |  2 +-
 .../hadoop/metrics2/lib/TestMutableRollingAverages.java   |  2 +-
 .../org/apache/hadoop/security/TestGroupsCaching.java |  2 +-
 .../security/ssl/TestReloadingX509TrustManager.java   |  2 +-
 .../delegation/TestZKDelegationTokenSecretManager.java|  2 +-
 .../java/org/apache/hadoop/test/GenericTestUtils.java | 10 +-
 .../java/org/apache/hadoop/test/TestGenericTestUtils.java |  2 +-
 .../java/org/apache/hadoop/tracing/SetSpanReceiver.java   |  2 +-
 .../src/test/java/org/apache/hadoop/util/TestShell.java   |  2 +-
 .../org/apache/hadoop/crypto/key/kms/server/TestKMS.java  |  2 +-
 .../apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java  |  2 +-
 .../hdfs/server/federation/FederationTestUtils.java   |  2 +-
 .../hdfs/server/federation/router/TestRouterAdminCLI.java |  2 +-
 .../hdfs/server/federation/router/TestRouterQuota.java|  2 +-
 .../federation/router/TestRouterRPCClientRetries.java |  2 +-
 .../hdfs/server/federation/router/TestRouterRpc.java  |  2 +-
 .../apache/hadoop/fs/TestEnhancedByteBufferAccess.java|  2 +-
 .../hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java  |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java |  2 +-
 .../test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java  |  2 +-
 .../org/apache/hadoop/hdfs/TestBalancerBandwidth.java |  2 +-
 .../hdfs/TestClientProtocolForPipelineRecovery.java   |  2 +-
 .../test/java/org/apache/hadoop/hdfs/TestDFSShell.java|  2 +-
 .../org/apache/hadoop/hdfs/TestDataTransferKeepalive.java |  2 +-
 .../org/apache/hadoop/hdfs/TestDatanodeRegistration.java  |  2 +-
 .../java/org/apache/hadoop/hdfs/TestDecommission.java |  2 +-
 .../org/apache/hadoop/hdfs/TestEncryptedTransfer.java |  2 +-
 .../apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java|  2 +-
 .../java/org/apache/hadoop/hdfs/TestFileCorruption.java   |  2 +-
 .../java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java   |  2 +-
 .../org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java  |  5 +++--
 .../java/org/apache/hadoop/hdfs/TestMaintenanceState.java |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestPread.java   |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestQuota.java   |  2 +-
 .../apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java  |  2 +-
 .../test/java/org/apache/hadoop/hdfs/TestReplication.java |  2 +-
 .../test/java/org/apache/hadoop/hdfs/TestSafeMode.java|  2 +-
 .../hdfs/client/impl/TestBlockReaderLocalMetrics.java |  2 +-
 .../apache/hadoop/hdfs/qjournal/MiniJournalCluster.java   |  2 +-
 .../hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java |  2 +-
 .../hadoop/hdfs/qjournal/server/TestJournalNodeSync.java  |  2 +-
 .../server/blockmanagement/TestBlockManagerSafeMode.java  |  2 +-
 .../blockmanagement/TestBlockReportRateLimiting.java  |  2 +-
 .../TestNameNodePrunesMissingStorages.java|  2 +-
 .../blockmanagement/TestPendingInvalidateBlock.java   |  2 +-
 .../server/blockmanagement/TestPendingReconstruction.java |  2 +-
 .../server/blockmanagement/TestRBWBlockInvalidation.java  |  2 +-
 .../hdfs/server/blockmanagement/TestSlowDiskTracker.java  |  2 +-
 .../hadoop/hdfs/server/datanode/DataNodeTestUtils.java|  2 +-
 .../hadoop/hdfs/server/datanode/TestBPOfferService.java   |  2 +-
 .../hadoop/hdfs/server/datanode/TestBlockRecovery.java|  2 +-
 .../hadoop/hdfs/server/datanode/TestBlockScanner.java |  2 +-
 .../hdfs/server/datanode/TestCorruptMetadataFile.java |  2 +-
 .../hadoop/hdfs/server/datanode/TestDataNodeLifeline.java |  2 +-
 .../hadoop/hdfs/server/datanode/TestDataNodeMXBean.java   |  3 ++-
 .../hadoop/hdfs/server/datanode/TestDataNodeMetrics.java  |  2 +-
 .../hdfs/server/datanode/TestDataNodeMetricsLogger.java   |  2 +-
 .../hdfs/server/datanode/TestDataNodeVolumeFailure.java   |  2 +-
 .../server/datanode/TestDatanodeProtocolRetryPolicy.java  |  2 +-
 .../apache/hadoop/hdfs/server/datanode/TestDiskError.java |  2 +-
 

[hadoop] branch branch-3.3 updated: HADOOP-17100. Replace Guava Supplier with Java8+ Supplier in Hadoop. Contributed by Ahmed Hussein.

2020-07-22 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new e3b8d4e  HADOOP-17100. Replace Guava Supplier with Java8+ Supplier in Hadoop. Contributed by Ahmed Hussein.
e3b8d4e is described below

commit e3b8d4eb05d7334ad5d3084d4ddbadb8da309381
Author: Ayush Saxena 
AuthorDate: Wed Jul 22 18:21:14 2020 +0530

HADOOP-17100. Replace Guava Supplier with Java8+ Supplier in Hadoop. Contributed by Ahmed Hussein.
---
 .../src/main/resources/checkstyle/checkstyle.xml   |  2 +-
 .../test/java/org/apache/hadoop/conf/TestReconfiguration.java  |  2 +-
 .../test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java   |  2 +-
 .../java/org/apache/hadoop/ha/TestZKFailoverController.java|  2 +-
 .../java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java  |  2 +-
 .../src/test/java/org/apache/hadoop/ipc/TestIPC.java   |  2 +-
 .../org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java |  2 +-
 .../apache/hadoop/metrics2/lib/TestMutableRollingAverages.java |  2 +-
 .../java/org/apache/hadoop/security/TestGroupsCaching.java |  2 +-
 .../hadoop/security/ssl/TestReloadingX509TrustManager.java |  2 +-
 .../token/delegation/TestZKDelegationTokenSecretManager.java   |  2 +-
 .../src/test/java/org/apache/hadoop/test/GenericTestUtils.java | 10 +-
 .../test/java/org/apache/hadoop/test/TestGenericTestUtils.java |  2 +-
 .../test/java/org/apache/hadoop/tracing/SetSpanReceiver.java   |  2 +-
 .../src/test/java/org/apache/hadoop/util/TestShell.java|  2 +-
 .../java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java  |  2 +-
 .../org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java   |  2 +-
 .../hadoop/hdfs/server/federation/FederationTestUtils.java |  2 +-
 .../hdfs/server/federation/router/TestRouterAdminCLI.java  |  2 +-
 .../hadoop/hdfs/server/federation/router/TestRouterQuota.java  |  2 +-
 .../server/federation/router/TestRouterRPCClientRetries.java   |  2 +-
 .../hadoop/hdfs/server/federation/router/TestRouterRpc.java|  2 +-
 .../org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java |  2 +-
 .../hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java   |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java  |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java   |  2 +-
 .../java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java |  2 +-
 .../hadoop/hdfs/TestClientProtocolForPipelineRecovery.java |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java |  2 +-
 .../java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java |  2 +-
 .../java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java  |  2 +-
 .../java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestDecommission.java |  2 +-
 .../java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java |  2 +-
 .../org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java |  2 +-
 .../test/java/org/apache/hadoop/hdfs/TestFileCorruption.java   |  2 +-
 .../test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java   |  2 +-
 .../java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java  |  2 +-
 .../test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestPread.java|  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestQuota.java|  2 +-
 .../org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java   |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestReplication.java  |  2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java |  2 +-
 .../hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java   |  2 +-
 .../org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java|  2 +-
 .../hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java  |  2 +-
 .../hadoop/hdfs/qjournal/server/TestJournalNodeSync.java   |  2 +-
 .../hdfs/server/blockmanagement/TestBlockManagerSafeMode.java  |  2 +-
 .../server/blockmanagement/TestBlockReportRateLimiting.java|  2 +-
 .../blockmanagement/TestNameNodePrunesMissingStorages.java |  2 +-
 .../server/blockmanagement/TestPendingInvalidateBlock.java |  2 +-
 .../hdfs/server/blockmanagement/TestPendingReconstruction.java |  2 +-
 .../hdfs/server/blockmanagement/TestRBWBlockInvalidation.java  |  2 +-
 .../hdfs/server/blockmanagement/TestSlowDiskTracker.java   |  2 +-
 .../apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java  |  2 +-
 .../apache/hadoop/hdfs/server/datanode/TestBPOfferService.java |  2 +-
 .../apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java  |  2 +-
 .../apache/hadoop/hdfs/server/datanode/TestBlockScanner.java   |  2 +-
 .../hadoop/hdfs/server/datanode/TestCorruptMetadataFile.java   |  2 +-
 

[hadoop] branch trunk updated: HDFS-15478: When Empty mount points, we are assigning fallback link to self. But it should not use full URI for target fs. (#2160). Contributed by Uma Maheswara Rao G.

2020-07-22 Thread umamahesh
This is an automated email from the ASF dual-hosted git repository.

umamahesh pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ac9a07b  HDFS-15478: When Empty mount points, we are assigning fallback link to self. But it should not use full URI for target fs. (#2160). Contributed by Uma Maheswara Rao G.
ac9a07b is described below

commit ac9a07b51aefd0fd3b4602adc844ab0f172835e3
Author: Uma Maheswara Rao G 
AuthorDate: Tue Jul 21 23:29:10 2020 -0700

HDFS-15478: When Empty mount points, we are assigning fallback link to self. But it should not use full URI for target fs. (#2160). Contributed by Uma Maheswara Rao G.
---
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java|  2 +-
 .../viewfs/TestViewFsOverloadSchemeListStatus.java | 27 +++---
 .../src/site/markdown/ViewFsOverloadScheme.md  |  2 ++
 3 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 1fc531e..baf0027 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -294,7 +294,7 @@ public class ViewFileSystem extends FileSystem {
   myUri = new URI(getScheme(), authority, "/", null, null);
   boolean initingUriAsFallbackOnNoMounts =
   !FsConstants.VIEWFS_TYPE.equals(getType());
-  fsState = new InodeTree(conf, tableName, theUri,
+  fsState = new InodeTree(conf, tableName, myUri,
   initingUriAsFallbackOnNoMounts) {
 @Override
 protected FileSystem getTargetFileSystem(final URI uri)
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java
index 300fdd8..7afc789 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java
@@ -127,19 +127,30 @@ public class TestViewFsOverloadSchemeListStatus {
 
   /**
* Tests that ViewFSOverloadScheme should consider initialized fs as fallback
-   * if there are no mount links configured.
+   * if there are no mount links configured. It should add fallback with the
+   * chrootedFS at it's uri's root.
*/
   @Test(timeout = 3)
   public void testViewFSOverloadSchemeWithoutAnyMountLinks() throws Exception {
-try (FileSystem fs = FileSystem.get(TEST_DIR.toPath().toUri(), conf)) {
+Path initUri = new Path(TEST_DIR.toURI().toString(), "init");
+try (FileSystem fs = FileSystem.get(initUri.toUri(), conf)) {
   ViewFileSystemOverloadScheme vfs = (ViewFileSystemOverloadScheme) fs;
   assertEquals(0, vfs.getMountPoints().length);
-  Path testFallBack = new Path("test", FILE_NAME);
-  assertTrue(vfs.mkdirs(testFallBack));
-  FileStatus[] status = vfs.listStatus(testFallBack.getParent());
-  assertEquals(FILE_NAME, status[0].getPath().getName());
-  assertEquals(testFallBack.getName(),
-  vfs.getFileLinkStatus(testFallBack).getPath().getName());
+  Path testOnFallbackPath = new Path(TEST_DIR.toURI().toString(), "test");
+  assertTrue(vfs.mkdirs(testOnFallbackPath));
+  FileStatus[] status = vfs.listStatus(testOnFallbackPath.getParent());
+  assertEquals(Path.getPathWithoutSchemeAndAuthority(testOnFallbackPath),
+  Path.getPathWithoutSchemeAndAuthority(status[0].getPath()));
+  //Check directly on localFS. The fallBackFs(localFS) should be chrooted
+  //at it's root. So, after
+  FileSystem lfs = vfs.getRawFileSystem(testOnFallbackPath, conf);
+  FileStatus[] statusOnLocalFS =
+  lfs.listStatus(testOnFallbackPath.getParent());
+  assertEquals(testOnFallbackPath.getName(),
+  statusOnLocalFS[0].getPath().getName());
+  //initUri should not have exist in lfs, as it would have chrooted on it's
+  // root only.
+  assertFalse(lfs.exists(initUri));
 }
   }
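
Editor's note: the test above exercises ViewFileSystemOverloadScheme with no mount links, where the file system initialized from fs.defaultFS becomes the fallback, chrooted at its root rather than at the full initialization URI. A hedged Java sketch of the configuration that puts the overload scheme in front of hdfs:// (property names as documented in ViewFsOverloadScheme.md; the namenode address is a placeholder):

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class OverloadSchemeNoMounts {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder namenode address, for illustration only.
    conf.set("fs.defaultFS", "hdfs://nn1.example.com:8020");
    // Route the hdfs scheme through the overload-scheme wrapper...
    conf.set("fs.hdfs.impl",
        "org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme");
    // ...and tell it which concrete FileSystem backs hdfs targets.
    conf.set("fs.viewfs.overload.scheme.target.hdfs.impl",
        "org.apache.hadoop.hdfs.DistributedFileSystem");

    // With no fs.viewfs.mounttable.* links defined, the initialized fs itself is
    // added as the fallback, chrooted at its root ("/"), per this HDFS-15478 change.
    try (FileSystem fs =
             FileSystem.get(URI.create("hdfs://nn1.example.com:8020/"), conf)) {
      System.out.println(fs.getUri());
    }
  }
}
```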
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md
index 564bc03..f3eb336 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md
@@ -34,6 +34,8 @@ If a user wants to continue use the same fs.defaultFS and wants to have more mou
 Example if fs.defaultFS is