[hadoop] branch branch-2.10 updated: HDFS-16198. Short circuit read leaks Slot objects when InvalidToken exception is thrown (#3359)

2021-09-15 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 30f48c4  HDFS-16198. Short circuit read leaks Slot objects when 
InvalidToken exception is thrown (#3359)
30f48c4 is described below

commit 30f48c4d0662a3e29a3881f60754da8cc0307665
Author: Kihwal Lee 
AuthorDate: Wed Sep 15 14:45:28 2021 -0500

HDFS-16198. Short circuit read leaks Slot objects when InvalidToken 
exception is thrown (#3359)

Reviewed-by: He Xiaoqiao 
Reviewed-by: Wei-Chiu Chuang 
(cherry picked from commit c4c5883d8bf1fdc330e1da4d93eba760fa70c0e8)
---
 .../hdfs/client/impl/BlockReaderFactory.java   |   3 +
 .../TestBlockTokenWithShortCircuitRead.java| 203 +
 2 files changed, 206 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index 61e46b1..708a046 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -644,6 +644,9 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   "attempting to set up short-circuit access to " +
   fileName + resp.getMessage();
   LOG.debug("{}:{}", this, msg);
+  if (slot != null) {
+cache.freeSlot(slot);
+  }
   return new ShortCircuitReplicaInfo(new InvalidToken(msg));
 default:
   final long expiration =
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithShortCircuitRead.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithShortCircuitRead.java
new file mode 100644
index 000..0a0fb11
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithShortCircuitRead.java
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
+import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.shortcircuit.DfsClientShm;
+import 
org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo;
+import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor;
+import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
+import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
+import org.apache.hadoop.net.unix.DomainSocket;
+import org.apache.hadoop.net.unix.TemporarySocketDirectory;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.event.Level;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT

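The BlockReaderFactory change above frees the shared-memory Slot before returning the ShortCircuitReplicaInfo that wraps the InvalidToken, so the slot is no longer leaked when the DataNode rejects the block token. A minimal sketch of the same release-on-error-path pattern, with hypothetical names rather than the real Hadoop classes:

final class SlotLeakSketch {
  interface Slot { void free(); }
  interface SlotCache { Slot allocate(); void freeSlot(Slot slot); }

  // Returns either the slot (success) or an error marker, never leaking the slot.
  static Object setUpShortCircuit(SlotCache cache, boolean tokenValid) {
    Slot slot = cache.allocate();
    if (!tokenValid) {
      // Without this release the slot leaks; this is the path HDFS-16198 fixes.
      if (slot != null) {
        cache.freeSlot(slot);
      }
      return "ACCESS_DENIED";  // stand-in for new ShortCircuitReplicaInfo(new InvalidToken(msg))
    }
    return slot;  // success: the caller now owns the slot
  }
}
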
[hadoop] branch branch-3.2 updated: HDFS-16127. Improper pipeline close recovery causes a permanent write failure or data loss. Contributed by Kihwal Lee.

2021-07-16 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 70c5e9e  HDFS-16127. Improper pipeline close recovery causes a 
permanent write failure or data loss. Contributed by Kihwal Lee.
70c5e9e is described below

commit 70c5e9e7803e77955e1bc94ce9d96961aafd44e9
Author: Kihwal Lee 
AuthorDate: Fri Jul 16 14:24:52 2021 -0500

HDFS-16127. Improper pipeline close recovery causes a permanent write 
failure or data loss. Contributed by Kihwal Lee.

(cherry picked from commit 47002719f2aa4ff58378d528d38b0f0962a45c25)
---
 .../src/main/java/org/apache/hadoop/hdfs/DataStreamer.java | 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index ade8f79..0e21516 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -778,7 +778,19 @@ class DataStreamer extends Daemon {
 // Is this block full?
 if (one.isLastPacketInBlock()) {
   // wait for the close packet has been acked
-  waitForAllAcks();
+  try {
+waitForAllAcks();
+  } catch (IOException ioe) {
+// No need to do a close recovery if the last packet was acked.
+// i.e. ackQueue is empty.  waitForAllAcks() can get an exception
+// (e.g. connection reset) while sending a heartbeat packet,
+// if the DN sends the final ack and closes the connection.
+synchronized (dataQueue) {
+  if (!ackQueue.isEmpty()) {
+throw ioe;
+  }
+}
+  }
   if (shouldStop()) {
 continue;
   }

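The guard added above only rethrows the failure from waitForAllAcks() when unacked packets remain; if the DataNode already acked the close packet and then dropped the connection, the exception is swallowed and no close recovery is attempted. A simplified, hypothetical sketch of that guard (not the real DataStreamer):

import java.io.IOException;
import java.util.ArrayDeque;
import java.util.Queue;

class CloseAckGuardSketch {
  private final Queue<Object> ackQueue = new ArrayDeque<>();
  private final Object dataQueue = new Object();  // used here only as the lock, as DataStreamer does

  // Stand-in for the real call: it may fail (e.g. connection reset) even after
  // the DataNode has acked the close packet and hung up.
  private void waitForAllAcks() throws IOException {
    throw new IOException("connection reset");
  }

  void finishBlock() throws IOException {
    try {
      waitForAllAcks();
    } catch (IOException ioe) {
      synchronized (dataQueue) {
        if (!ackQueue.isEmpty()) {
          throw ioe;  // real unacked data remains: surface the error and recover
        }
        // ackQueue is empty: the last packet was acked, so the failure is harmless
      }
    }
  }
}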

[hadoop] branch branch-3.3 updated: HDFS-16127. Improper pipeline close recovery causes a permanent write failure or data loss. Contributed by Kihwal Lee.

2021-07-16 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 95a70c0  HDFS-16127. Improper pipeline close recovery causes a 
permanent write failure or data loss. Contributed by Kihwal Lee.
95a70c0 is described below

commit 95a70c0fdfb2a2534a54d01012c49d4ff23117f6
Author: Kihwal Lee 
AuthorDate: Fri Jul 16 14:23:36 2021 -0500

HDFS-16127. Improper pipeline close recovery causes a permanent write 
failure or data loss. Contributed by Kihwal Lee.

(cherry picked from commit 47002719f2aa4ff58378d528d38b0f0962a45c25)
---
 .../src/main/java/org/apache/hadoop/hdfs/DataStreamer.java | 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 110261b..e1d104e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -778,7 +778,19 @@ class DataStreamer extends Daemon {
 // Is this block full?
 if (one.isLastPacketInBlock()) {
   // wait for the close packet has been acked
-  waitForAllAcks();
+  try {
+waitForAllAcks();
+  } catch (IOException ioe) {
+// No need to do a close recovery if the last packet was acked.
+// i.e. ackQueue is empty.  waitForAllAcks() can get an exception
+// (e.g. connection reset) while sending a heartbeat packet,
+// if the DN sends the final ack and closes the connection.
+synchronized (dataQueue) {
+  if (!ackQueue.isEmpty()) {
+throw ioe;
+  }
+}
+  }
   if (shouldStop()) {
 continue;
   }


[hadoop] branch trunk updated: HDFS-16127. Improper pipeline close recovery causes a permanent write failure or data loss. Contributed by Kihwal Lee.

2021-07-16 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4700271  HDFS-16127. Improper pipeline close recovery causes a 
permanent write failure or data loss. Contributed by Kihwal Lee.
4700271 is described below

commit 47002719f2aa4ff58378d528d38b0f0962a45c25
Author: Kihwal Lee 
AuthorDate: Fri Jul 16 14:22:39 2021 -0500

HDFS-16127. Improper pipeline close recovery causes a permanent write 
failure or data loss. Contributed by Kihwal Lee.
---
 .../src/main/java/org/apache/hadoop/hdfs/DataStreamer.java | 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 4b5f3c3..93446c2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -783,7 +783,19 @@ class DataStreamer extends Daemon {
 // Is this block full?
 if (one.isLastPacketInBlock()) {
   // wait for the close packet has been acked
-  waitForAllAcks();
+  try {
+waitForAllAcks();
+  } catch (IOException ioe) {
+// No need to do a close recovery if the last packet was acked.
+// i.e. ackQueue is empty.  waitForAllAcks() can get an exception
+// (e.g. connection reset) while sending a heartbeat packet,
+// if the DN sends the final ack and closes the connection.
+synchronized (dataQueue) {
+  if (!ackQueue.isEmpty()) {
+throw ioe;
+  }
+}
+  }
   if (shouldStop()) {
 continue;
   }


[hadoop] branch branch-2.10 updated: HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.

2021-06-16 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new ab6b568  HDFS-15618. Improve datanode shutdown latency. Contributed by 
Ahmed Hussein.
ab6b568 is described below

commit ab6b5681e8baf43b5d6a50cc42c65c7a7a1760d7
Author: Kihwal Lee 
AuthorDate: Wed Jun 16 11:38:30 2021 -0500

HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  10 ++
 .../hadoop/hdfs/server/datanode/BlockScanner.java  |  33 -
 .../hadoop/hdfs/server/datanode/DataNode.java  |   4 +-
 .../hadoop/hdfs/server/datanode/VolumeScanner.java |   3 +
 .../server/datanode/VolumeScannerCBInjector.java   |  51 +++
 .../src/main/resources/hdfs-default.xml|   9 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java |  29 +++-
 .../hdfs/server/datanode/TestBlockScanner.java | 148 +
 8 files changed, 280 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1e53a2e..e71ed95 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -600,6 +602,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 
24;  // 3 weeks.
   public static final String  DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = 
"dfs.block.scanner.volume.bytes.per.second";
   public static final long
DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L;
+  /**
+   * The amount of time in milliseconds that the BlockScanner times out waiting
+   * for the VolumeScanner thread to join during a shutdown call.
+   */
+  public static final String  DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY =
+  "dfs.block.scanner.volume.join.timeout.ms";
+  public static final long DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT =
+  TimeUnit.SECONDS.toMillis(5);
   public static final String  DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = 
"dfs.datanode.transferTo.allowed";
   public static final boolean DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT = true;
   public static final String  DFS_HEARTBEAT_INTERVAL_KEY = 
"dfs.heartbeat.interval";
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 3d97022..072f69d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
 
@@ -66,6 +68,12 @@ public class BlockScanner {
*/
   private Conf conf;
 
+  /**
+   * Timeout duration in milliseconds waiting for {@link VolumeScanner} to stop
+   * inside {@link #removeAllVolumeScanners}.
+   */
+  private long joinVolumeScannersTimeOutMs;
+
   @VisibleForTesting
   void setConf(Conf conf) {
 this.conf = conf;
@@ -179,6 +187,9 @@ public class BlockScanner {
 
   public BlockScanner(DataNode datanode, Configuration conf) {
 this.datanode = datanode;
+setJoinVolumeScannersTimeOutMs(
+conf.getLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY,
+DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT));
 this.conf = new Conf(conf);
 if (isEnabled()) {
   LOG.info("Initialized block scanner with targetBytesPerSec {}",
@@ -198,6 +209,13 @@ public class BlockScanner {
 return (conf.scanPeriodMs > 0) && (conf.targetBytesPerSec &

[hadoop] branch branch-2.10 updated: HDFS-15963. Unreleased volume references cause an infinite loop. (#2941) Contributed by Shuyan Zhang.

2021-06-15 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new e7c7fb2  HDFS-15963. Unreleased volume references cause an infinite 
loop. (#2941) Contributed by Shuyan Zhang.
e7c7fb2 is described below

commit e7c7fb2896dc7b07b796075475edb24b840e9503
Author: Kihwal Lee 
AuthorDate: Tue Jun 15 19:57:21 2021 -0500

HDFS-15963. Unreleased volume references cause an infinite loop. (#2941) 
Contributed by Shuyan Zhang.

Reviewed-by: Wei-Chiu Chuang 
Reviewed-by: He Xiaoqiao 
(cherry picked from commit 9f2db2c9fdcf1e61b39651a51f8223cfb8ce2e31)
---
 .../hadoop/hdfs/server/datanode/BlockSender.java   |  1 +
 .../fsdataset/impl/FsDatasetAsyncDiskService.java  | 65 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java  |  2 +-
 .../impl/RamDiskAsyncLazyPersistService.java   | 27 ++---
 .../hadoop/hdfs/TestDataTransferProtocol.java  | 55 ++
 .../datanode/fsdataset/impl/TestFsDatasetImpl.java | 33 +++
 .../fsdataset/impl/TestLazyPersistFiles.java   | 37 
 7 files changed, 183 insertions(+), 37 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index ff81b5a..db19128 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -420,6 +420,7 @@ class BlockSender implements java.io.Closeable {
   ris = new ReplicaInputStreams(
   blockIn, checksumIn, volumeRef, fileIoProvider);
 } catch (IOException ioe) {
+  IOUtils.cleanupWithLogger(null, volumeRef);
   IOUtils.closeStream(this);
   org.apache.commons.io.IOUtils.closeQuietly(blockIn);
   org.apache.commons.io.IOUtils.closeQuietly(checksumIn);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
index 10ee9cf..93c8ea6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
@@ -161,15 +161,23 @@ class FsDatasetAsyncDiskService {
* Execute the task sometime in the future, using ThreadPools.
*/
   synchronized void execute(File root, Runnable task) {
-if (executors == null) {
-  throw new RuntimeException("AsyncDiskService is already shutdown");
-}
-ThreadPoolExecutor executor = executors.get(root);
-if (executor == null) {
-  throw new RuntimeException("Cannot find root " + root
-  + " for execution of task " + task);
-} else {
-  executor.execute(task);
+try {
+  if (executors == null) {
+throw new RuntimeException("AsyncDiskService is already shutdown");
+  }
+  ThreadPoolExecutor executor = executors.get(root);
+  if (executor == null) {
+throw new RuntimeException("Cannot find root " + root
++ " for execution of task " + task);
+  } else {
+executor.execute(task);
+  }
+} catch (RuntimeException re) {
+  if (task instanceof ReplicaFileDeleteTask) {
+IOUtils.cleanupWithLogger(null,
+((ReplicaFileDeleteTask) task).volumeRef);
+  }
+  throw re;
 }
   }
   
@@ -301,28 +309,31 @@ class FsDatasetAsyncDiskService {
 
 @Override
 public void run() {
-  final long blockLength = blockFile.length();
-  final long metaLength = metaFile.length();
-  boolean result;
+  try {
+final long blockLength = blockFile.length();
+final long metaLength = metaFile.length();
+boolean result;
 
-  result = (trashDirectory == null) ? deleteFiles() : moveFiles();
+result = (trashDirectory == null) ? deleteFiles() : moveFiles();
 
-  if (!result) {
-LOG.warn("Unexpected error trying to "
-+ (trashDirectory == null ? "delete" : "move")
-+ " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
-+ " at file " + blockFile + ". Ignored.");
-  } else {
-if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){
-  datanode.notifyNamenodeDeletedBlock(block, volume.getSto

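The FsDatasetAsyncDiskService changes above follow one rule: if a task that holds a volume reference can no longer run (the service is already shut down, the volume root is unknown, or the task body throws), release the reference instead of dropping it, otherwise the volume's reference count never reaches zero and waiters loop forever. A small hypothetical sketch of that rule using plain java.util.concurrent types:

import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.ExecutorService;

class VolumeRefSketch {
  static void submitDeletion(ExecutorService executor, Closeable volumeRef,
      Runnable deletion) {
    try {
      executor.execute(() -> {
        try {
          deletion.run();
        } finally {
          closeQuietly(volumeRef);  // released when the task finishes
        }
      });
    } catch (RuntimeException e) {
      closeQuietly(volumeRef);      // released when the task was never accepted
      throw e;
    }
  }

  private static void closeQuietly(Closeable c) {
    try { c.close(); } catch (IOException ignored) { }
  }
}
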
[hadoop] branch trunk updated: HDFS-16042. DatanodeAdminMonitor scan should be delay based (#3058)

2021-06-08 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a2a0283  HDFS-16042. DatanodeAdminMonitor scan should be delay based 
(#3058)
a2a0283 is described below

commit a2a0283c7be8eac641a256f06731cb6e4bab3b09
Author: Ahmed Hussein <50450311+amahuss...@users.noreply.github.com>
AuthorDate: Tue Jun 8 11:09:31 2021 -0500

HDFS-16042. DatanodeAdminMonitor scan should be delay based (#3058)
---
 .../apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
index 8cad44f..70ae44a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
@@ -149,7 +149,7 @@ public class DatanodeAdminManager {
   throw new RuntimeException("Unable to create the Decommission monitor " +
   "from "+cls, e);
 }
-executor.scheduleAtFixedRate(monitor, intervalSecs, intervalSecs,
+executor.scheduleWithFixedDelay(monitor, intervalSecs, intervalSecs,
 TimeUnit.SECONDS);
 
 LOG.debug("Activating DatanodeAdminManager with interval {} seconds, " +

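The one-line change matters because scheduleAtFixedRate measures the interval from the start of each run, so a monitor scan that takes longer than the interval is rescheduled immediately and scans can run back to back, while scheduleWithFixedDelay measures the interval from the end of the previous run and guarantees an idle gap. A small standalone illustration (not DatanodeAdminManager code):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class DelayVsRate {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService pool = Executors.newScheduledThreadPool(2);
    Runnable slowScan = () -> {
      System.out.println(Thread.currentThread().getName() + " scan started");
      // Scan deliberately slower than the 1-second interval.
      try { Thread.sleep(2000); } catch (InterruptedException ignored) { }
    };
    // Next run is due 1s after the previous run *started*: late runs pile up back to back.
    pool.scheduleAtFixedRate(slowScan, 1, 1, TimeUnit.SECONDS);
    // Next run starts 1s after the previous run *finished*: there is always an idle gap.
    pool.scheduleWithFixedDelay(slowScan, 1, 1, TimeUnit.SECONDS);
    Thread.sleep(10_000);
    pool.shutdownNow();
  }
}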

[hadoop] branch branch-3.2 updated: HDFS-15799. Make DisallowedDatanodeException terse. Contributed by Richard Ross.

2021-02-03 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 7c18c77  HDFS-15799. Make DisallowedDatanodeException terse. 
Contributed by Richard Ross.
7c18c77 is described below

commit 7c18c77d1633454858153f09bde7b8036b26350c
Author: Kihwal Lee 
AuthorDate: Wed Feb 3 09:03:31 2021 -0600

HDFS-15799. Make DisallowedDatanodeException terse. Contributed by Richard 
Ross.

(cherry picked from commit 182623d2bc73cac4764149fcc9c7d94c1541f89c)
---
 .../org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index aea48ce..c2ca159 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -226,6 +226,7 @@ import org.slf4j.Logger;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
+import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
 
 import javax.annotation.Nonnull;
 
@@ -540,7 +541,8 @@ public class NameNodeRpcServer implements NamenodeProtocols 
{
 QuotaByStorageTypeExceededException.class,
 AclException.class,
 FSLimitException.PathComponentTooLongException.class,
-FSLimitException.MaxDirectoryItemsExceededException.class);
+FSLimitException.MaxDirectoryItemsExceededException.class,
+DisallowedDatanodeException.class);
 
 clientRpcServer.addSuppressedLoggingExceptions(StandbyException.class,
 UnresolvedPathException.class);



[hadoop] branch branch-3.3 updated: HDFS-15799. Make DisallowedDatanodeException terse. Contributed by Richard Ross.

2021-02-03 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 96773ec  HDFS-15799. Make DisallowedDatanodeException terse. 
Contributed by Richard Ross.
96773ec is described below

commit 96773ec81b3ef5e10a5affadb588a6c5de1a2140
Author: Kihwal Lee 
AuthorDate: Wed Feb 3 08:58:10 2021 -0600

HDFS-15799. Make DisallowedDatanodeException terse. Contributed by Richard 
Ross.

(cherry picked from commit 182623d2bc73cac4764149fcc9c7d94c1541f89c)
---
 .../org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 54b75bf..1e248ee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -228,6 +228,7 @@ import org.slf4j.Logger;
 
 import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.protobuf.BlockingService;
+import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
 
 import javax.annotation.Nonnull;
 
@@ -542,7 +543,8 @@ public class NameNodeRpcServer implements NamenodeProtocols 
{
 QuotaByStorageTypeExceededException.class,
 AclException.class,
 FSLimitException.PathComponentTooLongException.class,
-FSLimitException.MaxDirectoryItemsExceededException.class);
+FSLimitException.MaxDirectoryItemsExceededException.class,
+DisallowedDatanodeException.class);
 
 clientRpcServer.addSuppressedLoggingExceptions(StandbyException.class,
 UnresolvedPathException.class);



[hadoop] branch trunk updated: HDFS-15799. Make DisallowedDatanodeException terse. Contributed by Richard Ross.

2021-02-03 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 182623d  HDFS-15799. Make DisallowedDatanodeException terse. 
Contributed by Richard Ross.
182623d is described below

commit 182623d2bc73cac4764149fcc9c7d94c1541f89c
Author: Kihwal Lee 
AuthorDate: Wed Feb 3 08:55:50 2021 -0600

HDFS-15799. Make DisallowedDatanodeException terse. Contributed by Richard 
Ross.
---
 .../org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 51f5921..f4ab8f9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -225,6 +225,7 @@ import org.slf4j.Logger;
 
 import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.protobuf.BlockingService;
+import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
 
 import javax.annotation.Nonnull;
 
@@ -530,7 +531,8 @@ public class NameNodeRpcServer implements NamenodeProtocols 
{
 QuotaByStorageTypeExceededException.class,
 AclException.class,
 FSLimitException.PathComponentTooLongException.class,
-FSLimitException.MaxDirectoryItemsExceededException.class);
+FSLimitException.MaxDirectoryItemsExceededException.class,
+DisallowedDatanodeException.class);
 
 clientRpcServer.addSuppressedLoggingExceptions(StandbyException.class,
 UnresolvedPathException.class);



[hadoop] branch trunk updated: HADOOP-17360. Log the remote address for authentication success (#2441)

2020-11-09 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1ea3f74  HADOOP-17360. Log the remote address for authentication 
success (#2441)
1ea3f74 is described below

commit 1ea3f74246294c280a2ccb0ff3e90b5721c5f0e2
Author: Ahmed Hussein <50450311+amahuss...@users.noreply.github.com>
AuthorDate: Mon Nov 9 14:05:08 2020 -0600

HADOOP-17360. Log the remote address for authentication success (#2441)

Co-authored-by: ahussein 
---
 .../hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 25f3394..3afad21 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -2043,7 +2043,7 @@ public abstract class Server {
 LOG.debug("SASL server successfully authenticated client: " + 
user);
   }
   rpcMetrics.incrAuthenticationSuccesses();
-  AUDITLOG.info(AUTH_SUCCESSFUL_FOR + user);
+  AUDITLOG.info(AUTH_SUCCESSFUL_FOR + user + " from " + toString());
   saslContextEstablished = true;
 }
   } catch (RpcServerException rse) { // don't re-wrap



[hadoop] branch branch-3.1 updated: HDFS-15622. Deleted blocks linger in the replications queue. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 7a3085d  HDFS-15622. Deleted blocks linger in the replications queue. 
Contributed by Ahmed Hussein.
7a3085d is described below

commit 7a3085d552c4b24cdf23da201a300928ada7b8fd
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 21:01:09 2020 -0500

HDFS-15622. Deleted blocks linger in the replications queue. Contributed by 
Ahmed Hussein.

(cherry picked from commit da1b6e3cc286db00b385f3280627d2b2063b4e59)
---
 .../blockmanagement/LowRedundancyBlocks.java   | 30 ++
 .../TestLowRedundancyBlockQueues.java  | 47 +-
 .../blockmanagement/TestReplicationPolicy.java | 17 ++--
 3 files changed, 81 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
index f6ef248..d719e93 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -500,6 +501,8 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
* the block count is met or iteration reaches the end of the lowest priority
* list, in which case bookmarks for each block list are reset to the heads
* of their respective lists.
+   * If a block is deleted (has invalid bcId), it will be removed from the low
+   * redundancy queues.
*
* @param blocksToProcess - number of blocks to fetch from low redundancy
*  blocks.
@@ -515,21 +518,32 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
 
 int count = 0;
 int priority = 0;
+HashSet<BlockInfo> toRemove = new HashSet<>();
 for (; count < blocksToProcess && priority < LEVEL; priority++) {
-  if (priority == QUEUE_WITH_CORRUPT_BLOCKS) {
-// do not choose corrupted blocks.
-continue;
-  }
-
   // Go through all blocks that need reconstructions with current priority.
   // Set the iterator to the first unprocessed block at this priority level
+  // We do not want to skip QUEUE_WITH_CORRUPT_BLOCKS because we still need
+  // to look for deleted blocks if any.
+  final boolean inCorruptLevel = (QUEUE_WITH_CORRUPT_BLOCKS == priority);
   final Iterator<BlockInfo> i = priorityQueues.get(priority).getBookmark();
   final List<BlockInfo> blocks = new LinkedList<>();
-  blocksToReconstruct.add(blocks);
-  // Loop through all remaining blocks in the list.
+  if (!inCorruptLevel) {
+blocksToReconstruct.add(blocks);
+  }
   for(; count < blocksToProcess && i.hasNext(); count++) {
-blocks.add(i.next());
+BlockInfo block = i.next();
+if (block.isDeleted()) {
+  toRemove.add(block);
+  continue;
+}
+if (!inCorruptLevel) {
+  blocks.add(block);
+}
+  }
+  for (BlockInfo bInfo : toRemove) {
+remove(bInfo, priority);
   }
+  toRemove.clear();
 }
 
 if (priority == LEVEL || resetIterators) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
index e63a8d8..ef614fb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -41,6 +42,7 @@ import static org.junit.Assert.fail;
 public class TestLowRedundancyBlockQueues {
 
   private final ErasureCodingPolicy ecPolicy;
+  private static AtomicLong mockINodeId = new AtomicLong(0);
 
   public TestLowRedundancyBlockQueues(ErasureCodingPolicy policy) {
 ecPolicy = policy;
@@ -52,7 +54,15 @@ public class TestLowRedundancyBlockQueues {
   }
 
   private BlockInfo genBlockInfo(long id) {
-return

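In the LowRedundancyBlocks change above, deleted blocks found during iteration are collected into toRemove and only removed from the priority queues after the loop, because removing them while the bookmark iterator is active would break the iteration. A generic collect-then-remove sketch using plain java.util collections (the real code uses HDFS-specific queue types):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

class CollectThenRemoveSketch {
  static List<String> pickLive(Set<String> queue, int limit) {
    List<String> picked = new ArrayList<>();
    Set<String> toRemove = new HashSet<>();
    Iterator<String> it = queue.iterator();
    while (it.hasNext() && picked.size() < limit) {
      String block = it.next();
      if (block.startsWith("deleted:")) {
        toRemove.add(block);       // defer removal; the iterator is still in use
        continue;
      }
      picked.add(block);
    }
    queue.removeAll(toRemove);     // safe now that iteration has finished
    return picked;
  }
}
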
[hadoop] branch branch-3.2 updated: HDFS-15622. Deleted blocks linger in the replications queue. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new f363c3b  HDFS-15622. Deleted blocks linger in the replications queue. 
Contributed by Ahmed Hussein.
f363c3b is described below

commit f363c3b315472e45fd1d0f8225e31184b6c353a1
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 19:48:42 2020 -0500

HDFS-15622. Deleted blocks linger in the replications queue. Contributed by 
Ahmed Hussein.

(cherry picked from commit da1b6e3cc286db00b385f3280627d2b2063b4e59)
---
 .../blockmanagement/LowRedundancyBlocks.java   | 30 ++
 .../TestLowRedundancyBlockQueues.java  | 47 +-
 .../blockmanagement/TestReplicationPolicy.java | 17 ++--
 3 files changed, 81 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
index f6ef248..d719e93 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -500,6 +501,8 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
* the block count is met or iteration reaches the end of the lowest priority
* list, in which case bookmarks for each block list are reset to the heads
* of their respective lists.
+   * If a block is deleted (has invalid bcId), it will be removed from the low
+   * redundancy queues.
*
* @param blocksToProcess - number of blocks to fetch from low redundancy
*  blocks.
@@ -515,21 +518,32 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
 
 int count = 0;
 int priority = 0;
+HashSet<BlockInfo> toRemove = new HashSet<>();
 for (; count < blocksToProcess && priority < LEVEL; priority++) {
-  if (priority == QUEUE_WITH_CORRUPT_BLOCKS) {
-// do not choose corrupted blocks.
-continue;
-  }
-
   // Go through all blocks that need reconstructions with current priority.
   // Set the iterator to the first unprocessed block at this priority level
+  // We do not want to skip QUEUE_WITH_CORRUPT_BLOCKS because we still need
+  // to look for deleted blocks if any.
+  final boolean inCorruptLevel = (QUEUE_WITH_CORRUPT_BLOCKS == priority);
   final Iterator<BlockInfo> i = priorityQueues.get(priority).getBookmark();
   final List<BlockInfo> blocks = new LinkedList<>();
-  blocksToReconstruct.add(blocks);
-  // Loop through all remaining blocks in the list.
+  if (!inCorruptLevel) {
+blocksToReconstruct.add(blocks);
+  }
   for(; count < blocksToProcess && i.hasNext(); count++) {
-blocks.add(i.next());
+BlockInfo block = i.next();
+if (block.isDeleted()) {
+  toRemove.add(block);
+  continue;
+}
+if (!inCorruptLevel) {
+  blocks.add(block);
+}
+  }
+  for (BlockInfo bInfo : toRemove) {
+remove(bInfo, priority);
   }
+  toRemove.clear();
 }
 
 if (priority == LEVEL || resetIterators) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
index e63a8d8..ef614fb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -41,6 +42,7 @@ import static org.junit.Assert.fail;
 public class TestLowRedundancyBlockQueues {
 
   private final ErasureCodingPolicy ecPolicy;
+  private static AtomicLong mockINodeId = new AtomicLong(0);
 
   public TestLowRedundancyBlockQueues(ErasureCodingPolicy policy) {
 ecPolicy = policy;
@@ -52,7 +54,15 @@ public class TestLowRedundancyBlockQueues {
   }
 
   private BlockInfo genBlockInfo(long id) {
-return

[hadoop] branch branch-3.3 updated: HDFS-15622. Deleted blocks linger in the replications queue. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 02709cb  HDFS-15622. Deleted blocks linger in the replications queue. 
Contributed by Ahmed Hussein.
02709cb is described below

commit 02709cb054509677795cff5de92e1cbb0edc1c88
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 19:41:38 2020 -0500

HDFS-15622. Deleted blocks linger in the replications queue. Contributed by 
Ahmed Hussein.

(cherry picked from commit da1b6e3cc286db00b385f3280627d2b2063b4e59)
---
 .../blockmanagement/LowRedundancyBlocks.java   | 30 ++
 .../TestLowRedundancyBlockQueues.java  | 47 +-
 .../blockmanagement/TestReplicationPolicy.java | 17 ++--
 3 files changed, 81 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
index f6ef248..d719e93 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -500,6 +501,8 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
* the block count is met or iteration reaches the end of the lowest priority
* list, in which case bookmarks for each block list are reset to the heads
* of their respective lists.
+   * If a block is deleted (has invalid bcId), it will be removed from the low
+   * redundancy queues.
*
* @param blocksToProcess - number of blocks to fetch from low redundancy
*  blocks.
@@ -515,21 +518,32 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
 
 int count = 0;
 int priority = 0;
+HashSet<BlockInfo> toRemove = new HashSet<>();
 for (; count < blocksToProcess && priority < LEVEL; priority++) {
-  if (priority == QUEUE_WITH_CORRUPT_BLOCKS) {
-// do not choose corrupted blocks.
-continue;
-  }
-
   // Go through all blocks that need reconstructions with current priority.
   // Set the iterator to the first unprocessed block at this priority level
+  // We do not want to skip QUEUE_WITH_CORRUPT_BLOCKS because we still need
+  // to look for deleted blocks if any.
+  final boolean inCorruptLevel = (QUEUE_WITH_CORRUPT_BLOCKS == priority);
   final Iterator<BlockInfo> i = priorityQueues.get(priority).getBookmark();
   final List<BlockInfo> blocks = new LinkedList<>();
-  blocksToReconstruct.add(blocks);
-  // Loop through all remaining blocks in the list.
+  if (!inCorruptLevel) {
+blocksToReconstruct.add(blocks);
+  }
   for(; count < blocksToProcess && i.hasNext(); count++) {
-blocks.add(i.next());
+BlockInfo block = i.next();
+if (block.isDeleted()) {
+  toRemove.add(block);
+  continue;
+}
+if (!inCorruptLevel) {
+  blocks.add(block);
+}
+  }
+  for (BlockInfo bInfo : toRemove) {
+remove(bInfo, priority);
   }
+  toRemove.clear();
 }
 
 if (priority == LEVEL || resetIterators) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
index c40f277..e33e24f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -41,6 +42,7 @@ import static org.junit.Assert.fail;
 public class TestLowRedundancyBlockQueues {
 
   private final ErasureCodingPolicy ecPolicy;
+  private static AtomicLong mockINodeId = new AtomicLong(0);
 
   public TestLowRedundancyBlockQueues(ErasureCodingPolicy policy) {
 ecPolicy = policy;
@@ -52,7 +54,15 @@ public class TestLowRedundancyBlockQueues {
   }
 
   private BlockInfo genBlockInfo(long id) {
-return

[hadoop] branch trunk updated: HDFS-15622. Deleted blocks linger in the replications queue. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new da1b6e3  HDFS-15622. Deleted blocks linger in the replications queue. 
Contributed by Ahmed Hussein.
da1b6e3 is described below

commit da1b6e3cc286db00b385f3280627d2b2063b4e59
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 16:59:49 2020 -0500

HDFS-15622. Deleted blocks linger in the replications queue. Contributed by 
Ahmed Hussein.
---
 .../blockmanagement/LowRedundancyBlocks.java   | 30 ++
 .../TestLowRedundancyBlockQueues.java  | 47 +-
 .../blockmanagement/TestReplicationPolicy.java | 17 ++--
 3 files changed, 81 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
index f6ef248..d719e93 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -500,6 +501,8 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
* the block count is met or iteration reaches the end of the lowest priority
* list, in which case bookmarks for each block list are reset to the heads
* of their respective lists.
+   * If a block is deleted (has invalid bcId), it will be removed from the low
+   * redundancy queues.
*
* @param blocksToProcess - number of blocks to fetch from low redundancy
*  blocks.
@@ -515,21 +518,32 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
 
 int count = 0;
 int priority = 0;
+HashSet<BlockInfo> toRemove = new HashSet<>();
 for (; count < blocksToProcess && priority < LEVEL; priority++) {
-  if (priority == QUEUE_WITH_CORRUPT_BLOCKS) {
-// do not choose corrupted blocks.
-continue;
-  }
-
   // Go through all blocks that need reconstructions with current priority.
   // Set the iterator to the first unprocessed block at this priority level
+  // We do not want to skip QUEUE_WITH_CORRUPT_BLOCKS because we still need
+  // to look for deleted blocks if any.
+  final boolean inCorruptLevel = (QUEUE_WITH_CORRUPT_BLOCKS == priority);
   final Iterator<BlockInfo> i = priorityQueues.get(priority).getBookmark();
   final List<BlockInfo> blocks = new LinkedList<>();
-  blocksToReconstruct.add(blocks);
-  // Loop through all remaining blocks in the list.
+  if (!inCorruptLevel) {
+blocksToReconstruct.add(blocks);
+  }
   for(; count < blocksToProcess && i.hasNext(); count++) {
-blocks.add(i.next());
+BlockInfo block = i.next();
+if (block.isDeleted()) {
+  toRemove.add(block);
+  continue;
+}
+if (!inCorruptLevel) {
+  blocks.add(block);
+}
+  }
+  for (BlockInfo bInfo : toRemove) {
+remove(bInfo, priority);
   }
+  toRemove.clear();
 }
 
 if (priority == LEVEL || resetIterators) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
index c40f277..e33e24f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -41,6 +42,7 @@ import static org.junit.Assert.fail;
 public class TestLowRedundancyBlockQueues {
 
   private final ErasureCodingPolicy ecPolicy;
+  private static AtomicLong mockINodeId = new AtomicLong(0);
 
   public TestLowRedundancyBlockQueues(ErasureCodingPolicy policy) {
 ecPolicy = policy;
@@ -52,7 +54,15 @@ public class TestLowRedundancyBlockQueues {
   }
 
   private BlockInfo genBlockInfo(long id) {
-return new BlockInfoContiguous(new Block(id), (short) 3);
+return genBlockInfo(id

[hadoop] branch branch-3.1 updated: HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 33db7c1  HDFS-15618. Improve datanode shutdown latency. Contributed by 
Ahmed Hussein.
33db7c1 is described below

commit 33db7c140b3d8945542fb021ad222dac4a0d38a8
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 15:26:34 2020 -0500

HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.

(cherry picked from commit cf932a7e2d6182471df4eba1333737912a32534b)
(cherry picked from commit f3b2d85690aeb65da7540f48edd52ca7315d0b02)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  10 ++
 .../hadoop/hdfs/server/datanode/BlockScanner.java  |  33 -
 .../hadoop/hdfs/server/datanode/DataNode.java  |   4 +-
 .../hadoop/hdfs/server/datanode/VolumeScanner.java |   3 +
 .../server/datanode/VolumeScannerCBInjector.java   |  51 
 .../src/main/resources/hdfs-default.xml|   9 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java |  28 +++-
 .../hdfs/server/datanode/TestBlockScanner.java | 142 +
 8 files changed, 273 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index bfa35bd..5412212 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -30,6 +30,8 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalcul
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.http.HttpConfig;
 
+import java.util.concurrent.TimeUnit;
+
 /** 
  * This class contains constants for configuration keys and default values
  * used in hdfs.
@@ -731,6 +733,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 
24;  // 3 weeks.
   public static final String  DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = 
"dfs.block.scanner.volume.bytes.per.second";
   public static final long
DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L;
+  /**
+   * The amount of time in milliseconds that the BlockScanner times out waiting
+   * for the VolumeScanner thread to join during a shutdown call.
+   */
+  public static final String  DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY =
+  "dfs.block.scanner.volume.join.timeout.ms";
+  public static final long DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT =
+  TimeUnit.SECONDS.toMillis(5);
   public static final String  DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = 
"dfs.datanode.transferTo.allowed";
   public static final boolean DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT = true;
   public static final String  DFS_HEARTBEAT_INTERVAL_KEY = 
"dfs.heartbeat.interval";
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 8081895..82753e9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
 
@@ -66,6 +68,12 @@ public class BlockScanner {
*/
   private Conf conf;
 
+  /**
+   * Timeout duration in milliseconds waiting for {@link VolumeScanner} to stop
+   * inside {@link #removeAllVolumeScanners}.
+   */
+  private long joinVolumeScannersTimeOutMs;
+
   @VisibleForTesting
   void setConf(Conf conf) {
 this.conf = conf;
@@ -179,6 +187,9 @@ public class BlockScanner {
 
   public BlockScanner(DataNode datanode, Configuration conf) {
 this.datanode = datanode;
+setJoinVolumeScannersTimeOutMs(
+conf.getLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY,
+DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT));
 this.conf = new Conf(conf);

[hadoop] branch branch-3.2 updated: HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new f3b2d85  HDFS-15618. Improve datanode shutdown latency. Contributed by 
Ahmed Hussein.
f3b2d85 is described below

commit f3b2d85690aeb65da7540f48edd52ca7315d0b02
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 14:59:09 2020 -0500

HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.

(cherry picked from commit cf932a7e2d6182471df4eba1333737912a32534b)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  10 ++
 .../hadoop/hdfs/server/datanode/BlockScanner.java  |  33 -
 .../hadoop/hdfs/server/datanode/DataNode.java  |   4 +-
 .../hadoop/hdfs/server/datanode/VolumeScanner.java |   3 +
 .../server/datanode/VolumeScannerCBInjector.java   |  51 
 .../src/main/resources/hdfs-default.xml|   9 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java |  28 +++-
 .../hdfs/server/datanode/TestBlockScanner.java | 142 +
 8 files changed, 273 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 2ba3850..e0d4306 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -31,6 +31,8 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalcul
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.http.HttpConfig;
 
+import java.util.concurrent.TimeUnit;
+
 /** 
  * This class contains constants for configuration keys and default values
  * used in hdfs.
@@ -787,6 +789,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 
24;  // 3 weeks.
   public static final String  DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = 
"dfs.block.scanner.volume.bytes.per.second";
   public static final long
DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L;
+  /**
+   * The amount of time in milliseconds that the BlockScanner times out waiting
+   * for the VolumeScanner thread to join during a shutdown call.
+   */
+  public static final String  DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY =
+  "dfs.block.scanner.volume.join.timeout.ms";
+  public static final long DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT =
+  TimeUnit.SECONDS.toMillis(5);
   public static final String  DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = 
"dfs.datanode.transferTo.allowed";
   public static final boolean DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT = true;
   public static final String  DFS_HEARTBEAT_INTERVAL_KEY = 
"dfs.heartbeat.interval";
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 8081895..82753e9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
 
@@ -66,6 +68,12 @@ public class BlockScanner {
*/
   private Conf conf;
 
+  /**
+   * Timeout duration in milliseconds waiting for {@link VolumeScanner} to stop
+   * inside {@link #removeAllVolumeScanners}.
+   */
+  private long joinVolumeScannersTimeOutMs;
+
   @VisibleForTesting
   void setConf(Conf conf) {
 this.conf = conf;
@@ -179,6 +187,9 @@ public class BlockScanner {
 
   public BlockScanner(DataNode datanode, Configuration conf) {
 this.datanode = datanode;
+setJoinVolumeScannersTimeOutMs(
+conf.getLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY,
+DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT));
 this.conf = new Conf(conf);
 if (isEnabled()) {
   LOG.info("Initialized block scanner with targe

[hadoop] branch branch-3.3 updated: HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new cf932a7  HDFS-15618. Improve datanode shutdown latency. Contributed by 
Ahmed Hussein.
cf932a7 is described below

commit cf932a7e2d6182471df4eba1333737912a32534b
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 09:55:28 2020 -0500

HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |   8 ++
 .../hadoop/hdfs/server/datanode/BlockScanner.java  |  33 -
 .../hadoop/hdfs/server/datanode/DataNode.java  |   4 +-
 .../hadoop/hdfs/server/datanode/VolumeScanner.java |   3 +
 .../server/datanode/VolumeScannerCBInjector.java   |  51 
 .../src/main/resources/hdfs-default.xml|   9 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java |  28 +++-
 .../hdfs/server/datanode/TestBlockScanner.java | 142 +
 8 files changed, 271 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 4b8c27b..5264799 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -846,6 +846,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 
24;  // 3 weeks.
   public static final String  DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = 
"dfs.block.scanner.volume.bytes.per.second";
   public static final long
DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L;
+  /**
+   * The amount of time in milliseconds that the BlockScanner times out waiting
+   * for the VolumeScanner thread to join during a shutdown call.
+   */
+  public static final String  DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY =
+  "dfs.block.scanner.volume.join.timeout.ms";
+  public static final long DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT =
+  TimeUnit.SECONDS.toMillis(5);
   public static final String  DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED =
   "dfs.block.scanner.skip.recent.accessed";
   public static final boolean DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED_DEFAULT =
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 82efcf8..dc619f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -22,6 +22,8 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_SKIP_RECENT
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
 
@@ -68,6 +70,12 @@ public class BlockScanner {
*/
   private Conf conf;
 
+  /**
+   * Timeout duration in milliseconds waiting for {@link VolumeScanner} to stop
+   * inside {@link #removeAllVolumeScanners}.
+   */
+  private long joinVolumeScannersTimeOutMs;
+
   @VisibleForTesting
   void setConf(Conf conf) {
 this.conf = conf;
@@ -185,6 +193,9 @@ public class BlockScanner {
 
   public BlockScanner(DataNode datanode, Configuration conf) {
 this.datanode = datanode;
+setJoinVolumeScannersTimeOutMs(
+conf.getLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY,
+DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT));
 this.conf = new Conf(conf);
 if (isEnabled()) {
   LOG.info("Initialized block scanner with targetBytesPerSec {}",
@@ -204,6 +215,13 @@ public class BlockScanner {
 return (conf.scanPeriodMs > 0) && (conf.targetBytesPerSec > 0);
   }
 
+  /**
+   * Returns true if there is any scanner thread registered.
+   */
+  public synchronized boolean hasAnyRegisteredScanner() {
+return !scanners.isEmpty();
+  }
+
  /**
   * Set up a scann

[hadoop] branch trunk updated: HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.

2020-10-21 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 88a9f42  HDFS-15618. Improve datanode shutdown latency. Contributed by 
Ahmed Hussein.
88a9f42 is described below

commit 88a9f42f320e7c16cf0b0b424283f8e4486ef286
Author: Kihwal Lee 
AuthorDate: Wed Oct 21 00:59:35 2020 -0500

HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |   8 ++
 .../hadoop/hdfs/server/datanode/BlockScanner.java  |  33 -
 .../hadoop/hdfs/server/datanode/DataNode.java  |   4 +-
 .../hadoop/hdfs/server/datanode/VolumeScanner.java |   3 +
 .../server/datanode/VolumeScannerCBInjector.java   |  51 
 .../src/main/resources/hdfs-default.xml|   9 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java |  28 +++-
 .../hdfs/server/datanode/TestBlockScanner.java | 142 +
 8 files changed, 271 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 6b242f0..f59455e 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -866,6 +866,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 
24;  // 3 weeks.
   public static final String  DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = 
"dfs.block.scanner.volume.bytes.per.second";
   public static final long
DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L;
+  /**
+   * The amount of time in milliseconds that the BlockScanner times out waiting
+   * for the VolumeScanner thread to join during a shutdown call.
+   */
+  public static final String  DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY =
+  "dfs.block.scanner.volume.join.timeout.ms";
+  public static final long DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT =
+  TimeUnit.SECONDS.toMillis(5);
   public static final String  DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED =
   "dfs.block.scanner.skip.recent.accessed";
   public static final boolean DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED_DEFAULT =
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 2895233..485cf00 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -22,6 +22,8 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_SKIP_RECENT
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
 
@@ -68,6 +70,12 @@ public class BlockScanner {
*/
   private Conf conf;
 
+  /**
+   * Timeout duration in milliseconds waiting for {@link VolumeScanner} to stop
+   * inside {@link #removeAllVolumeScanners}.
+   */
+  private long joinVolumeScannersTimeOutMs;
+
   @VisibleForTesting
   void setConf(Conf conf) {
 this.conf = conf;
@@ -185,6 +193,9 @@ public class BlockScanner {
 
   public BlockScanner(DataNode datanode, Configuration conf) {
 this.datanode = datanode;
+setJoinVolumeScannersTimeOutMs(
+conf.getLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY,
+DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT));
 this.conf = new Conf(conf);
 if (isEnabled()) {
   LOG.info("Initialized block scanner with targetBytesPerSec {}",
@@ -204,6 +215,13 @@ public class BlockScanner {
 return (conf.scanPeriodMs > 0) && (conf.targetBytesPerSec > 0);
   }
 
+  /**
+   * Returns true if there is any scanner thread registered.
+   */
+  public synchronized boolean hasAnyRegisteredScanner() {
+return !scanners.isEmpty();
+  }
+
  /**
   * Set up a scanner for t
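
The HDFS-15618 patches above bound how long a DataNode shutdown waits for VolumeScanner threads: the new dfs.block.scanner.volume.join.timeout.ms key (default 5 seconds) is read in the BlockScanner constructor, and hasAnyRegisteredScanner() lets callers poll whether any scanner thread is still registered. The body of removeAllVolumeScanners() is not visible in the truncated hunks, so the bounded wait below is only an illustrative sketch built on those two pieces, not the committed implementation.

  // Hedged sketch: bounded wait for VolumeScanner threads during shutdown.
  // The config key, default, and hasAnyRegisteredScanner() come from the
  // patch; the polling loop itself is an assumption for illustration.
  import org.apache.hadoop.conf.Configuration;

  public class BlockScannerShutdownSketch {
    private final long joinVolumeScannersTimeOutMs;

    public BlockScannerShutdownSketch(Configuration conf) {
      // Same lookup the patch adds to the BlockScanner constructor.
      this.joinVolumeScannersTimeOutMs = conf.getLong(
          "dfs.block.scanner.volume.join.timeout.ms",
          5000L); // DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT
    }

    /** Wait up to the configured timeout for scanner threads to stop. */
    void awaitScannersStopped(BlockScannerLike scanner) throws InterruptedException {
      long deadline = System.currentTimeMillis() + joinVolumeScannersTimeOutMs;
      while (scanner.hasAnyRegisteredScanner()
          && System.currentTimeMillis() < deadline) {
        Thread.sleep(100); // poll instead of blocking indefinitely on join()
      }
    }

    /** Minimal stand-in for the real BlockScanner API used above. */
    interface BlockScannerLike {
      boolean hasAnyRegisteredScanner();
    }
  }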

[hadoop] branch branch-3.1 updated: HDFS-15628. HttpFS server throws NPE if a file is a symlink. Contributed by Ahmed Hussein.

2020-10-16 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new ffe6e39  HDFS-15628. HttpFS server throws NPE if a file is a symlink. 
Contributed by Ahmed Hussein.
ffe6e39 is described below

commit ffe6e39c96fa1cb2ba8a162d840cdda0b398ea4e
Author: Kihwal Lee 
AuthorDate: Fri Oct 16 11:41:17 2020 -0500

HDFS-15628. HttpFS server throws NPE if a file is a symlink. Contributed by 
Ahmed Hussein.

(cherry picked from commit e45407128d4e9a9804c777c8f845ad41e1280177)
---
 .../hadoop/fs/http/client/HttpFSFileSystem.java|  9 +++--
 .../apache/hadoop/fs/http/server/FSOperations.java | 14 ++--
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  | 38 ++
 3 files changed, 57 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 20533cf..d329386 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -182,6 +182,7 @@ public class HttpFSFileSystem extends FileSystem
   public static final String XATTR_NAME_JSON = "name";
   public static final String XATTR_VALUE_JSON = "value";
   public static final String XATTRNAMES_JSON = "XAttrNames";
+  public static final String SYMLINK_JSON = "symlink";
 
   public static final String FILE_CHECKSUM_JSON = "FileChecksum";
   public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
@@ -1075,6 +1076,9 @@ public class HttpFSFileSystem extends FileSystem
 String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
 Path path = (pathSuffix.equals("")) ? parent : new Path(parent, 
pathSuffix);
 FILE_TYPE type = FILE_TYPE.valueOf((String) json.get(TYPE_JSON));
+String symLinkValue =
+type == FILE_TYPE.SYMLINK ? (String) json.get(SYMLINK_JSON) : null;
+Path symLink = symLinkValue == null ? null : new Path(symLinkValue);
 long len = (Long) json.get(LENGTH_JSON);
 String owner = (String) json.get(OWNER_JSON);
 String group = (String) json.get(GROUP_JSON);
@@ -1099,11 +1103,12 @@ public class HttpFSFileSystem extends FileSystem
   new FsPermissionExtension(permission, aBit, eBit, ecBit);
   FileStatus fileStatus = new FileStatus(len, FILE_TYPE.DIRECTORY == type,
   replication, blockSize, mTime, aTime, deprecatedPerm, owner, group,
-  null, path, FileStatus.attributes(aBit, eBit, ecBit, seBit));
+  symLink, path, FileStatus.attributes(aBit, eBit, ecBit, seBit));
   return fileStatus;
 } else {
   return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
-  replication, blockSize, mTime, aTime, permission, owner, group, 
path);
+  replication, blockSize, mTime, aTime, permission, owner, group,
+  symLink, path);
 }
   }
 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index a9a9c70..128a384 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem.FILE_TYPE;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -103,8 +104,17 @@ public class FSOperations {
 Map json = new LinkedHashMap();
 json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
 (emptyPathSuffix) ? "" : fileStatus.getPath().getName());
-json.put(HttpFSFileSystem.TYPE_JSON,
-HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString());
+FILE_TYPE fileType = HttpFSFileSystem.FILE_TYPE.getType(fileStatus);
+json.put(HttpFSFileSystem.TYPE_JSON, fileType.toString());
+if (fileType.equals(FILE_TYPE.SYMLINK)) {
+  // put the symlink into Json
+  try {
+json.put(HttpFSFileSystem.SYMLINK_JSON,
+fileStatus.getSymlink().getName());
+  } catch (IOException e) {
+// Can't happen.
+  }
+}
 json.put(HttpFSFileSystem.LENGTH_JSON, file

[hadoop] branch branch-3.1 updated: HDFS-15627. Log delete audits before collecting blocks.

2020-10-16 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 9aefc17  HDFS-15627. Log delete audits before collecting blocks.
9aefc17 is described below

commit 9aefc1759b8ca7f13813b3144af19de28d379bda
Author: Kihwal Lee 
AuthorDate: Fri Oct 16 11:18:42 2020 -0500

HDFS-15627. Log delete audits before collecting blocks.

(cherry picked from commit 740a2c46353f8005dbed6f5bc15f21acfc4a6a23)
---
 .../main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ddecab8..89765d1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3052,10 +3052,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   writeUnlock(operationName);
 }
 getEditLog().logSync();
+logAuditEvent(true, operationName, src);
 if (toRemovedBlocks != null) {
   removeBlocks(toRemovedBlocks); // Incremental deletion of blocks
 }
-logAuditEvent(true, operationName, src);
 return ret;
   }
 





[hadoop] branch branch-3.2 updated: HDFS-15627. Log delete audits before collecting blocks.

2020-10-16 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 7e4572e  HDFS-15627. Log delete audits before collecting blocks.
7e4572e is described below

commit 7e4572e56a79c508a726fffc04b05c3c0607bb46
Author: Kihwal Lee 
AuthorDate: Fri Oct 16 11:17:34 2020 -0500

HDFS-15627. Log delete audits before collecting blocks.

(cherry picked from commit 740a2c46353f8005dbed6f5bc15f21acfc4a6a23)
---
 .../main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a937998..259c6a6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3111,10 +3111,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   writeUnlock(operationName);
 }
 getEditLog().logSync();
+logAuditEvent(true, operationName, src);
 if (toRemovedBlocks != null) {
   removeBlocks(toRemovedBlocks); // Incremental deletion of blocks
 }
-logAuditEvent(true, operationName, src);
 return ret;
   }
 





[hadoop] branch branch-3.3 updated: HDFS-15627. Log delete audits before collecting blocks.

2020-10-16 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 433434d  HDFS-15627. Log delete audits before collecting blocks.
433434d is described below

commit 433434d509030a2907d20f60b2940f1a19993c91
Author: Kihwal Lee 
AuthorDate: Fri Oct 16 11:16:47 2020 -0500

HDFS-15627. Log delete audits before collecting blocks.

(cherry picked from commit 740a2c46353f8005dbed6f5bc15f21acfc4a6a23)
---
 .../main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 1e3741e..dcedcfa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3254,10 +3254,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   throw e;
 }
 getEditLog().logSync();
+logAuditEvent(true, operationName, src);
 if (toRemovedBlocks != null) {
   removeBlocks(toRemovedBlocks); // Incremental deletion of blocks
 }
-logAuditEvent(true, operationName, src);
 return ret;
   }
 





[hadoop] branch trunk updated: HDFS-15627. Log delete audits before collecting blocks.

2020-10-16 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 740a2c4  HDFS-15627. Log delete audits before collecting blocks.
740a2c4 is described below

commit 740a2c46353f8005dbed6f5bc15f21acfc4a6a23
Author: Kihwal Lee 
AuthorDate: Fri Oct 16 11:13:41 2020 -0500

HDFS-15627. Log delete audits before collecting blocks.
---
 .../main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 13ad4c4..19def5c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3321,10 +3321,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   throw e;
 }
 getEditLog().logSync();
+logAuditEvent(true, operationName, src);
 if (toRemovedBlocks != null) {
   removeBlocks(toRemovedBlocks); // Incremental deletion of blocks
 }
-logAuditEvent(true, operationName, src);
 return ret;
   }
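
For context: removeBlocks() performs incremental deletion of the collected blocks, which can take a long time for large subtrees, so HDFS-15627 moves logAuditEvent() ahead of it. The delete now appears in the audit log as soon as the edit is synced, rather than after block collection finishes. A self-contained sketch of the resulting ordering (method names follow the diff; everything else is a stand-in, not FSNamesystem code):

  // Simplified ordering after HDFS-15627; illustrative only.
  import java.util.List;

  public class DeleteAuditOrderingSketch {
    void delete(String src, List<Long> toRemovedBlocks) {
      logEditsDurably();                   // getEditLog().logSync() in HDFS
      logAuditEvent(true, "delete", src);  // audit first, so it is not delayed...
      if (toRemovedBlocks != null) {
        removeBlocks(toRemovedBlocks);     // ...by incremental block deletion
      }
    }

    void logEditsDurably() { }
    void logAuditEvent(boolean ok, String op, String src) { }
    void removeBlocks(List<Long> blocks) { }
  }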
 





[hadoop] branch branch-3.2 updated: HDFS-15628. HttpFS server throws NPE if a file is a symlink. Contributed by Ahmed Hussein.

2020-10-14 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 392b26c  HDFS-15628. HttpFS server throws NPE if a file is a symlink. 
Contributed by Ahmed Hussein.
392b26c is described below

commit 392b26c23ba38cc6fe01323748d1b10c5cc3f50d
Author: Kihwal Lee 
AuthorDate: Wed Oct 14 18:19:16 2020 -0500

HDFS-15628. HttpFS server throws NPE if a file is a symlink. Contributed by 
Ahmed Hussein.

(cherry picked from commit e45407128d4e9a9804c777c8f845ad41e1280177)
---
 .../hadoop/fs/http/client/HttpFSFileSystem.java|  9 --
 .../apache/hadoop/fs/http/server/FSOperations.java | 14 ++--
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  | 37 ++
 3 files changed, 56 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 66c821c..d1fdaf1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -188,6 +188,7 @@ public class HttpFSFileSystem extends FileSystem
   public static final String XATTR_NAME_JSON = "name";
   public static final String XATTR_VALUE_JSON = "value";
   public static final String XATTRNAMES_JSON = "XAttrNames";
+  public static final String SYMLINK_JSON = "symlink";
 
   public static final String FILE_CHECKSUM_JSON = "FileChecksum";
   public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
@@ -1083,6 +1084,9 @@ public class HttpFSFileSystem extends FileSystem
 String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
 Path path = (pathSuffix.equals("")) ? parent : new Path(parent, 
pathSuffix);
 FILE_TYPE type = FILE_TYPE.valueOf((String) json.get(TYPE_JSON));
+String symLinkValue =
+type == FILE_TYPE.SYMLINK ? (String) json.get(SYMLINK_JSON) : null;
+Path symLink = symLinkValue == null ? null : new Path(symLinkValue);
 long len = (Long) json.get(LENGTH_JSON);
 String owner = (String) json.get(OWNER_JSON);
 String group = (String) json.get(GROUP_JSON);
@@ -1107,11 +,12 @@ public class HttpFSFileSystem extends FileSystem
   new FsPermissionExtension(permission, aBit, eBit, ecBit);
   FileStatus fileStatus = new FileStatus(len, FILE_TYPE.DIRECTORY == type,
   replication, blockSize, mTime, aTime, deprecatedPerm, owner, group,
-  null, path, FileStatus.attributes(aBit, eBit, ecBit, seBit));
+  symLink, path, FileStatus.attributes(aBit, eBit, ecBit, seBit));
   return fileStatus;
 } else {
   return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
-  replication, blockSize, mTime, aTime, permission, owner, group, 
path);
+  replication, blockSize, mTime, aTime, permission, owner, group,
+  symLink, path);
 }
   }
 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 043f3e1..896a18e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem.FILE_TYPE;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -107,8 +108,17 @@ public class FSOperations {
 Map json = new LinkedHashMap();
 json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
 (emptyPathSuffix) ? "" : fileStatus.getPath().getName());
-json.put(HttpFSFileSystem.TYPE_JSON,
-HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString());
+FILE_TYPE fileType = HttpFSFileSystem.FILE_TYPE.getType(fileStatus);
+json.put(HttpFSFileSystem.TYPE_JSON, fileType.toString());
+if (fileType.equals(FILE_TYPE.SYMLINK)) {
+  // put the symlink into Json
+  try {
+json.put(HttpFSFileSystem.SYMLINK_JSON,
+fileStatus.getSymlink().getName());
+  } catch (IOException e) {
+// Can't happen.
+  }
+}
 json.put(HttpFSFileSystem.LENGTH_JSON, file

[hadoop] branch branch-3.3 updated: HDFS-15628. HttpFS server throws NPE if a file is a symlink. Contributed by Ahmed Hussein.

2020-10-14 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 41a3c9b  HDFS-15628. HttpFS server throws NPE if a file is a symlink. 
Contributed by Ahmed Hussein.
41a3c9b is described below

commit 41a3c9bc95c73bf7be3ffb0e669d42aa57af2eb6
Author: Kihwal Lee 
AuthorDate: Wed Oct 14 17:28:02 2020 -0500

HDFS-15628. HttpFS server throws NPE if a file is a symlink. Contributed by 
Ahmed Hussein.

(cherry picked from commit e45407128d4e9a9804c777c8f845ad41e1280177)
---
 .../hadoop/fs/http/client/HttpFSFileSystem.java|  9 --
 .../apache/hadoop/fs/http/server/FSOperations.java | 14 ++--
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  | 37 ++
 3 files changed, 56 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 1722759..c77c6fd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -197,6 +197,7 @@ public class HttpFSFileSystem extends FileSystem
   public static final String XATTR_VALUE_JSON = "value";
   public static final String XATTRNAMES_JSON = "XAttrNames";
   public static final String ECPOLICY_JSON = "ecPolicyObj";
+  public static final String SYMLINK_JSON = "symlink";
 
   public static final String FILE_CHECKSUM_JSON = "FileChecksum";
   public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
@@ -1093,6 +1094,9 @@ public class HttpFSFileSystem extends FileSystem
 String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
 Path path = (pathSuffix.equals("")) ? parent : new Path(parent, 
pathSuffix);
 FILE_TYPE type = FILE_TYPE.valueOf((String) json.get(TYPE_JSON));
+String symLinkValue =
+type == FILE_TYPE.SYMLINK ? (String) json.get(SYMLINK_JSON) : null;
+Path symLink = symLinkValue == null ? null : new Path(symLinkValue);
 long len = (Long) json.get(LENGTH_JSON);
 String owner = (String) json.get(OWNER_JSON);
 String group = (String) json.get(GROUP_JSON);
@@ -1117,11 +1121,12 @@ public class HttpFSFileSystem extends FileSystem
   new FsPermissionExtension(permission, aBit, eBit, ecBit);
   FileStatus fileStatus = new FileStatus(len, FILE_TYPE.DIRECTORY == type,
   replication, blockSize, mTime, aTime, deprecatedPerm, owner, group,
-  null, path, FileStatus.attributes(aBit, eBit, ecBit, seBit));
+  symLink, path, FileStatus.attributes(aBit, eBit, ecBit, seBit));
   return fileStatus;
 } else {
   return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
-  replication, blockSize, mTime, aTime, permission, owner, group, 
path);
+  replication, blockSize, mTime, aTime, permission, owner, group,
+  symLink, path);
 }
   }
 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 0f8ea07..a1dcc49 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem.FILE_TYPE;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -110,8 +111,17 @@ public class FSOperations {
 Map json = new LinkedHashMap();
 json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
 (emptyPathSuffix) ? "" : fileStatus.getPath().getName());
-json.put(HttpFSFileSystem.TYPE_JSON,
-HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString());
+FILE_TYPE fileType = HttpFSFileSystem.FILE_TYPE.getType(fileStatus);
+json.put(HttpFSFileSystem.TYPE_JSON, fileType.toString());
+if (fileType.equals(FILE_TYPE.SYMLINK)) {
+  // put the symlink into Json
+  try {
+json.put(HttpFSFileSystem.SYMLINK_JSON,
+fileStatus.getSymlink().getName());
+  } catch (IOException e) {
+// Can't happen.
+  }
+}
 json.put(HttpFSFileSystem.LENGTH_JSON, file

[hadoop] branch trunk updated: HDFS-15628. HttpFS server throws NPE if a file is a symlink. Contributed by Ahmed Hussein.

2020-10-14 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e454071  HDFS-15628. HttpFS server throws NPE if a file is a symlink. 
Contributed by Ahmed Hussein.
e454071 is described below

commit e45407128d4e9a9804c777c8f845ad41e1280177
Author: Kihwal Lee 
AuthorDate: Wed Oct 14 17:26:04 2020 -0500

HDFS-15628. HttpFS server throws NPE if a file is a symlink. Contributed by 
Ahmed Hussein.
---
 .../hadoop/fs/http/client/HttpFSFileSystem.java|  9 --
 .../apache/hadoop/fs/http/server/FSOperations.java | 14 ++--
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  | 37 ++
 3 files changed, 56 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index ccd71d9..c907463 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -199,6 +199,7 @@ public class HttpFSFileSystem extends FileSystem
   public static final String XATTR_VALUE_JSON = "value";
   public static final String XATTRNAMES_JSON = "XAttrNames";
   public static final String ECPOLICY_JSON = "ecPolicyObj";
+  public static final String SYMLINK_JSON = "symlink";
 
   public static final String FILE_CHECKSUM_JSON = "FileChecksum";
   public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
@@ -1101,6 +1102,9 @@ public class HttpFSFileSystem extends FileSystem
 String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
 Path path = (pathSuffix.equals("")) ? parent : new Path(parent, 
pathSuffix);
 FILE_TYPE type = FILE_TYPE.valueOf((String) json.get(TYPE_JSON));
+String symLinkValue =
+type == FILE_TYPE.SYMLINK ? (String) json.get(SYMLINK_JSON) : null;
+Path symLink = symLinkValue == null ? null : new Path(symLinkValue);
 long len = (Long) json.get(LENGTH_JSON);
 String owner = (String) json.get(OWNER_JSON);
 String group = (String) json.get(GROUP_JSON);
@@ -1125,11 +1129,12 @@ public class HttpFSFileSystem extends FileSystem
   new FsPermissionExtension(permission, aBit, eBit, ecBit);
   FileStatus fileStatus = new FileStatus(len, FILE_TYPE.DIRECTORY == type,
   replication, blockSize, mTime, aTime, deprecatedPerm, owner, group,
-  null, path, FileStatus.attributes(aBit, eBit, ecBit, seBit));
+  symLink, path, FileStatus.attributes(aBit, eBit, ecBit, seBit));
   return fileStatus;
 } else {
   return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
-  replication, blockSize, mTime, aTime, permission, owner, group, 
path);
+  replication, blockSize, mTime, aTime, permission, owner, group,
+  symLink, path);
 }
   }
 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 6effb83..67d4761 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem.FILE_TYPE;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -111,8 +112,17 @@ public class FSOperations {
 Map json = new LinkedHashMap();
 json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
 (emptyPathSuffix) ? "" : fileStatus.getPath().getName());
-json.put(HttpFSFileSystem.TYPE_JSON,
-HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString());
+FILE_TYPE fileType = HttpFSFileSystem.FILE_TYPE.getType(fileStatus);
+json.put(HttpFSFileSystem.TYPE_JSON, fileType.toString());
+if (fileType.equals(FILE_TYPE.SYMLINK)) {
+  // put the symlink into Json
+  try {
+json.put(HttpFSFileSystem.SYMLINK_JSON,
+fileStatus.getSymlink().getName());
+  } catch (IOException e) {
+// Can't happen.
+  }
+}
 json.put(HttpFSFileSystem.LENGTH_JSON, fileStatus.getLen());
 json.put(HttpFSFileSystem.OWNER_JSON, fileStatus.getOwner());
 js
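
The HDFS-15628 change is symmetric: FSOperations now writes a "symlink" entry into the status JSON for SYMLINK entries, and HttpFSFileSystem reads it back into the FileStatus instead of passing null. The round trip below is only an illustration; the key name comes from the patch, while the helper class does not exist in HttpFS.

  // Illustrative round trip for the new "symlink" JSON field (HDFS-15628).
  import java.util.LinkedHashMap;
  import java.util.Map;
  import org.apache.hadoop.fs.Path;

  public class SymlinkJsonSketch {
    static final String SYMLINK_JSON = "symlink";  // same key as the patch

    /** Server side: record the link target name only for symlinks. */
    static void putSymlink(Map<String, Object> json, boolean isSymlink, Path target) {
      if (isSymlink) {
        json.put(SYMLINK_JSON, target.getName());
      }
    }

    /** Client side: an absent field means "not a symlink". */
    static Path readSymlink(Map<String, Object> json) {
      String value = (String) json.get(SYMLINK_JSON);
      return value == null ? null : new Path(value);
    }

    public static void main(String[] args) {
      Map<String, Object> json = new LinkedHashMap<>();
      putSymlink(json, true, new Path("/data/real-target"));
      System.out.println(readSymlink(json)); // prints real-target
    }
  }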

[hadoop] branch branch-3.2 updated: HDFS-15581. Access Controlled HttpFS Server. Contributed by Richard Ross. (cherry picked from commit dfc268221352880992c55abec9e6b7b73044b0f1)

2020-09-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 25a51f2  HDFS-15581. Access Controlled HttpFS Server. Contributed by 
Richard Ross. (cherry picked from commit 
dfc268221352880992c55abec9e6b7b73044b0f1)
25a51f2 is described below

commit 25a51f2f398b8399cdd36d37bacd796cb5bcd8f4
Author: Kihwal Lee 
AuthorDate: Tue Sep 22 11:12:43 2020 -0500

HDFS-15581. Access Controlled HttpFS Server. Contributed by Richard Ross.
(cherry picked from commit dfc268221352880992c55abec9e6b7b73044b0f1)
---
 .../apache/hadoop/fs/http/server/HttpFSServer.java |  48 +++
 .../src/main/resources/httpfs-default.xml  |  11 +
 .../fs/http/server/TestHttpFSAccessControlled.java | 355 +
 3 files changed, 414 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index f2ef811..bb031fb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -103,8 +103,38 @@ import java.util.Map;
 @Path(HttpFSFileSystem.SERVICE_VERSION)
 @InterfaceAudience.Private
 public class HttpFSServer {
+
+  enum AccessMode {
+READWRITE, WRITEONLY, READONLY;
+  }
   private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
   private static final Logger LOG = 
LoggerFactory.getLogger(HttpFSServer.class);
+  AccessMode accessMode = AccessMode.READWRITE;
+
+  public HttpFSServer() {
+Configuration conf = HttpFSServerWebApp.get().getConfig();
+final String accessModeString =  conf.get("httpfs.access.mode", 
"read-write").toLowerCase();
+if(accessModeString.compareTo("write-only") == 0)
+  accessMode = AccessMode.WRITEONLY;
+else if(accessModeString.compareTo("read-only") == 0)
+  accessMode = AccessMode.READONLY;
+else
+  accessMode = AccessMode.READWRITE;
+  }
+
+
+  // First try getting a user through HttpUserGroupInformation. This will 
return
+  // if the built-in hadoop auth filter is not used.  Fall back to getting the
+  // authenticated user from the request.
+  private UserGroupInformation getHttpUGI(HttpServletRequest request) {
+UserGroupInformation user = HttpUserGroupInformation.get();
+if (user != null) {
+  return user;
+}
+
+return 
UserGroupInformation.createRemoteUser(request.getUserPrincipal().getName());
+  }
+
 
   /**
* Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem 
for the effective
@@ -215,6 +245,12 @@ public class HttpFSServer {
   @Context Parameters params,
   @Context HttpServletRequest request)
 throws IOException, FileSystemAccessException {
+// Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
+if((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) &&
+(op.value() != HttpFSFileSystem.Operation.LISTSTATUS) &&
+accessMode == AccessMode.WRITEONLY) {
+  return Response.status(Response.Status.FORBIDDEN).build();
+}
 UserGroupInformation user = HttpUserGroupInformation.get();
 Response response;
 path = makeAbsolute(path);
@@ -461,6 +497,10 @@ public class HttpFSServer {
  @Context Parameters params,
  @Context HttpServletRequest request)
 throws IOException, FileSystemAccessException {
+// Do not allow DELETE commands in read-only mode
+if(accessMode == AccessMode.READONLY) {
+  return Response.status(Response.Status.FORBIDDEN).build();
+}
 UserGroupInformation user = HttpUserGroupInformation.get();
 Response response;
 path = makeAbsolute(path);
@@ -524,6 +564,10 @@ public class HttpFSServer {
@Context Parameters params,
@Context HttpServletRequest request)
 throws IOException, FileSystemAccessException {
+// Do not allow POST commands in read-only mode
+if(accessMode == AccessMode.READONLY) {
+  return Response.status(Response.Status.FORBIDDEN).build();
+}
 UserGroupInformation user = HttpUserGroupInformation.get();
 Response response;
 path = makeAbsolute(path);
@@ -634,6 +678,10 @@ public class HttpFSServer {
@Context Parameters params,
@Context HttpServletRequest request)
 throws IOException, FileSystemAccessException {
+// Do not allow PUT commands in read-only mode
+if(accessMode == AccessMode.READONLY

[hadoop] branch branch-3.3 updated: HDFS-15581. Access Controlled HttpFS Server. Contributed by Richard Ross.

2020-09-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 7eb91ac  HDFS-15581. Access Controlled HttpFS Server. Contributed by 
Richard Ross.
7eb91ac is described below

commit 7eb91ac1b2fb12ebcd86e1e3b3291f999826aff9
Author: Kihwal Lee 
AuthorDate: Tue Sep 22 10:55:26 2020 -0500

HDFS-15581. Access Controlled HttpFS Server. Contributed by Richard Ross.

(cherry picked from commit dfc268221352880992c55abec9e6b7b73044b0f1)
---
 .../apache/hadoop/fs/http/server/HttpFSServer.java |  48 +++
 .../src/main/resources/httpfs-default.xml  |  11 +
 .../fs/http/server/TestHttpFSAccessControlled.java | 355 +
 3 files changed, 414 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index 3cca83e..76b0a83 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -106,8 +106,38 @@ import java.util.Map;
 @Path(HttpFSFileSystem.SERVICE_VERSION)
 @InterfaceAudience.Private
 public class HttpFSServer {
+
+  enum AccessMode {
+READWRITE, WRITEONLY, READONLY;
+  }
   private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
   private static final Logger LOG = 
LoggerFactory.getLogger(HttpFSServer.class);
+  AccessMode accessMode = AccessMode.READWRITE;
+
+  public HttpFSServer() {
+Configuration conf = HttpFSServerWebApp.get().getConfig();
+final String accessModeString =  conf.get("httpfs.access.mode", 
"read-write").toLowerCase();
+if(accessModeString.compareTo("write-only") == 0)
+  accessMode = AccessMode.WRITEONLY;
+else if(accessModeString.compareTo("read-only") == 0)
+  accessMode = AccessMode.READONLY;
+else
+  accessMode = AccessMode.READWRITE;
+  }
+
+
+  // First try getting a user through HttpUserGroupInformation. This will 
return
+  // if the built-in hadoop auth filter is not used.  Fall back to getting the
+  // authenticated user from the request.
+  private UserGroupInformation getHttpUGI(HttpServletRequest request) {
+UserGroupInformation user = HttpUserGroupInformation.get();
+if (user != null) {
+  return user;
+}
+
+return 
UserGroupInformation.createRemoteUser(request.getUserPrincipal().getName());
+  }
+
 
   /**
* Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem 
for the effective
@@ -218,6 +248,12 @@ public class HttpFSServer {
   @Context Parameters params,
   @Context HttpServletRequest request)
 throws IOException, FileSystemAccessException {
+// Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
+if((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) &&
+(op.value() != HttpFSFileSystem.Operation.LISTSTATUS) &&
+accessMode == AccessMode.WRITEONLY) {
+  return Response.status(Response.Status.FORBIDDEN).build();
+}
 UserGroupInformation user = HttpUserGroupInformation.get();
 Response response;
 path = makeAbsolute(path);
@@ -490,6 +526,10 @@ public class HttpFSServer {
  @Context Parameters params,
  @Context HttpServletRequest request)
 throws IOException, FileSystemAccessException {
+// Do not allow DELETE commands in read-only mode
+if(accessMode == AccessMode.READONLY) {
+  return Response.status(Response.Status.FORBIDDEN).build();
+}
 UserGroupInformation user = HttpUserGroupInformation.get();
 Response response;
 path = makeAbsolute(path);
@@ -577,6 +617,10 @@ public class HttpFSServer {
@Context Parameters params,
@Context HttpServletRequest request)
 throws IOException, FileSystemAccessException {
+// Do not allow POST commands in read-only mode
+if(accessMode == AccessMode.READONLY) {
+  return Response.status(Response.Status.FORBIDDEN).build();
+}
 UserGroupInformation user = HttpUserGroupInformation.get();
 Response response;
 path = makeAbsolute(path);
@@ -718,6 +762,10 @@ public class HttpFSServer {
@Context Parameters params,
@Context HttpServletRequest request)
 throws IOException, FileSystemAccessException {
+// Do not allow PUT commands in read-only mode
+if(accessMode == AccessMode.READONLY) {
+  return Response.status(Response.Status.FORBIDDEN).build();
+}
 UserGroupInforma

[hadoop] branch trunk updated: HDFS-15581. Access Controlled HttpFS Server. Contributed by Richard Ross.

2020-09-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new dfc2682  HDFS-15581. Access Controlled HttpFS Server. Contributed by 
Richard Ross.
dfc2682 is described below

commit dfc268221352880992c55abec9e6b7b73044b0f1
Author: Kihwal Lee 
AuthorDate: Tue Sep 22 10:53:04 2020 -0500

HDFS-15581. Access Controlled HttpFS Server. Contributed by Richard Ross.
---
 .../apache/hadoop/fs/http/server/HttpFSServer.java |  48 +++
 .../src/main/resources/httpfs-default.xml  |  11 +
 .../fs/http/server/TestHttpFSAccessControlled.java | 355 +
 3 files changed, 414 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index bae9dd1..f2dd5d8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -107,8 +107,38 @@ import java.util.Set;
 @Path(HttpFSFileSystem.SERVICE_VERSION)
 @InterfaceAudience.Private
 public class HttpFSServer {
+
+  enum AccessMode {
+READWRITE, WRITEONLY, READONLY;
+  }
   private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
   private static final Logger LOG = 
LoggerFactory.getLogger(HttpFSServer.class);
+  AccessMode accessMode = AccessMode.READWRITE;
+
+  public HttpFSServer() {
+Configuration conf = HttpFSServerWebApp.get().getConfig();
+final String accessModeString =  conf.get("httpfs.access.mode", 
"read-write").toLowerCase();
+if(accessModeString.compareTo("write-only") == 0)
+  accessMode = AccessMode.WRITEONLY;
+else if(accessModeString.compareTo("read-only") == 0)
+  accessMode = AccessMode.READONLY;
+else
+  accessMode = AccessMode.READWRITE;
+  }
+
+
+  // First try getting a user through HttpUserGroupInformation. This will 
return
+  // if the built-in hadoop auth filter is not used.  Fall back to getting the
+  // authenticated user from the request.
+  private UserGroupInformation getHttpUGI(HttpServletRequest request) {
+UserGroupInformation user = HttpUserGroupInformation.get();
+if (user != null) {
+  return user;
+}
+
+return 
UserGroupInformation.createRemoteUser(request.getUserPrincipal().getName());
+  }
+
 
   /**
* Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem 
for the effective
@@ -219,6 +249,12 @@ public class HttpFSServer {
   @Context Parameters params,
   @Context HttpServletRequest request)
 throws IOException, FileSystemAccessException {
+// Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
+if((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) &&
+(op.value() != HttpFSFileSystem.Operation.LISTSTATUS) &&
+accessMode == AccessMode.WRITEONLY) {
+  return Response.status(Response.Status.FORBIDDEN).build();
+}
 UserGroupInformation user = HttpUserGroupInformation.get();
 Response response;
 path = makeAbsolute(path);
@@ -491,6 +527,10 @@ public class HttpFSServer {
  @Context Parameters params,
  @Context HttpServletRequest request)
 throws IOException, FileSystemAccessException {
+// Do not allow DELETE commands in read-only mode
+if(accessMode == AccessMode.READONLY) {
+  return Response.status(Response.Status.FORBIDDEN).build();
+}
 UserGroupInformation user = HttpUserGroupInformation.get();
 Response response;
 path = makeAbsolute(path);
@@ -578,6 +618,10 @@ public class HttpFSServer {
@Context Parameters params,
@Context HttpServletRequest request)
 throws IOException, FileSystemAccessException {
+// Do not allow POST commands in read-only mode
+if(accessMode == AccessMode.READONLY) {
+  return Response.status(Response.Status.FORBIDDEN).build();
+}
 UserGroupInformation user = HttpUserGroupInformation.get();
 Response response;
 path = makeAbsolute(path);
@@ -718,6 +762,10 @@ public class HttpFSServer {
@Context Parameters params,
@Context HttpServletRequest request)
 throws IOException, FileSystemAccessException {
+// Do not allow PUT commands in read-only mode
+if(accessMode == AccessMode.READONLY) {
+  return Response.status(Response.Status.FORBIDDEN).build();
+}
 UserGroupInformation user = HttpUserGroupInformation.get();
 Response response;
 path = makeAbsolu
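
HDFS-15581 reads httpfs.access.mode once in the HttpFSServer constructor (default "read-write"); "write-only" restricts GET to GETFILESTATUS and LISTSTATUS, and "read-only" rejects DELETE, POST and PUT with 403 FORBIDDEN. Operationally the property would live in httpfs-site.xml; the snippet below only mirrors the parsing shown in the diff and sets the value programmatically for illustration.

  // Hedged sketch: selecting the HttpFS access mode added by HDFS-15581.
  import org.apache.hadoop.conf.Configuration;

  public class HttpFSAccessModeExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration(false);
      conf.set("httpfs.access.mode", "read-only"); // DELETE/POST/PUT answer 403

      // Mirror of the parsing logic in the HttpFSServer constructor.
      String mode = conf.get("httpfs.access.mode", "read-write").toLowerCase();
      boolean readOnly = mode.equals("read-only");
      boolean writeOnly = mode.equals("write-only");
      System.out.println("read-only=" + readOnly + ", write-only=" + writeOnly);
    }
  }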

[hadoop] branch branch-3.1 updated: MAPREDUCE-7069. Add ability to specify user environment variables individually. Contributed by Jim Brennan

2020-08-13 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 0c35e44  MAPREDUCE-7069. Add ability to specify user environment 
variables individually. Contributed by Jim Brennan
0c35e44 is described below

commit 0c35e4418479635406ab278629f1da15e7d8e7ae
Author: Kihwal Lee 
AuthorDate: Thu Aug 13 19:02:03 2020 -0500

MAPREDUCE-7069. Add ability to specify user environment variables 
individually. Contributed by Jim Brennan

(cherry picked from commit 4571351cccf6d4977469d3d623cf045b06a5f5f0)
---
 .../apache/hadoop/mapred/MapReduceChildJVM.java|  73 +--
 .../mapreduce/v2/app/job/impl/TaskAttemptImpl.java |   8 +-
 .../v2/app/job/impl/TestMapReduceChildJVM.java |  24 +++-
 .../apache/hadoop/mapreduce/v2/util/MRApps.java|  10 ++
 .../java/org/apache/hadoop/mapred/JobConf.java |  18 +++
 .../src/main/resources/mapred-default.xml  |  61 +++--
 .../src/site/markdown/MapReduceTutorial.md |   6 +
 .../java/org/apache/hadoop/mapred/YARNRunner.java  |  11 +-
 .../org/apache/hadoop/mapred/TestYARNRunner.java   |  26 +++-
 .../java/org/apache/hadoop/yarn/util/Apps.java | 115 ++---
 .../java/org/apache/hadoop/yarn/util/TestApps.java | 136 +
 11 files changed, 407 insertions(+), 81 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
index 936dc5a..d305f9f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.mapred;
 
 import java.net.InetSocketAddress;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Vector;
@@ -28,7 +27,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.TaskLog.LogName;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
@@ -42,50 +40,53 @@ public class MapReduceChildJVM {
 filter.toString();
   }
 
-  private static String getChildEnv(JobConf jobConf, boolean isMap) {
+  private static String getChildEnvProp(JobConf jobConf, boolean isMap) {
 if (isMap) {
-  return jobConf.get(JobConf.MAPRED_MAP_TASK_ENV,
-  jobConf.get(JobConf.MAPRED_TASK_ENV));
+  return JobConf.MAPRED_MAP_TASK_ENV;
 }
-return jobConf.get(JobConf.MAPRED_REDUCE_TASK_ENV,
-jobConf.get(JobConf.MAPRED_TASK_ENV));
+return JobConf.MAPRED_REDUCE_TASK_ENV;
+  }
+
+  private static String getChildEnvDefaultValue(JobConf jobConf) {
+// There is no default value for these - use the fallback value instead.
+return jobConf.get(JobConf.MAPRED_TASK_ENV);
   }
 
   public static void setVMEnv(Map environment,
   Task task) {
 
 JobConf conf = task.conf;
-// Add the env variables passed by the user
-String mapredChildEnv = getChildEnv(conf, task.isMapTask());
-MRApps.setEnvFromInputString(environment, mapredChildEnv, conf);
-
-// Set logging level in the environment.
-// This is so that, if the child forks another "bin/hadoop" (common in
-// streaming) it will have the correct loglevel.
-environment.put(
-"HADOOP_ROOT_LOGGER", 
-MRApps.getChildLogLevel(conf, task.isMapTask()) + ",console");
-
-// TODO: The following is useful for instance in streaming tasks. Should be
-// set in ApplicationMaster's env by the RM.
-String hadoopClientOpts = System.getenv("HADOOP_CLIENT_OPTS");
-if (hadoopClientOpts == null) {
-  hadoopClientOpts = "";
-} else {
-  hadoopClientOpts = hadoopClientOpts + " ";
+boolean isMap = task.isMapTask();
+
+// Remove these before adding the user variables to prevent
+// MRApps.setEnvFromInputProperty() from appending to them.
+String hadoopRootLoggerKey = "HADOOP_ROOT_LOGGER";
+String hadoopClientOptsKey = "HADOOP_CLIENT_OPTS";
+environment.remove(hadoopRootLoggerKey);
+environment.remove(hadoopClientOptsKey);
+
+// Add the environment variables passed by the user
+MRApps.setEnvFromInputProperty(environment, getChildEn
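
The refactor above splits the old getChildEnv() lookup into the task-type property (JobConf.MAPRED_MAP_TASK_ENV or JobConf.MAPRED_REDUCE_TASK_ENV) and the JobConf.MAPRED_TASK_ENV fallback, so MRApps.setEnvFromInputProperty() can expand per-variable settings on top of the comma-separated form. A hedged usage sketch; the per-variable spelling shown (property name with ".VARIABLE" appended) is an assumption based on the MAPREDUCE-7069 description, not quoted from this patch:

  import org.apache.hadoop.mapred.JobConf;

  public class ChildEnvSketch {
    public static void main(String[] args) {
      JobConf conf = new JobConf();
      // Comma-separated form that predates this change:
      conf.set(JobConf.MAPRED_MAP_TASK_ENV,
          "LD_LIBRARY_PATH=/opt/native,TMPDIR=/grid/tmp");
      // Individual form (assumed: "<property>.<VARIABLE>"), handy when the
      // value itself contains commas:
      conf.set(JobConf.MAPRED_MAP_TASK_ENV + ".EXTRA_CLASSPATH",
          "/opt/jars/a.jar,/opt/jars/b.jar");
      System.out.println(conf.get(JobConf.MAPRED_MAP_TASK_ENV + ".EXTRA_CLASSPATH"));
    }
  }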

[hadoop] branch branch-2.10 updated: MAPREDUCE-7069. Add ability to specify user environment variables individually. Contributed by Jim Brennan

2020-08-13 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 283def8  MAPREDUCE-7069. Add ability to specify user environment 
variables individually. Contributed by Jim Brennan
283def8 is described below

commit 283def8bc5f16b139c0b35ad924085ee3baf9631
Author: Kihwal Lee 
AuthorDate: Thu Aug 13 18:54:31 2020 -0500

MAPREDUCE-7069. Add ability to specify user environment variables 
individually. Contributed by Jim Brennan

(cherry picked from commit 4571351cccf6d4977469d3d623cf045b06a5f5f0)
---
 .../apache/hadoop/mapred/MapReduceChildJVM.java|  73 +--
 .../mapreduce/v2/app/job/impl/TaskAttemptImpl.java |   8 +-
 .../v2/app/job/impl/TestMapReduceChildJVM.java |  24 +++-
 .../apache/hadoop/mapreduce/v2/util/MRApps.java|  10 ++
 .../java/org/apache/hadoop/mapred/JobConf.java |  18 +++
 .../src/main/resources/mapred-default.xml  |  61 +++--
 .../src/site/markdown/MapReduceTutorial.md |   6 +
 .../java/org/apache/hadoop/mapred/YARNRunner.java  |  11 +-
 .../org/apache/hadoop/mapred/TestYARNRunner.java   |  26 +++-
 .../java/org/apache/hadoop/yarn/util/Apps.java | 115 ++---
 .../java/org/apache/hadoop/yarn/util/TestApps.java | 136 +
 11 files changed, 407 insertions(+), 81 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
index 817b3a5..1a427a0 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.mapred;
 
 import java.net.InetSocketAddress;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Vector;
@@ -27,7 +26,6 @@ import java.util.Vector;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.TaskLog.LogName;
 import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
@@ -41,50 +39,53 @@ public class MapReduceChildJVM {
 filter.toString();
   }
 
-  private static String getChildEnv(JobConf jobConf, boolean isMap) {
+  private static String getChildEnvProp(JobConf jobConf, boolean isMap) {
 if (isMap) {
-  return jobConf.get(JobConf.MAPRED_MAP_TASK_ENV,
-  jobConf.get(JobConf.MAPRED_TASK_ENV));
+  return JobConf.MAPRED_MAP_TASK_ENV;
 }
-return jobConf.get(JobConf.MAPRED_REDUCE_TASK_ENV,
-jobConf.get(JobConf.MAPRED_TASK_ENV));
+return JobConf.MAPRED_REDUCE_TASK_ENV;
+  }
+
+  private static String getChildEnvDefaultValue(JobConf jobConf) {
+// There is no default value for these - use the fallback value instead.
+return jobConf.get(JobConf.MAPRED_TASK_ENV);
   }
 
   public static void setVMEnv(Map environment,
   Task task) {
 
 JobConf conf = task.conf;
-// Add the env variables passed by the user
-String mapredChildEnv = getChildEnv(conf, task.isMapTask());
-MRApps.setEnvFromInputString(environment, mapredChildEnv, conf);
-
-// Set logging level in the environment.
-// This is so that, if the child forks another "bin/hadoop" (common in
-// streaming) it will have the correct loglevel.
-environment.put(
-"HADOOP_ROOT_LOGGER", 
-MRApps.getChildLogLevel(conf, task.isMapTask()) + ",console");
-
-// TODO: The following is useful for instance in streaming tasks. Should be
-// set in ApplicationMaster's env by the RM.
-String hadoopClientOpts = System.getenv("HADOOP_CLIENT_OPTS");
-if (hadoopClientOpts == null) {
-  hadoopClientOpts = "";
-} else {
-  hadoopClientOpts = hadoopClientOpts + " ";
+boolean isMap = task.isMapTask();
+
+// Remove these before adding the user variables to prevent
+// MRApps.setEnvFromInputProperty() from appending to them.
+String hadoopRootLoggerKey = "HADOOP_ROOT_LOGGER";
+String hadoopClientOptsKey = "HADOOP_CLIENT_OPTS";
+environment.remove(hadoopRootLoggerKey);
+environment.remove(hadoopClientOptsKey);
+
+// Add the environment variables passed by the user
+MRApps.setEnvFromInputProperty(environment, getChildEnvProp(conf, isMap),

[hadoop] branch branch-2.10 updated: HDFS-12453. TestDataNodeHotSwapVolumes fails in trunk Jenkins runs. Contributed by Jim Brennan and Lei Xu.

2020-06-12 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 1710975  HDFS-12453. TestDataNodeHotSwapVolumes fails in trunk Jenkins 
runs. Contributed by Jim Brennan and Lei Xu.
1710975 is described below

commit 17109758dd6f9a86b226c025ee20b8e2abc9d366
Author: Kihwal Lee 
AuthorDate: Fri Jun 12 16:19:36 2020 -0500

HDFS-12453. TestDataNodeHotSwapVolumes fails in trunk Jenkins runs. 
Contributed by Jim Brennan and Lei Xu.
---
 .../datanode/TestDataNodeHotSwapVolumes.java   | 149 ++---
 1 file changed, 97 insertions(+), 52 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 93c1242..e98b90a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.BlockMissingException;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -36,6 +37,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -46,6 +49,7 @@ import 
org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -83,6 +87,7 @@ import static org.hamcrest.CoreMatchers.not;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -777,12 +782,11 @@ public class TestDataNodeHotSwapVolumes {
   private void testRemoveVolumeBeingWrittenForDatanode(int dataNodeIdx)
   throws IOException, ReconfigurationException, TimeoutException,
   InterruptedException, BrokenBarrierException {
-// Starts DFS cluster with 3 DataNodes to form a pipeline.
-startDFSCluster(1, 3);
+startDFSCluster(1, 4);
 
 final short REPLICATION = 3;
-final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
-final FileSystem fs = cluster.getFileSystem();
+final DistributedFileSystem fs = cluster.getFileSystem();
+final DFSClient client = fs.getClient();
 final Path testFile = new Path("/test");
 FSDataOutputStream out = fs.create(testFile, REPLICATION);
 
@@ -792,54 +796,102 @@ public class TestDataNodeHotSwapVolumes {
 out.write(writeBuf);
 out.hflush();
 
-// Make FsDatasetSpi#finalizeBlock a time-consuming operation. So if the
-// BlockReceiver releases volume reference before finalizeBlock(), the 
blocks
-// on the volume will be removed, and finalizeBlock() throws IOE.
-final FsDatasetSpi data = dn.data;
-dn.data = Mockito.spy(data);
-doAnswer(new Answer() {
-  public Object answer(InvocationOnMock invocation)
-  throws IOException, InterruptedException {
-Thread.sleep(1000);
-// Bypass the argument to FsDatasetImpl#finalizeBlock to verify 
that
-// the block is not removed, since the volume reference should not
-// be released at this point.
-data.finalizeBlock((ExtendedBlock) invocation.getArguments()[0],
-  (boolean) invocation.getArguments()[1]);
-return null;
-  }
-}).when(dn.data).finalizeBlock(any(ExtendedBlock.class),
-Mockito.anyBoolean());
-
-final CyclicBarrier barrier = new CyclicBarrier(2);
+BlockLocation[] blocks = fs.getFileBlockLocations(t
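
For reference, the hunk removed above used a Mockito spy to slow down FsDatasetSpi#finalizeBlock so the volume-removal race could be observed; the replacement locates the pipeline datanode from the block locations instead. The general spy-with-delay pattern, reduced to a self-contained sketch around a hypothetical interface:

  import static org.mockito.Mockito.any;
  import static org.mockito.Mockito.doAnswer;
  import static org.mockito.Mockito.spy;

  public class SpyDelaySketch {
    // Hypothetical stand-in for FsDatasetSpi.
    interface BlockStore {
      void finalizeBlock(String blockId) throws Exception;
    }

    static BlockStore withDelayedFinalize(final BlockStore real) {
      BlockStore spied = spy(real);
      doAnswer(invocation -> {
        Thread.sleep(1000); // widen the race window, as the removed test did
        real.finalizeBlock((String) invocation.getArguments()[0]);
        return null;
      }).when(spied).finalizeBlock(any(String.class));
      return spied;
    }
  }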

[hadoop] branch branch-2.9 updated: HADOOP-16255. Add ChecksumFs.rename(path, path, boolean). Contributed by Jungtaek Lim.

2020-06-08 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
 new 89b9765  HADOOP-16255. Add ChecksumFs.rename(path, path, boolean). 
Contributed by Jungtaek Lim.
89b9765 is described below

commit 89b976556f4c40f16ea82e3e140239a42cadf408
Author: Kihwal Lee 
AuthorDate: Mon Jun 8 17:38:33 2020 -0500

HADOOP-16255. Add ChecksumFs.rename(path, path, boolean). Contributed by 
Jungtaek Lim.

(cherry picked from commit 14ff6171a5879f63c1188b07cff8cbe135b9f802)
---
 .../main/java/org/apache/hadoop/fs/ChecksumFs.java | 26 ++
 1 file changed, 26 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index e006c13..b0fdf01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -472,6 +472,32 @@ public abstract class ChecksumFs extends FilterFs {
 }
   }
 
+  @Override
+  public void renameInternal(Path src, Path dst, boolean overwrite)
+  throws AccessControlException, FileAlreadyExistsException,
+  FileNotFoundException, ParentNotDirectoryException,
+  UnresolvedLinkException, IOException {
+Options.Rename renameOpt = Options.Rename.NONE;
+if (overwrite) {
+  renameOpt = Options.Rename.OVERWRITE;
+}
+
+if (isDirectory(src)) {
+  getMyFs().rename(src, dst, renameOpt);
+} else {
+  getMyFs().rename(src, dst, renameOpt);
+
+  Path checkFile = getChecksumFile(src);
+  if (exists(checkFile)) { //try to rename checksum
+if (isDirectory(dst)) {
+  getMyFs().rename(checkFile, dst, renameOpt);
+} else {
+  getMyFs().rename(checkFile, getChecksumFile(dst), renameOpt);
+}
+  }
+}
+  }
+
   /**
* Implement the delete(Path, boolean) in checksum
* file system.
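
The new renameInternal above maps the overwrite flag onto Options.Rename and, for files, renames the hidden checksum (.crc) file alongside the data file. A small usage sketch through FileContext, which is the usual route to a ChecksumFs; the local-filesystem setup and paths are assumptions for illustration, not part of the patch:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileContext;
  import org.apache.hadoop.fs.Options;
  import org.apache.hadoop.fs.Path;

  public class ChecksumRenameSketch {
    public static void main(String[] args) throws Exception {
      FileContext fc = FileContext.getLocalFSFileContext(new Configuration());
      Path src = new Path("/tmp/checksum-rename-src");   // assumed to exist
      Path dst = new Path("/tmp/checksum-rename-dst");
      // With this change the overwrite rename also carries src's .crc file
      // to the destination name instead of leaving it orphaned.
      fc.rename(src, dst, Options.Rename.OVERWRITE);
    }
  }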





[hadoop] branch branch-2.8 updated: HADOOP-16255. Add ChecksumFs.rename(path, path, boolean). Contributed by Jungtaek Lim.

2020-06-08 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
 new eb818cd  HADOOP-16255. Add ChecksumFs.rename(path, path, boolean). 
Contributed by Jungtaek Lim.
eb818cd is described below

commit eb818cdc64336ade273a960ba3b9b5a5d0c4d4ec
Author: Kihwal Lee 
AuthorDate: Mon Jun 8 17:36:10 2020 -0500

HADOOP-16255. Add ChecksumFs.rename(path, path, boolean). Contributed by 
Jungtaek Lim.

(cherry picked from commit 14ff6171a5879f63c1188b07cff8cbe135b9f802)
---
 .../main/java/org/apache/hadoop/fs/ChecksumFs.java | 26 ++
 1 file changed, 26 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index 5c54554..ef262c8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -472,6 +472,32 @@ public abstract class ChecksumFs extends FilterFs {
 }
   }
 
+  @Override
+  public void renameInternal(Path src, Path dst, boolean overwrite)
+  throws AccessControlException, FileAlreadyExistsException,
+  FileNotFoundException, ParentNotDirectoryException,
+  UnresolvedLinkException, IOException {
+Options.Rename renameOpt = Options.Rename.NONE;
+if (overwrite) {
+  renameOpt = Options.Rename.OVERWRITE;
+}
+
+if (isDirectory(src)) {
+  getMyFs().rename(src, dst, renameOpt);
+} else {
+  getMyFs().rename(src, dst, renameOpt);
+
+  Path checkFile = getChecksumFile(src);
+  if (exists(checkFile)) { //try to rename checksum
+if (isDirectory(dst)) {
+  getMyFs().rename(checkFile, dst, renameOpt);
+} else {
+  getMyFs().rename(checkFile, getChecksumFile(dst), renameOpt);
+}
+  }
+}
+  }
+
   /**
* Implement the delete(Path, boolean) in checksum
* file system.





[hadoop] branch branch-2.10 updated: HADOOP-16255. Add ChecksumFs.rename(path, path, boolean). Contributed by Jungtaek Lim.

2020-06-08 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 14ff617  HADOOP-16255. Add ChecksumFs.rename(path, path, boolean). 
Contributed by Jungtaek Lim.
14ff617 is described below

commit 14ff6171a5879f63c1188b07cff8cbe135b9f802
Author: Kihwal Lee 
AuthorDate: Mon Jun 8 17:31:16 2020 -0500

HADOOP-16255. Add ChecksumFs.rename(path, path, boolean). Contributed by 
Jungtaek Lim.
---
 .../main/java/org/apache/hadoop/fs/ChecksumFs.java | 26 ++
 1 file changed, 26 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index e006c13..b0fdf01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -472,6 +472,32 @@ public abstract class ChecksumFs extends FilterFs {
 }
   }
 
+  @Override
+  public void renameInternal(Path src, Path dst, boolean overwrite)
+  throws AccessControlException, FileAlreadyExistsException,
+  FileNotFoundException, ParentNotDirectoryException,
+  UnresolvedLinkException, IOException {
+Options.Rename renameOpt = Options.Rename.NONE;
+if (overwrite) {
+  renameOpt = Options.Rename.OVERWRITE;
+}
+
+if (isDirectory(src)) {
+  getMyFs().rename(src, dst, renameOpt);
+} else {
+  getMyFs().rename(src, dst, renameOpt);
+
+  Path checkFile = getChecksumFile(src);
+  if (exists(checkFile)) { //try to rename checksum
+if (isDirectory(dst)) {
+  getMyFs().rename(checkFile, dst, renameOpt);
+} else {
+  getMyFs().rename(checkFile, getChecksumFile(dst), renameOpt);
+}
+  }
+}
+  }
+
   /**
* Implement the delete(Path, boolean) in checksum
* file system.





[hadoop] branch branch-3.1 updated: HADOOP-16255. Add ChecksumFs.rename(path, path, boolean)

2020-06-08 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 400c5ac  HADOOP-16255. Add ChecksumFs.rename(path, path, boolean)
400c5ac is described below

commit 400c5accd9f5cdfadccf78e88c5b3e148fc07bb1
Author: Kihwal Lee 
AuthorDate: Mon Jun 8 17:25:15 2020 -0500

HADOOP-16255. Add ChecksumFs.rename(path, path, boolean)

Contributed by Jungtaek Lim

Change-Id: If00a4d7d30456c08eb2b0f7e2b242197bc4ee05d
(cherry picked from commit bb0b922a71cba9ceaf00588e9f3e3b2a3c2e3eab)
---
 .../main/java/org/apache/hadoop/fs/ChecksumFs.java |  26 
 .../java/org/apache/hadoop/fs/TestChecksumFs.java  | 135 +
 2 files changed, 161 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index c56f6e0..aed9db3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -473,6 +473,32 @@ public abstract class ChecksumFs extends FilterFs {
 }
   }
 
+  @Override
+  public void renameInternal(Path src, Path dst, boolean overwrite)
+  throws AccessControlException, FileAlreadyExistsException,
+  FileNotFoundException, ParentNotDirectoryException,
+  UnresolvedLinkException, IOException {
+Options.Rename renameOpt = Options.Rename.NONE;
+if (overwrite) {
+  renameOpt = Options.Rename.OVERWRITE;
+}
+
+if (isDirectory(src)) {
+  getMyFs().rename(src, dst, renameOpt);
+} else {
+  getMyFs().rename(src, dst, renameOpt);
+
+  Path checkFile = getChecksumFile(src);
+  if (exists(checkFile)) { //try to rename checksum
+if (isDirectory(dst)) {
+  getMyFs().rename(checkFile, dst, renameOpt);
+} else {
+  getMyFs().rename(checkFile, getChecksumFile(dst), renameOpt);
+}
+  }
+}
+  }
+
   /**
* Implement the delete(Path, boolean) in checksum
* file system.
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFs.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFs.java
new file mode 100644
index 000..0959845
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFs.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.HadoopTestBase;
+
+import static org.apache.hadoop.fs.CreateFlag.*;
+
+/**
+ * This class tests the functionality of ChecksumFs.
+ */
+public class TestChecksumFs extends HadoopTestBase {
+  private Configuration conf;
+  private Path testRootDirPath;
+  private FileContext fc;
+
+  @Before
+  public void setUp() throws Exception {
+conf = getTestConfiguration();
+fc = FileContext.getFileContext(conf);
+testRootDirPath = new Path(GenericTestUtils.getRandomizedTestDir()
+.getAbsolutePath());
+mkdirs(testRootDirPath);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+if (fc != null) {
+  fc.delete(testRootDirPath, true);
+}
+  }
+
+  @Test
+  public void testRenameFileToFile() throws Exception {
+Path srcPath = new Path(testRootDirPath, "testRenameSrc");
+Path dstPath = new Path(testRootDirPath, "testRenameDst");
+verifyRename(srcPath, dstPath, false);
+  }
+
+  @Test
+  public void testRenameFileToFileWithOverwrite() throws Exception {
+Path srcPath = new Path(testRootDirPath, "testRenameSrc");
+Path dstPath = new Path(te
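
The tests above drive renames through a verifyRename helper that the truncated listing does not show. A hypothetical reconstruction of such a helper, using only public FileContext APIs; the helper name, return value, and checks are assumptions, not quoted from the patch:

  import java.util.EnumSet;
  import org.apache.hadoop.fs.CreateFlag;
  import org.apache.hadoop.fs.FileContext;
  import org.apache.hadoop.fs.Options;
  import org.apache.hadoop.fs.Path;

  public class VerifyRenameSketch {
    // Create src, rename it (optionally overwriting dst), then report whether
    // only the destination remains. The real test would assert on this result.
    static boolean renameLeavesOnlyDst(FileContext fc, Path src, Path dst,
        boolean overwrite) throws Exception {
      fc.create(src, EnumSet.of(CreateFlag.CREATE),
          Options.CreateOpts.createParent()).close();
      Options.Rename opt = overwrite ? Options.Rename.OVERWRITE : Options.Rename.NONE;
      fc.rename(src, dst, opt);
      return !fc.util().exists(src) && fc.util().exists(dst);
    }
  }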

[hadoop] branch branch-2.10 updated: HDFS-14931. hdfs crypto commands limit column width. Contributed by Eric Badger

2020-06-02 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 54c7d23  HDFS-14931. hdfs crypto commands limit column width. 
Contributed by Eric Badger
54c7d23 is described below

commit 54c7d23e582df80e50a14025a473ddabdf598417
Author: Kihwal Lee 
AuthorDate: Tue Jun 2 09:52:06 2020 -0500

HDFS-14931. hdfs crypto commands limit column width. Contributed by Eric 
Badger

(cherry picked from commit 9ef6ed9c1c83b9752e772ece7a716a33045752bf)
---
 .../src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
index 14abf6e..27dae37 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
@@ -182,7 +182,7 @@ public class CryptoAdmin extends Configured implements Tool 
{
   try {
 final TableListing listing = new TableListing.Builder()
   .addField("").addField("", true)
-  .wrapWidth(AdminHelper.MAX_LINE_WIDTH).hideHeaders().build();
+  .hideHeaders().build();
 final RemoteIterator it = admin.listEncryptionZones();
 while (it.hasNext()) {
   EncryptionZone ez = it.next();





[hadoop] branch trunk updated: HADOOP-17035. fixed typos (timeout, interruped) (#2007)

2020-05-12 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a3f945f  HADOOP-17035. fixed typos (timeout, interruped) (#2007)
a3f945f is described below

commit a3f945fb8466d461d42ce60f0bc12c96fbb2db23
Author: Elixir Kook 
AuthorDate: Wed May 13 00:50:04 2020 +0900

HADOOP-17035. fixed typos (timeout, interruped) (#2007)

Co-authored-by: Sungpeo Kook 
---
 .../src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java  | 2 +-
 .../main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java| 2 +-
 .../src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java | 4 ++--
 .../hadoop-yarn-site/src/site/markdown/GracefulDecommission.md| 4 ++--
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
index 272eae7..76c74a3 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
@@ -40,7 +40,7 @@ import org.slf4j.LoggerFactory;
 import static org.junit.Assert.*;
 
 /**
- * This tests timout out from SocketInputStream and
+ * This tests timeout out from SocketInputStream and
  * SocketOutputStream using pipes.
  * 
  * Normal read and write using these streams are tested by pretty much
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index d390c1e..c772d8f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3431,7 +3431,7 @@ public class DataNode extends ReconfigurableBase
   unhealthyVolumes = volumeChecker.checkAllVolumes(data);
   lastDiskErrorCheck = Time.monotonicNow();
 } catch (InterruptedException e) {
-  LOG.error("Interruped while running disk check", e);
+  LOG.error("Interrupted while running disk check", e);
   throw new IOException("Interrupted while running disk check", e);
 }
 
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 2c2ff1f..7491f21 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
@@ -225,7 +225,7 @@ public class ClientServiceDelegate {
 try {
   Thread.sleep(2000);
 } catch (InterruptedException e1) {
-  LOG.warn("getProxy() call interruped", e1);
+  LOG.warn("getProxy() call interrupted", e1);
   throw new YarnRuntimeException(e1);
 }
 try {
@@ -239,7 +239,7 @@ public class ClientServiceDelegate {
   return checkAndGetHSProxy(null, JobState.RUNNING);
 }
   } catch (InterruptedException e) {
-LOG.warn("getProxy() call interruped", e);
+LOG.warn("getProxy() call interrupted", e);
 throw new YarnRuntimeException(e);
   } catch (YarnException e) {
 throw new IOException(e);
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md
index 2e83ca2..e7ce657 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md
@@ -58,7 +58,7 @@ Features
 `yarn rmadmin -refreshNodes [-g [timeout in seconds] -client|server]` notifies 
NodesListManager to detect and handle include and exclude hosts changes. 
NodesListManager loads excluded hosts from the exclude file as specified 
through the `yarn.resourcemanager.nodes.exclude-path` configuration in 
yarn-site.xml. (Note:  It is unnecessary to restart RM in case of changing the 
exclude-path 
 as this config will be read again for every `refreshNodes` command)
 
-The format of the file could be plain

[hadoop] branch trunk updated: HDFS-15350. Set dfs.client.failover.random.order to true as default. (#2008)

2020-05-12 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 928b81a  HDFS-15350. Set dfs.client.failover.random.order to true as 
default. (#2008)
928b81a is described below

commit 928b81a5339a3d91e77b268d825973a0d9efc1ab
Author: Takanobu Asanuma 
AuthorDate: Tue May 12 23:04:03 2020 +0900

HDFS-15350. Set dfs.client.failover.random.order to true as default. (#2008)
---
 .../main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java   | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml | 2 +-
 .../hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java   | 1 +
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index efc2766..ab3f6f2 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -343,7 +343,7 @@ public interface HdfsClientConfigKeys {
 PREFIX + "connection.retries.on.timeouts";
 int CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0;
 String  RANDOM_ORDER = PREFIX + "random.order";
-boolean RANDOM_ORDER_DEFAULT = false;
+boolean RANDOM_ORDER_DEFAULT = true;
 String  RESOLVE_ADDRESS_NEEDED_KEY = PREFIX + "resolve-needed";
 boolean RESOLVE_ADDRESS_NEEDED_DEFAULT = false;
 String RESOLVE_SERVICE_KEY = PREFIX + "resolver.impl";
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 0dc2799..76b4a49 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4070,7 +4070,7 @@
 
 
   dfs.client.failover.random.order
-  false
+  true
   
 Determines if the failover proxies are picked in random order instead of 
the
 configured order. Random order may be enabled for better load balancing
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java
index e23bb24..630789b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java
@@ -78,6 +78,7 @@ public class TestObserverReadProxyProvider {
 // transition observer back and forth
 conf.setTimeDuration(
 OBSERVER_PROBE_RETRY_PERIOD_KEY, 0, TimeUnit.MILLISECONDS);
+conf.setBoolean(HdfsClientConfigKeys.Failover.RANDOM_ORDER, false);
   }
 
   private void setupProxyProvider(int namenodeCount) throws Exception {
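
With the default flipped above, clients shuffle the configured NameNode proxies unless they opt out, as the test setup now does. A short sketch of both ways to restore the old ordered behaviour; the property name is taken from the hdfs-default.xml hunk above, while the surrounding client code is assumed:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

  public class FailoverOrderSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Programmatic opt-out, as TestObserverReadProxyProvider does above:
      conf.setBoolean(HdfsClientConfigKeys.Failover.RANDOM_ORDER, false);
      // Equivalent hdfs-site.xml setting:
      //   <property>
      //     <name>dfs.client.failover.random.order</name>
      //     <value>false</value>
      //   </property>
      System.out.println(conf.getBoolean(
          HdfsClientConfigKeys.Failover.RANDOM_ORDER, true));
    }
  }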





[hadoop] branch branch-2.10 updated: HDFS-10499. TestNameNodeMetadataConsistency#testGenerationStampInFuture Fails Intermittently. Contributed by Yiqun Lin and Ahmed Hussein.

2020-04-30 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new be60700  HDFS-10499. 
TestNameNodeMetadataConsistency#testGenerationStampInFuture Fails 
Intermittently. Contributed by Yiqun Lin and Ahmed Hussein.
be60700 is described below

commit be6070081d36ab634127f96a293f5a6dc7f9121e
Author: Kihwal Lee 
AuthorDate: Thu Apr 30 16:29:40 2020 -0500

HDFS-10499. TestNameNodeMetadataConsistency#testGenerationStampInFuture 
Fails Intermittently. Contributed by Yiqun Lin and Ahmed Hussein.
---
 .../namenode/TestNameNodeMetadataConsistency.java  | 62 --
 1 file changed, 33 insertions(+), 29 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
index cbb5215..ff78a00 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
@@ -17,6 +17,7 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -29,26 +30,31 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
 import java.io.OutputStream;
-import java.util.concurrent.TimeUnit;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 public class TestNameNodeMetadataConsistency {
+
   private static final Path filePath1 = new Path("/testdata1.txt");
   private static final Path filePath2 = new Path("/testdata2.txt");
-  private static final String TEST_DATA_IN_FUTURE = "This is test data";
+  private static final byte[] TEST_DATA_IN_FUTURE =
+  "This is test data".getBytes();
+  private static final int TEST_DATA_IN_FUTURE_LENGTH =
+  TEST_DATA_IN_FUTURE.length;
 
   private static final int SCAN_INTERVAL = 1;
-  private static final int SCAN_WAIT = 3;
-  MiniDFSCluster cluster;
-  HdfsConfiguration conf;
+  private static final int WAIT_TIME_MS = 500;
+  private static final int MAX_WAIT_TIME_MS = 6;
+  private MiniDFSCluster cluster;
+  private HdfsConfiguration conf;
 
   @Before
   public void InitTest() throws IOException {
@@ -74,13 +80,11 @@ public class TestNameNodeMetadataConsistency {
* safe mode while it is in startup mode.
*/
   @Test
-  public void testGenerationStampInFuture() throws
-  IOException, InterruptedException {
+  public void testGenerationStampInFuture() throws Exception {
 cluster.waitActive();
-
 FileSystem fs = cluster.getFileSystem();
 OutputStream ostream = fs.create(filePath1);
-ostream.write(TEST_DATA_IN_FUTURE.getBytes());
+ostream.write(TEST_DATA_IN_FUTURE);
 ostream.close();
 
 // Re-write the Generation Stamp to a Generation Stamp in future.
@@ -107,12 +111,10 @@ public class TestNameNodeMetadataConsistency {
 cluster.getNameNode().getNamesystem().getBlockManager());
 
 cluster.restartDataNode(dnProps);
-waitTil(TimeUnit.SECONDS.toMillis(SCAN_WAIT));
-cluster.triggerBlockReports();
-waitTil(TimeUnit.SECONDS.toMillis(SCAN_WAIT));
+waitForNumBytes(TEST_DATA_IN_FUTURE_LENGTH);
 
 // Make sure that we find all written bytes in future block
-assertEquals(TEST_DATA_IN_FUTURE.length(),
+assertEquals(TEST_DATA_IN_FUTURE_LENGTH,
 cluster.getNameNode().getBytesWithFutureGenerationStamps());
 // Assert safemode reason
 assertTrue(cluster.getNameNode().getNamesystem().getSafeModeTip().contains(
@@ -124,16 +126,13 @@ public class TestNameNodeMetadataConsistency {
* hence we should not have positive count of Blocks in future.
*/
   @Test
-  public void testEnsureGenStampsIsStartupOnly() throws
-  IOException, InterruptedException {
-
-String testData = " This is test data";
+  public void testEnsureGenStampsIsStartupOnly() throws Exception {
 cluster.restartDataNodes();
 cluster.restartNameNodes();
 cluster.waitActive();
 FileSystem fs = cluster.getFileSystem();
 OutputStream ostream = fs.create(filePath2);
-ostream.write(testData.getBytes());
+ostream.write(TEST_DATA_IN_FUTURE)
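
The rewrite above replaces the fixed sleep-then-trigger sequence with a waitForNumBytes(...) helper whose body is not visible in the truncated diff. A hypothetical reconstruction of such a helper, written as a member of the test class shown above and built only from names that do appear in this patch (triggerBlockReports, GenericTestUtils.waitFor, getBytesWithFutureGenerationStamps, WAIT_TIME_MS, MAX_WAIT_TIME_MS); treat it as a sketch, not the committed code:

  // Poll until the NameNode reports the expected number of "future
  // generation stamp" bytes, instead of sleeping for a fixed interval.
  private void waitForNumBytesSketch(final int expectedBytes) throws Exception {
    cluster.triggerBlockReports();
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return cluster.getNameNode()
            .getBytesWithFutureGenerationStamps() == expectedBytes;
      }
    }, WAIT_TIME_MS, MAX_WAIT_TIME_MS);
  }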

[hadoop] branch branch-2.10 updated: HDFS-15147. LazyPersistTestCase wait logic is error-prone. Contributed by Ahmed Hussein.

2020-02-27 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 232e9f8  HDFS-15147. LazyPersistTestCase wait logic is error-prone. 
Contributed by Ahmed Hussein.
232e9f8 is described below

commit 232e9f8ee117a29a2f8f1360bebd8d0e8def826a
Author: Kihwal Lee 
AuthorDate: Thu Feb 27 09:58:44 2020 -0600

HDFS-15147. LazyPersistTestCase wait logic is error-prone. Contributed
by Ahmed Hussein.
---
 .../hdfs/server/blockmanagement/BlockManager.java  |  11 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  18 +-
 .../java/org/apache/hadoop/hdfs/DFSTestUtil.java   |   2 +-
 .../fsdataset/impl/LazyPersistTestCase.java| 234 ++---
 .../fsdataset/impl/TestLazyPersistFiles.java   |  77 +++
 .../impl/TestLazyPersistReplicaPlacement.java  |   2 +-
 .../datanode/fsdataset/impl/TestLazyWriter.java|   6 +-
 7 files changed, 260 insertions(+), 90 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index fd8739e..5addf5a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -46,9 +46,7 @@ import java.util.concurrent.FutureTask;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-
 import javax.management.ObjectName;
-
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -233,6 +231,8 @@ public class BlockManager implements BlockStatsMXBean {
 
   /** Replication thread. */
   final Daemon replicationThread = new Daemon(new ReplicationMonitor());
+  /** Timestamp for the last cycle of the redundancy thread. */
+  private final AtomicLong lastReplicationCycleTS = new AtomicLong(-1);
   
   /** Block report thread for handling async reports. */
   private final BlockReportProcessingThread blockReportThread =
@@ -3986,11 +3986,15 @@ public class BlockManager implements BlockStatsMXBean {
 return neededReplications.size();
   }
 
+  @VisibleForTesting
+  public long getLastReplicationCycleTS() {
+return lastReplicationCycleTS.get();
+  }
+
   /**
* Periodically calls computeReplicationWork().
*/
   private class ReplicationMonitor implements Runnable {
-
 @Override
 public void run() {
   while (namesystem.isRunning()) {
@@ -4000,6 +4004,7 @@ public class BlockManager implements BlockStatsMXBean {
 computeDatanodeWork();
 processPendingReplications();
 rescanPostponedMisreplicatedBlocks();
+lastReplicationCycleTS.set(Time.monotonicNow());
   }
   Thread.sleep(replicationRecheckInterval);
 } catch (Throwable t) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f0af5b4..11ac3fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -87,9 +87,12 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROU
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_KEY;
-import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
+
+import java.util.concurrent.atomic.AtomicLong;
+
 import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*;
 import static org.apache.hadoop.ha.HAServiceProtocol.HAServiceState.ACTIVE;
 import static org.apache.hadoop.ha.HAServiceProtocol.HAServiceState.OBSERVER;
@@ -294,6 +297,7 @@ import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Appender;
 import org.apache.log4j.AsyncAppender
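
The BlockManager change above records a timestamp after each ReplicationMonitor cycle and exposes it through getLastReplicationCycleTS(), so tests can wait for a full cycle instead of sleeping. A hedged sketch of how a test could use it; the wrapper class and helper name are assumptions, while the called methods appear in this patch:

  import com.google.common.base.Supplier;
  import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
  import org.apache.hadoop.test.GenericTestUtils;
  import org.apache.hadoop.util.Time;

  public class ReplicationCycleWaitSketch {
    // Wait until the replication monitor finishes a cycle that started after
    // we began waiting, instead of sleeping for a fixed period.
    static void waitForReplicationCycle(final BlockManager blockManager)
        throws Exception {
      final long startTS = Time.monotonicNow();
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          return blockManager.getLastReplicationCycleTS() > startTS;
        }
      }, 100, 60000);
    }
  }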

[hadoop] branch branch-3.1 updated: HDFS-15147. LazyPersistTestCase wait logic is error-prone. Contributed by Ahmed Hussein.

2020-02-27 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new b92477c  HDFS-15147. LazyPersistTestCase wait logic is error-prone. 
Contributed by Ahmed Hussein.
b92477c is described below

commit b92477c638c9b9235868bfd13518e36545139c90
Author: Kihwal Lee 
AuthorDate: Thu Feb 27 09:45:12 2020 -0600

HDFS-15147. LazyPersistTestCase wait logic is error-prone. Contributed
by Ahmed Hussein.

(cherry picked from commit 27cfda708ef66dfbe5f52a5f1e716298a294f3f7)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
---
 .../java/org/apache/hadoop/util/ThreadUtil.java|  28 ++
 .../org/apache/hadoop/test/GenericTestUtils.java   |  15 +-
 .../hdfs/server/blockmanagement/BlockManager.java  |  21 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  31 +-
 .../java/org/apache/hadoop/hdfs/DFSTestUtil.java   |  14 +-
 .../fsdataset/impl/LazyPersistTestCase.java| 356 -
 .../fsdataset/impl/TestLazyPersistFiles.java   |  69 ++--
 .../impl/TestLazyPersistReplicaPlacement.java  |   2 +-
 .../datanode/fsdataset/impl/TestLazyWriter.java|   6 +-
 9 files changed, 402 insertions(+), 140 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
index 2cda8a4..f9ea3fc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
@@ -50,6 +50,34 @@ public class ThreadUtil {
   }
 
   /**
+   * Join a thread as uninterruptible.
+   * The call continues to block until the result is available even when the
+   * caller thread is interrupted.
+   * The method will log any {@link InterruptedException} then will 
re-interrupt
+   * the thread.
+   *
+   * @param toJoin the thread to Join on.
+   */
+  public static void joinUninterruptibly(Thread toJoin) {
+boolean interrupted = false;
+try {
+  while (true) {
+try {
+  toJoin.join();
+  return;
+} catch (InterruptedException e) {
+  interrupted = true;
+  LOG.warn("interrupted while sleeping", e);
+}
+  }
+} finally {
+  if (interrupted) {
+Thread.currentThread().interrupt();
+  }
+}
+  }
+
+  /**
* Convenience method that returns a resource as inputstream from the
* classpath.
* 
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 5479907..ba5644f 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -60,7 +60,6 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Sets;
 
@@ -369,11 +368,15 @@ public abstract class GenericTestUtils {
* time
* @throws InterruptedException if the method is interrupted while waiting
*/
-  public static void waitFor(Supplier check, int checkEveryMillis,
-  int waitForMillis) throws TimeoutException, InterruptedException {
-Preconditions.checkNotNull(check, ERROR_MISSING_ARGUMENT);
-Preconditions.checkArgument(waitForMillis >= checkEveryMillis,
-ERROR_INVALID_ARGUMENT);
+  public static void waitFor(final Supplier check,
+  final long checkEveryMillis, final long waitForMillis)
+  throws TimeoutException, InterruptedException {
+if (check == null) {
+  throw new NullPointerException(ERROR_MISSING_ARGUMENT);
+}
+if (waitForMillis < checkEveryMillis) {
+  throw new IllegalArgumentException(ERROR_INVALID_ARGUMENT);
+}
 
 long st = Time.monotonicNow();
 boolean result = check.get();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 34c6fd1..17d5603 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main
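
One of the utilities added above, ThreadUtil.joinUninterruptibly(Thread), keeps joining through interrupts and restores the caller's interrupt flag before returning. A short usage sketch; the worker thread here is illustrative, not from the patch:

  import org.apache.hadoop.util.ThreadUtil;

  public class JoinUninterruptiblySketch {
    public static void main(String[] args) {
      Thread worker = new Thread(new Runnable() {
        @Override
        public void run() {
          // ... some test work that must finish before we proceed ...
        }
      });
      worker.start();
      // Unlike Thread.join(), this never throws InterruptedException; it keeps
      // waiting and re-asserts the caller's interrupt status on return.
      ThreadUtil.joinUninterruptibly(worker);
    }
  }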

[hadoop] branch branch-3.2 updated: HDFS-15147. LazyPersistTestCase wait logic is error-prone. Contributed by Ahmed Hussein.

2020-02-27 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 27cfda7  HDFS-15147. LazyPersistTestCase wait logic is error-prone. 
Contributed by Ahmed Hussein.
27cfda7 is described below

commit 27cfda708ef66dfbe5f52a5f1e716298a294f3f7
Author: Kihwal Lee 
AuthorDate: Thu Feb 27 09:17:44 2020 -0600

HDFS-15147. LazyPersistTestCase wait logic is error-prone. Contributed
by Ahmed Hussein.
---
 .../java/org/apache/hadoop/util/ThreadUtil.java|  28 ++
 .../org/apache/hadoop/test/GenericTestUtils.java   |  15 +-
 .../hdfs/server/blockmanagement/BlockManager.java  |  20 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  30 +-
 .../java/org/apache/hadoop/hdfs/DFSTestUtil.java   |  14 +-
 .../fsdataset/impl/LazyPersistTestCase.java| 354 -
 .../fsdataset/impl/TestLazyPersistFiles.java   |  69 ++--
 .../impl/TestLazyPersistReplicaPlacement.java  |   2 +-
 .../datanode/fsdataset/impl/TestLazyWriter.java|   6 +-
 9 files changed, 397 insertions(+), 141 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
index 2cda8a4..f9ea3fc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
@@ -50,6 +50,34 @@ public class ThreadUtil {
   }
 
   /**
+   * Join a thread as uninterruptible.
+   * The call continues to block until the result is available even when the
+   * caller thread is interrupted.
+   * The method will log any {@link InterruptedException} then will 
re-interrupt
+   * the thread.
+   *
+   * @param toJoin the thread to Join on.
+   */
+  public static void joinUninterruptibly(Thread toJoin) {
+boolean interrupted = false;
+try {
+  while (true) {
+try {
+  toJoin.join();
+  return;
+} catch (InterruptedException e) {
+  interrupted = true;
+  LOG.warn("interrupted while sleeping", e);
+}
+  }
+} finally {
+  if (interrupted) {
+Thread.currentThread().interrupt();
+  }
+}
+  }
+
+  /**
* Convenience method that returns a resource as inputstream from the
* classpath.
* 
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 0082452..9e91634 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -61,7 +61,6 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Sets;
 
@@ -378,11 +377,15 @@ public abstract class GenericTestUtils {
    * time
    * @throws InterruptedException if the method is interrupted while waiting
    */
-  public static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
-      int waitForMillis) throws TimeoutException, InterruptedException {
-    Preconditions.checkNotNull(check, ERROR_MISSING_ARGUMENT);
-    Preconditions.checkArgument(waitForMillis >= checkEveryMillis,
-        ERROR_INVALID_ARGUMENT);
+  public static void waitFor(final Supplier<Boolean> check,
+      final long checkEveryMillis, final long waitForMillis)
+      throws TimeoutException, InterruptedException {
+    if (check == null) {
+      throw new NullPointerException(ERROR_MISSING_ARGUMENT);
+    }
+    if (waitForMillis < checkEveryMillis) {
+      throw new IllegalArgumentException(ERROR_INVALID_ARGUMENT);
+    }
 
     long st = Time.monotonicNow();
     boolean result = check.get();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e7422df..3185f1d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -49,6 +49,7 @@ import java.util.concurrent.FutureTask;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 
+import java.util.concurrent.atomic.AtomicLong;
 import javax.management.ObjectName;
 
 import org.apache.hadoop.HadoopIllegalA
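
A minimal, self-contained sketch (not part of the patch above) of how test code typically drives the two methods this commit touches: the hardened GenericTestUtils.waitFor() and the new ThreadUtil.joinUninterruptibly(). It assumes the hadoop-common test artifact is on the classpath; the class name, thread body, and timing values are illustrative assumptions.

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ThreadUtil;

public class WaitForSketch {
  public static void main(String[] args)
      throws TimeoutException, InterruptedException {
    AtomicBoolean done = new AtomicBoolean(false);
    Thread worker = new Thread(() -> done.set(true));
    worker.start();

    // Poll every 10 ms, give up after 1 s. After this patch the method
    // rejects a null supplier and waitForMillis < checkEveryMillis up front
    // instead of relying on Guava Preconditions.
    GenericTestUtils.waitFor(() -> done.get(), 10, 1000);

    // Block until the worker exits; if the caller is interrupted while
    // waiting, the interrupt flag is re-asserted after the join completes.
    ThreadUtil.joinUninterruptibly(worker);
  }
}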

[hadoop] branch branch-3.2 updated: Revert "HADOOP-16888. [JDK11] Support JDK11 in the precommit job. Contributed by"

2020-02-27 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new de7edf5  Revert "HADOOP-16888. [JDK11] Support JDK11 in the precommit 
job. Contributed by"
de7edf5 is described below

commit de7edf58bd37a7729301d897c6eda952b9692698
Author: Kihwal Lee 
AuthorDate: Thu Feb 27 09:16:55 2020 -0600

Revert "HADOOP-16888. [JDK11] Support JDK11 in the precommit job. 
Contributed by"

Incorrect commit message

This reverts commit 749d7c0027b8978f9c07af04031a8fad6d7c18e1.
---
 .../java/org/apache/hadoop/util/ThreadUtil.java|  28 --
 .../org/apache/hadoop/test/GenericTestUtils.java   |  15 +-
 .../hdfs/server/blockmanagement/BlockManager.java  |  20 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  30 +-
 .../java/org/apache/hadoop/hdfs/DFSTestUtil.java   |  14 +-
 .../fsdataset/impl/LazyPersistTestCase.java| 354 +
 .../fsdataset/impl/TestLazyPersistFiles.java   |  69 ++--
 .../impl/TestLazyPersistReplicaPlacement.java  |   2 +-
 .../datanode/fsdataset/impl/TestLazyWriter.java|   6 +-
 9 files changed, 141 insertions(+), 397 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
index f9ea3fc..2cda8a4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
@@ -50,34 +50,6 @@ public class ThreadUtil {
   }
 
   /**
-   * Join a thread as uninterruptible.
-   * The call continues to block until the result is available even when the
-   * caller thread is interrupted.
-   * The method will log any {@link InterruptedException} then will re-interrupt
-   * the thread.
-   *
-   * @param toJoin the thread to Join on.
-   */
-  public static void joinUninterruptibly(Thread toJoin) {
-    boolean interrupted = false;
-    try {
-      while (true) {
-        try {
-          toJoin.join();
-          return;
-        } catch (InterruptedException e) {
-          interrupted = true;
-          LOG.warn("interrupted while sleeping", e);
-        }
-      }
-    } finally {
-      if (interrupted) {
-        Thread.currentThread().interrupt();
-      }
-    }
-  }
-
-  /**
    * Convenience method that returns a resource as inputstream from the
    * classpath.
    *
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 9e91634..0082452 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -61,6 +61,7 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Sets;
 
@@ -377,15 +378,11 @@ public abstract class GenericTestUtils {
    * time
    * @throws InterruptedException if the method is interrupted while waiting
    */
-  public static void waitFor(final Supplier<Boolean> check,
-      final long checkEveryMillis, final long waitForMillis)
-      throws TimeoutException, InterruptedException {
-    if (check == null) {
-      throw new NullPointerException(ERROR_MISSING_ARGUMENT);
-    }
-    if (waitForMillis < checkEveryMillis) {
-      throw new IllegalArgumentException(ERROR_INVALID_ARGUMENT);
-    }
+  public static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
+      int waitForMillis) throws TimeoutException, InterruptedException {
+    Preconditions.checkNotNull(check, ERROR_MISSING_ARGUMENT);
+    Preconditions.checkArgument(waitForMillis >= checkEveryMillis,
+        ERROR_INVALID_ARGUMENT);
 
     long st = Time.monotonicNow();
     boolean result = check.get();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3185f1d..e7422df 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -49,7 +49,6 @@ import java.util.concurrent.FutureTask;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 
-import java.util.c

[hadoop] branch branch-3.2 updated: HADOOP-16888. [JDK11] Support JDK11 in the precommit job. Contributed by Ahmed Hussein.

2020-02-27 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 749d7c0  HADOOP-16888. [JDK11] Support JDK11 in the precommit job. 
Contributed by Ahmed Hussein.
749d7c0 is described below

commit 749d7c0027b8978f9c07af04031a8fad6d7c18e1
Author: Kihwal Lee 
AuthorDate: Thu Feb 27 09:13:20 2020 -0600

HADOOP-16888. [JDK11] Support JDK11 in the precommit job. Contributed by
Ahmed Hussein.
---
 .../java/org/apache/hadoop/util/ThreadUtil.java|  28 ++
 .../org/apache/hadoop/test/GenericTestUtils.java   |  15 +-
 .../hdfs/server/blockmanagement/BlockManager.java  |  20 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  30 +-
 .../java/org/apache/hadoop/hdfs/DFSTestUtil.java   |  14 +-
 .../fsdataset/impl/LazyPersistTestCase.java| 354 -
 .../fsdataset/impl/TestLazyPersistFiles.java   |  69 ++--
 .../impl/TestLazyPersistReplicaPlacement.java  |   2 +-
 .../datanode/fsdataset/impl/TestLazyWriter.java|   6 +-
 9 files changed, 397 insertions(+), 141 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
index 2cda8a4..f9ea3fc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
@@ -50,6 +50,34 @@ public class ThreadUtil {
   }
 
   /**
+   * Join a thread as uninterruptible.
+   * The call continues to block until the result is available even when the
+   * caller thread is interrupted.
+   * The method will log any {@link InterruptedException} then will re-interrupt
+   * the thread.
+   *
+   * @param toJoin the thread to Join on.
+   */
+  public static void joinUninterruptibly(Thread toJoin) {
+    boolean interrupted = false;
+    try {
+      while (true) {
+        try {
+          toJoin.join();
+          return;
+        } catch (InterruptedException e) {
+          interrupted = true;
+          LOG.warn("interrupted while sleeping", e);
+        }
+      }
+    } finally {
+      if (interrupted) {
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
+  /**
    * Convenience method that returns a resource as inputstream from the
    * classpath.
    *
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 0082452..9e91634 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -61,7 +61,6 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Sets;
 
@@ -378,11 +377,15 @@ public abstract class GenericTestUtils {
    * time
    * @throws InterruptedException if the method is interrupted while waiting
    */
-  public static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
-      int waitForMillis) throws TimeoutException, InterruptedException {
-    Preconditions.checkNotNull(check, ERROR_MISSING_ARGUMENT);
-    Preconditions.checkArgument(waitForMillis >= checkEveryMillis,
-        ERROR_INVALID_ARGUMENT);
+  public static void waitFor(final Supplier<Boolean> check,
+      final long checkEveryMillis, final long waitForMillis)
+      throws TimeoutException, InterruptedException {
+    if (check == null) {
+      throw new NullPointerException(ERROR_MISSING_ARGUMENT);
+    }
+    if (waitForMillis < checkEveryMillis) {
+      throw new IllegalArgumentException(ERROR_INVALID_ARGUMENT);
+    }
 
     long st = Time.monotonicNow();
     boolean result = check.get();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e7422df..3185f1d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -49,6 +49,7 @@ import java.util.concurrent.FutureTask;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 
+import java.util.concurrent.atomic.AtomicLong;
 import javax.management.ObjectName;
 
 import org.apache.hadoop.HadoopIllegalA

[hadoop] branch trunk updated: HDFS-15147. LazyPersistTestCase wait logic is flawed. Contributed by Ahmed Hussein.

2020-02-26 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 352a4ec  HDFS-15147. LazyPersistTestCase wait logic is flawed. 
Contributed by Ahmed Hussein.
352a4ec is described below

commit 352a4ec16de7c39b13ea750600dfc9cc6915cb62
Author: Kihwal Lee 
AuthorDate: Wed Feb 26 09:33:29 2020 -0600

HDFS-15147. LazyPersistTestCase wait logic is flawed. Contributed by Ahmed 
Hussein.
---
 .../java/org/apache/hadoop/util/ThreadUtil.java|  28 ++
 .../org/apache/hadoop/test/GenericTestUtils.java   |  15 +-
 .../hdfs/server/blockmanagement/BlockManager.java  |  20 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  35 +-
 .../java/org/apache/hadoop/hdfs/DFSTestUtil.java   |  14 +-
 .../fsdataset/impl/LazyPersistTestCase.java| 356 -
 .../fsdataset/impl/TestLazyPersistFiles.java   |  69 ++--
 .../impl/TestLazyPersistReplicaPlacement.java  |   2 +-
 .../datanode/fsdataset/impl/TestLazyWriter.java|   6 +-
 9 files changed, 401 insertions(+), 144 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
index 2cda8a4..f9ea3fc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
@@ -50,6 +50,34 @@ public class ThreadUtil {
   }
 
   /**
+   * Join a thread as uninterruptible.
+   * The call continues to block until the result is available even when the
+   * caller thread is interrupted.
+   * The method will log any {@link InterruptedException} then will re-interrupt
+   * the thread.
+   *
+   * @param toJoin the thread to Join on.
+   */
+  public static void joinUninterruptibly(Thread toJoin) {
+    boolean interrupted = false;
+    try {
+      while (true) {
+        try {
+          toJoin.join();
+          return;
+        } catch (InterruptedException e) {
+          interrupted = true;
+          LOG.warn("interrupted while sleeping", e);
+        }
+      }
+    } finally {
+      if (interrupted) {
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
+  /**
    * Convenience method that returns a resource as inputstream from the
    * classpath.
    *
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 0082452..9e91634 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -61,7 +61,6 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Sets;
 
@@ -378,11 +377,15 @@ public abstract class GenericTestUtils {
    * time
    * @throws InterruptedException if the method is interrupted while waiting
    */
-  public static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
-      int waitForMillis) throws TimeoutException, InterruptedException {
-    Preconditions.checkNotNull(check, ERROR_MISSING_ARGUMENT);
-    Preconditions.checkArgument(waitForMillis >= checkEveryMillis,
-        ERROR_INVALID_ARGUMENT);
+  public static void waitFor(final Supplier<Boolean> check,
+      final long checkEveryMillis, final long waitForMillis)
+      throws TimeoutException, InterruptedException {
+    if (check == null) {
+      throw new NullPointerException(ERROR_MISSING_ARGUMENT);
+    }
+    if (waitForMillis < checkEveryMillis) {
+      throw new IllegalArgumentException(ERROR_INVALID_ARGUMENT);
+    }
 
     long st = Time.monotonicNow();
     boolean result = check.get();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e2b22d3..cb031a9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -47,6 +47,7 @@ import java.util.concurrent.FutureTask;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 
+import java.util.concurrent.atomic.AtomicLong;
 import javax.management.ObjectName;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@

[hadoop] branch branch-2.10 updated: HDFS-13404. Addendum: RBF: TestRouterWebHDFSContractAppend.testRenameFileBeingAppended fail. Contributed by Takanobu Asanuma.

2020-02-25 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 3b86011  HDFS-13404. Addendum: RBF: 
TestRouterWebHDFSContractAppend.testRenameFileBeingAppended fail. Contributed 
by Takanobu Asanuma.
3b86011 is described below

commit 3b86011c24c441f2b46ef612ceaaede3298ebd31
Author: Kihwal Lee 
AuthorDate: Tue Feb 25 12:28:56 2020 -0600

HDFS-13404. Addendum: RBF: 
TestRouterWebHDFSContractAppend.testRenameFileBeingAppended fail. Contributed 
by Takanobu Asanuma.

(cherry picked from commit b52fd05d42d9a76f6936a5d86c23fcd66244fe3d)

Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
---
 .../org/apache/hadoop/fs/contract/AbstractContractAppendTest.java   | 6 ++
 1 file changed, 6 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
index d61b635..a9fb117 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
@@ -133,6 +133,12 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestBase
     assertPathExists("original file does not exist", target);
     byte[] dataset = dataset(256, 'a', 'z');
     FSDataOutputStream outputStream = getFileSystem().append(target);
+    if (isSupported(CREATE_VISIBILITY_DELAYED)) {
+      // Some filesystems like WebHDFS doesn't assure sequential consistency.
+      // In such a case, delay is needed. Given that we can not check the lease
+      // because here is closed in client side package, simply add a sleep.
+      Thread.sleep(100);
+    }
     outputStream.write(dataset);
     Path renamed = new Path(testPath, "renamed");
     rename(target, renamed);





[hadoop] branch branch-2.10 updated: Revert "HDFS-6874. Add GETFILEBLOCKLOCATIONS operation to HttpFS. Contributed by Weiwei Yang"

2020-02-25 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new e80b5ec  Revert "HDFS-6874. Add GETFILEBLOCKLOCATIONS operation to 
HttpFS.  Contributed by Weiwei Yang"
e80b5ec is described below

commit e80b5ec58dbad292ee4c3604d07e1f6c29603d34
Author: Kihwal Lee 
AuthorDate: Tue Feb 25 11:33:20 2020 -0600

Revert "HDFS-6874. Add GETFILEBLOCKLOCATIONS operation to HttpFS.  
Contributed by Weiwei Yang"

This reverts commit b4a108fa9f38ee028978474fe6c298bbd88fda7a.

Conflicts:

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
---
 .../hadoop/fs/http/client/HttpFSFileSystem.java| 42 --
 .../apache/hadoop/fs/http/server/FSOperations.java | 38 -
 .../fs/http/server/HttpFSParametersProvider.java   |  3 +-
 .../apache/hadoop/fs/http/server/HttpFSServer.java | 21 +
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  | 89 +-
 5 files changed, 3 insertions(+), 190 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 6e39f5b..596aef7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -23,12 +23,9 @@ import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.type.MapType;
 import com.google.common.base.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -124,8 +121,6 @@ public class HttpFSFileSystem extends FileSystem
   public static final String NEW_LENGTH_PARAM = "newlength";
   public static final String START_AFTER_PARAM = "startAfter";
   public static final String POLICY_NAME_PARAM = "storagepolicy";
-  public static final String OFFSET_PARAM = "offset";
-  public static final String LENGTH_PARAM = "length";
   public static final String SNAPSHOT_NAME_PARAM = "snapshotname";
   public static final String OLD_SNAPSHOT_NAME_PARAM = "oldsnapshotname";
 
@@ -218,7 +213,6 @@ public class HttpFSFileSystem extends FileSystem
 
   public static final String STORAGE_POLICIES_JSON = "BlockStoragePolicies";
   public static final String STORAGE_POLICY_JSON = "BlockStoragePolicy";
-  public static final String BLOCK_LOCATIONS_JSON = "BlockLocations";
 
   public static final int HTTP_TEMPORARY_REDIRECT = 307;
 
@@ -1429,42 +1423,6 @@ public class HttpFSFileSystem extends FileSystem
 return createStoragePolicy((JSONObject) json.get(STORAGE_POLICY_JSON));
   }
 
-  @Override
-  public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
-  long len) throws IOException {
-Map params = new HashMap();
-params.put(OP_PARAM, Operation.GETFILEBLOCKLOCATIONS.toString());
-params.put(OFFSET_PARAM, Long.toString(start));
-params.put(LENGTH_PARAM, Long.toString(len));
-HttpURLConnection conn =
-getConnection(Operation.GETFILEBLOCKLOCATIONS.getMethod(), params,
-file.getPath(), true);
-HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
-JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
-return toBlockLocations(json);
-  }
-
-  private BlockLocation[] toBlockLocations(JSONObject json)
-  throws IOException {
-ObjectMapper mapper = new ObjectMapper();
-MapType subType = mapper.getTypeFactory().constructMapType(
-Map.class,
-String.class,
-BlockLocation[].class);
-MapType rootType = mapper.getTypeFactory().constructMapType(
-Map.class,
-mapper.constructType(String.class),
-mapper.constructType(subType));
-
-Map> jsonMap = mapper
-.readValue(json.toJSONString(), rootType);
-Map locationMap = jsonMap
-.get(BLOCK_LOCATIONS_JSON);
-BlockLocation[] locationArray = locationMap.get(
-BlockLocation.class.getSimpleName()

[hadoop] branch branch-3.1 updated: HDFS-12459. Fix revert: Add new op GETFILEBLOCKLOCATIONS to WebHDFS REST API. Contributed by Weiwei Yang.

2020-02-17 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new c54051d  HDFS-12459. Fix revert: Add new op GETFILEBLOCKLOCATIONS to 
WebHDFS REST API. Contributed by Weiwei Yang.
c54051d is described below

commit c54051def5d51c09f91b15888f00e63e11e11391
Author: Kihwal Lee 
AuthorDate: Mon Feb 17 16:07:03 2020 -0600

HDFS-12459. Fix revert: Add new op GETFILEBLOCKLOCATIONS to WebHDFS REST 
API. Contributed by Weiwei Yang.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit 3ead525c71cba068e7abf1c76ad629bfeec10852)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
---
 .../hadoop/hdfs/web/resources/GetOpParam.java  |  12 +-
 .../web/resources/NamenodeWebHdfsMethods.java  |  13 ++
 .../java/org/apache/hadoop/hdfs/web/JsonUtil.java  |  32 
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md   | 173 +
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java| 148 ++
 5 files changed, 377 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index 6dff47a..f9a5fa6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -34,8 +34,18 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
     GETDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
 
-    /** GET_BLOCK_LOCATIONS is a private unstable op. */
+    /**
+     * GET_BLOCK_LOCATIONS is a private/stable API op. It returns a
+     * {@link org.apache.hadoop.hdfs.protocol.LocatedBlocks}
+     * json object.
+     */
     GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
+    /**
+     * GETFILEBLOCKLOCATIONS is the public op that complies with
+     * {@link org.apache.hadoop.fs.FileSystem#getFileBlockLocations}
+     * interface.
+     */
+    GETFILEBLOCKLOCATIONS(false, HttpURLConnection.HTTP_OK),
     GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
     GETXATTRS(false, HttpURLConnection.HTTP_OK),
     GETTRASHROOT(false, HttpURLConnection.HTTP_OK),
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 62a643a..b2370d8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -59,6 +59,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
@@ -1117,6 +1118,18 @@ public class NamenodeWebHdfsMethods {
           .build();
       }
     }
+    case GETFILEBLOCKLOCATIONS:
+    {
+      final long offsetValue = offset.getValue();
+      final Long lengthValue = length.getValue();
+      LocatedBlocks locatedBlocks = getRpcClientProtocol()
+          .getBlockLocations(fullpath, offsetValue, lengthValue != null ?
+              lengthValue : Long.MAX_VALUE);
+      BlockLocation[] locations =
+          DFSUtilClient.locatedBlocks2Locations(locatedBlocks);
+      final String js = JsonUtil.toJsonString(locations);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     case GET_BLOCK_LOCATIONS:
     {
       final long offsetValue = offset.getValue();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 115dab8..58a18d2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -592,4 +592,36 @@ public class JsonUtil {
 m.put("dirStatus", toJsonMap(snapshottableDirectoryStatus.getDirStatus()));
 return m;
   }
+
+  private static Map toJsonMap(
+  final Blo
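
As a usage sketch of what the restored public op enables on the client side: FileSystem.getFileBlockLocations() over a webhdfs:// URI resolves through GETFILEBLOCKLOCATIONS as shown in the NamenodeWebHdfsMethods hunk above. The host name, port (9870 by default on 3.x, 50070 on 2.x), and path below are illustrative assumptions, not part of the patch.

import java.net.URI;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsBlockLocationsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode HTTP address; adjust for the target cluster.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode.example.com:9870"), conf);

    Path file = new Path("/data/example.txt");
    FileStatus status = fs.getFileStatus(file);

    // Resolves block locations through the public WebHDFS op rather than
    // the private GET_BLOCK_LOCATIONS op.
    BlockLocation[] locations =
        fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation loc : locations) {
      System.out.println(loc.getOffset() + "+" + loc.getLength()
          + " on " + Arrays.toString(loc.getHosts()));
    }
    fs.close();
  }
}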

[hadoop] 02/02: HDFS-12459. Fix revert: Add new op GETFILEBLOCKLOCATIONS to WebHDFS REST API. Contributed by Weiwei Yang.

2020-02-17 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 38e97c2061d79459dc28eb4be1c331820ba77058
Author: Kihwal Lee 
AuthorDate: Mon Feb 17 15:49:48 2020 -0600

HDFS-12459. Fix revert: Add new op GETFILEBLOCKLOCATIONS to WebHDFS REST 
API. Contributed by Weiwei Yang.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit 3ead525c71cba068e7abf1c76ad629bfeec10852)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
---
 .../hadoop/hdfs/web/resources/GetOpParam.java  |  12 +-
 .../web/resources/NamenodeWebHdfsMethods.java  |  13 ++
 .../java/org/apache/hadoop/hdfs/web/JsonUtil.java  |  51 +++---
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md   | 173 +
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java| 149 ++
 5 files changed, 378 insertions(+), 20 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index 1cb0a39..85f7aa4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -34,8 +34,18 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
     GETDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
 
-    /** GET_BLOCK_LOCATIONS is a private unstable op. */
+    /**
+     * GET_BLOCK_LOCATIONS is a private/stable API op. It returns a
+     * {@link org.apache.hadoop.hdfs.protocol.LocatedBlocks}
+     * json object.
+     */
     GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
+    /**
+     * GETFILEBLOCKLOCATIONS is the public op that complies with
+     * {@link org.apache.hadoop.fs.FileSystem#getFileBlockLocations}
+     * interface.
+     */
+    GETFILEBLOCKLOCATIONS(false, HttpURLConnection.HTTP_OK),
     GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
     GETXATTRS(false, HttpURLConnection.HTTP_OK),
     GETTRASHROOT(false, HttpURLConnection.HTTP_OK),
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 4fef4a6..973fe71 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -58,6 +58,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
@@ -1013,6 +1014,18 @@ public class NamenodeWebHdfsMethods {
           .build();
       }
     }
+    case GETFILEBLOCKLOCATIONS:
+    {
+      final long offsetValue = offset.getValue();
+      final Long lengthValue = length.getValue();
+      LocatedBlocks locatedBlocks = getRpcClientProtocol()
+          .getBlockLocations(fullpath, offsetValue, lengthValue != null ?
+              lengthValue : Long.MAX_VALUE);
+      BlockLocation[] locations =
+          DFSUtilClient.locatedBlocks2Locations(locatedBlocks);
+      final String js = JsonUtil.toJsonString(locations);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     case GET_BLOCK_LOCATIONS:
     {
       final long offsetValue = offset.getValue();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 585bf0f..b626c3d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -509,25 +509,6 @@ public class JsonUtil {
 return m;
   }
 
-  public static Map toJsonMap(
-  final BlockLocation blockLocation) throws IOException {
-if (blockLocation == null) {
-  return null;
-}
-
-final Map m = new TreeMap();
-m.put("length", blockLocation.getLength());
-m.put("offset", blockLocation.getOffset());
-m.put("corrupt", blockLocation.isCorrup

[hadoop] branch branch-2.10 updated (9e34e6a -> 38e97c2)

2020-02-17 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a change to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 9e34e6a  HDFS-15164. Fix TestDelegationTokensWithHA. Contributed by 
Ayush Saxena.
 new e847768  Revert "HDFS-11156. Add new op GETFILEBLOCKLOCATIONS to 
WebHDFS REST API. Contributed by Weiwei Yang."
 new 38e97c2  HDFS-12459. Fix revert: Add new op GETFILEBLOCKLOCATIONS to 
WebHDFS REST API. Contributed by Weiwei Yang.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../org/apache/hadoop/hdfs/web/JsonUtilClient.java |  54 --
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  |  62 +--
 .../web/resources/NamenodeWebHdfsMethods.java  |  15 +-
 .../java/org/apache/hadoop/hdfs/web/JsonUtil.java  |  51 +++---
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md   |  17 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java| 190 -
 6 files changed, 75 insertions(+), 314 deletions(-)





[hadoop] 01/02: Revert "HDFS-11156. Add new op GETFILEBLOCKLOCATIONS to WebHDFS REST API. Contributed by Weiwei Yang."

2020-02-17 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit e847768c3ff35e9c1408e820c4718840d351b468
Author: Kihwal Lee 
AuthorDate: Mon Feb 17 15:06:00 2020 -0600

Revert "HDFS-11156. Add new op GETFILEBLOCKLOCATIONS to WebHDFS REST API. 
Contributed by Weiwei Yang."

This reverts commit d4ca1c5226521c4f9c609bb8ec9f64a63bd8bef1.

-JsonUtil not reverted. A later commit in httpfs uses the new methods.
-jackson databind was introduced in pom.xml. This is not reverted as
later changes are using it.
---
 .../org/apache/hadoop/hdfs/web/JsonUtilClient.java |  54 
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  |  62 +
 .../hadoop/hdfs/web/resources/GetOpParam.java  |  12 +-
 .../web/resources/NamenodeWebHdfsMethods.java  |  16 --
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md   | 188 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java| 277 -
 6 files changed, 6 insertions(+), 603 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 1fb7dea..8e1b527 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.web;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.FileChecksum;
@@ -683,59 +682,6 @@ class JsonUtilClient {
 }
   }
 
-  static BlockLocation[] toBlockLocationArray(Map json)
-  throws IOException{
-final Map rootmap =
-(Map)json.get(BlockLocation.class.getSimpleName() + "s");
-final List array = JsonUtilClient.getList(rootmap,
-BlockLocation.class.getSimpleName());
-
-Preconditions.checkNotNull(array);
-final BlockLocation[] locations = new BlockLocation[array.size()];
-int i = 0;
-for (Object object : array) {
-  final Map m = (Map) object;
-  locations[i++] = JsonUtilClient.toBlockLocation(m);
-}
-return locations;
-  }
-
-  /** Convert a Json map to BlockLocation. **/
-  static BlockLocation toBlockLocation(Map m)
-  throws IOException{
-if(m == null) {
-  return null;
-}
-
-long length = ((Number) m.get("length")).longValue();
-long offset = ((Number) m.get("offset")).longValue();
-boolean corrupt = Boolean.
-getBoolean(m.get("corrupt").toString());
-String[] storageIds = toStringArray(getList(m, "storageIds"));
-String[] cachedHosts = toStringArray(getList(m, "cachedHosts"));
-String[] hosts = toStringArray(getList(m, "hosts"));
-String[] names = toStringArray(getList(m, "names"));
-String[] topologyPaths = toStringArray(getList(m, "topologyPaths"));
-StorageType[] storageTypes = toStorageTypeArray(
-getList(m, "storageTypes"));
-return new BlockLocation(names, hosts, cachedHosts,
-topologyPaths, storageIds, storageTypes,
-offset, length, corrupt);
-  }
-
-  static String[] toStringArray(List list) {
-if (list == null) {
-  return null;
-} else {
-  final String[] array = new String[list.size()];
-  int i = 0;
-  for (Object object : list) {
-array[i++] = object.toString();
-  }
-  return array;
-}
-  }
-
   /*
* The parameters which have default value -1 are required fields according
* to hdfs.proto.
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 67db5b3b..7e4b3d9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -1706,68 +1706,14 @@ public class WebHdfsFileSystem extends FileSystem
   final long offset, final long length) throws IOException {
 statistics.incrementReadOps(1);
 storageStatistics.incrementOpCounter(OpType.GET_FILE_BLOCK_LOCATIONS);
-BlockLocation[] locations = null;
-try {
-  locations = getFileBlockLocations(
-  GetOpParam.Op.GETFILEBLOCKLOCATIONS,
-  p, offset, length);
-} catch (RemoteException e) {
-  // See
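
For reference, a sketch of the JSON document shape that the reverted parser above (JsonUtilClient.toBlockLocationArray()/toBlockLocation()) expects: the wrapper keys follow BlockLocation.class.getSimpleName() and the field names are taken from the removed code, while every concrete value below is an illustrative assumption.

public class BlockLocationsJsonSketch {
  // Outer key: BlockLocation.class.getSimpleName() + "s"; inner key and
  // field names mirror the removed toBlockLocation(); values are made up.
  static final String SAMPLE =
      "{ \"BlockLocations\" : { \"BlockLocation\" : [ {"
      + " \"offset\" : 0, \"length\" : 134217728, \"corrupt\" : false,"
      + " \"hosts\" : [\"dn1.example.com\"],"
      + " \"names\" : [\"dn1.example.com:9866\"],"
      + " \"cachedHosts\" : [],"
      + " \"topologyPaths\" : [\"/default-rack/dn1.example.com:9866\"],"
      + " \"storageIds\" : [\"DS-0001\"],"
      + " \"storageTypes\" : [\"DISK\"]"
      + " } ] } }";

  public static void main(String[] args) {
    System.out.println(SAMPLE);
  }
}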

[hadoop] branch branch-2.8 updated: HDFS-14758. Make lease hard limit configurable and reduce the default. Contributed by hemanthboyina.

2020-02-11 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
 new 8fd368b  HDFS-14758. Make lease hard limit configurable and reduce the 
default. Contributed by hemanthboyina.
8fd368b is described below

commit 8fd368b44e4084c229b16a04c793d3f8c00ca4a6
Author: Kihwal Lee 
AuthorDate: Tue Feb 11 16:36:50 2020 -0600

HDFS-14758. Make lease hard limit configurable and reduce the default.
Contributed by hemanthboyina.

(cherry picked from commit 9b8a78d97bfd825ce840c6033371c7f10e49a5b8)
(cherry picked from commit f1840669cfc92c033521bf95989479523e2a649d)
(cherry picked from commit bf09bfa2db7c0790f72d6d7d357e0b80c9eff1f2)
(cherry picked from commit 28619362288fbaf9c3757ee3ef770f575f7a6916)
(cherry picked from commit e2ea7089ee4376fcddec6c9c5b52e11dfdaf5d4d)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
---
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java  |  4 ++--
 .../org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java  |  3 +++
 .../org/apache/hadoop/hdfs/client/impl/DfsClientConf.java| 12 
 .../java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java  | 11 ---
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java  |  5 +
 .../hadoop/hdfs/server/common/HdfsServerConstants.java   |  5 ++---
 .../org/apache/hadoop/hdfs/server/namenode/LeaseManager.java |  7 ++-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  |  7 +++
 .../test/java/org/apache/hadoop/hdfs/TestFileAppend4.java|  5 +++--
 .../src/test/java/org/apache/hadoop/hdfs/TestLease.java  |  5 +++--
 .../test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java |  6 +++---
 .../apache/hadoop/hdfs/server/namenode/TestFileTruncate.java |  3 ++-
 .../snapshot/TestINodeFileUnderConstructionWithSnapshot.java |  5 +++--
 13 files changed, 51 insertions(+), 27 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
old mode 100644
new mode 100755
index de00da9..7c68b8e
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -563,10 +563,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   } catch (IOException e) {
 // Abort if the lease has already expired.
 final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
-if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
+if (elapsed > dfsClientConf.getleaseHardLimitPeriod()) {
   LOG.warn("Failed to renew lease for " + clientName + " for "
   + (elapsed/1000) + " seconds (>= hard-limit ="
-  + (HdfsConstants.LEASE_HARDLIMIT_PERIOD / 1000) + " seconds.) "
+  + (dfsClientConf.getleaseHardLimitPeriod() / 1000) + " seconds.) 
"
   + "Closing all files being written ...", e);
   closeAllFilesBeingWritten(true);
 } else {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
old mode 100644
new mode 100755
index 815260f..0018d566
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -189,6 +189,9 @@ public interface HdfsClientConfigKeys {
   "dfs.data.transfer.client.tcpnodelay";
   boolean DFS_DATA_TRANSFER_CLIENT_TCPNODELAY_DEFAULT = true;
 
+  String DFS_LEASE_HARDLIMIT_KEY = "dfs.namenode.lease-hard-limit-sec";
+  long DFS_LEASE_HARDLIMIT_DEFAULT = 20 * 60;
+
   /**
* These are deprecated config keys to client code.
*/
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index 316d374..8c75d03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -138,6 +138,7 @@ public class DfsClientConf 
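
A minimal sketch of how the new setting is read and overridden, assuming the hadoop-hdfs-client artifact is on the classpath; the 10-minute override is an illustrative value, and a cluster would normally set the key in hdfs-site.xml instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class LeaseHardLimitSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Default after this change is 20 * 60 seconds (20 minutes), replacing
    // the previous fixed one-hour constant; both the client and the
    // NameNode read the same key.
    long hardLimitSec = conf.getLong(
        HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
        HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT);
    System.out.println("dfs.namenode.lease-hard-limit-sec = " + hardLimitSec);

    // Tightening the limit, e.g. to 10 minutes, for a test or tool:
    conf.setLong(HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_KEY, 10 * 60);
  }
}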

[hadoop] branch branch-2.9 updated: HDFS-14758. Make lease hard limit configurable and reduce the default. Contributed by hemanthboyina.

2020-02-11 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
 new e2ea708  HDFS-14758. Make lease hard limit configurable and reduce the 
default. Contributed by hemanthboyina.
e2ea708 is described below

commit e2ea7089ee4376fcddec6c9c5b52e11dfdaf5d4d
Author: Kihwal Lee 
AuthorDate: Tue Feb 11 16:31:36 2020 -0600

HDFS-14758. Make lease hard limit configurable and reduce the default.
Contributed by hemanthboyina.

(cherry picked from commit 9b8a78d97bfd825ce840c6033371c7f10e49a5b8)
(cherry picked from commit f1840669cfc92c033521bf95989479523e2a649d)
(cherry picked from commit bf09bfa2db7c0790f72d6d7d357e0b80c9eff1f2)
(cherry picked from commit 28619362288fbaf9c3757ee3ef770f575f7a6916)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
---
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java  |  4 ++--
 .../org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java  |  3 +++
 .../org/apache/hadoop/hdfs/client/impl/DfsClientConf.java| 12 
 .../java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java  | 11 ---
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java  |  5 +
 .../hadoop/hdfs/server/common/HdfsServerConstants.java   |  5 ++---
 .../org/apache/hadoop/hdfs/server/namenode/LeaseManager.java |  7 ++-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  |  8 
 .../test/java/org/apache/hadoop/hdfs/TestFileAppend4.java|  5 +++--
 .../src/test/java/org/apache/hadoop/hdfs/TestLease.java  |  5 +++--
 .../test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java |  6 +++---
 .../apache/hadoop/hdfs/server/namenode/TestFileTruncate.java |  3 ++-
 .../snapshot/TestINodeFileUnderConstructionWithSnapshot.java |  5 +++--
 13 files changed, 52 insertions(+), 27 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
old mode 100644
new mode 100755
index fffaafb..fb675ee
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -563,10 +563,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   } catch (IOException e) {
 // Abort if the lease has already expired.
 final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
-if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
+if (elapsed > dfsClientConf.getleaseHardLimitPeriod()) {
   LOG.warn("Failed to renew lease for " + clientName + " for "
   + (elapsed/1000) + " seconds (>= hard-limit ="
-  + (HdfsConstants.LEASE_HARDLIMIT_PERIOD / 1000) + " seconds.) "
+  + (dfsClientConf.getleaseHardLimitPeriod() / 1000) + " seconds.) 
"
   + "Closing all files being written ...", e);
   closeAllFilesBeingWritten(true);
 } else {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
old mode 100644
new mode 100755
index 2d5278f..d81a0f5
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -195,6 +195,9 @@ public interface HdfsClientConfigKeys {
   "dfs.namenode.snapshot.capture.openfiles";
   boolean DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES_DEFAULT = false;
 
+  String DFS_LEASE_HARDLIMIT_KEY = "dfs.namenode.lease-hard-limit-sec";
+  long DFS_LEASE_HARDLIMIT_DEFAULT = 20 * 60;
+
   /**
* These are deprecated config keys to client code.
*/
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index 316d374..8c75d03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -138,6 +138,7 @@ public class DfsClientConf {
   replicaAccessorBuilderClasses;
 
   private final boolean dataTransferTcpNoDelay;
+  private final long leaseHardLimitPeriod;
 
   public DfsClientConf(Configuration conf) {
 // T

[hadoop] branch branch-2.10 updated: HDFS-14758. Make lease hard limit configurable and reduce the default. Contributed by hemanthboyina.

2020-02-11 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 2861936  HDFS-14758. Make lease hard limit configurable and reduce the 
default. Contributed by hemanthboyina.
2861936 is described below

commit 28619362288fbaf9c3757ee3ef770f575f7a6916
Author: Kihwal Lee 
AuthorDate: Tue Feb 11 15:28:42 2020 -0600

HDFS-14758. Make lease hard limit configurable and reduce the default.
Contributed by hemanthboyina.

(cherry picked from commit 9b8a78d97bfd825ce840c6033371c7f10e49a5b8)
(cherry picked from commit f1840669cfc92c033521bf95989479523e2a649d)
(cherry picked from commit bf09bfa2db7c0790f72d6d7d357e0b80c9eff1f2)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
---
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java  |  4 ++--
 .../org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java  |  3 +++
 .../org/apache/hadoop/hdfs/client/impl/DfsClientConf.java| 12 
 .../java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java  | 11 ---
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java  |  5 +
 .../hadoop/hdfs/server/common/HdfsServerConstants.java   |  5 ++---
 .../org/apache/hadoop/hdfs/server/namenode/LeaseManager.java |  7 ++-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  |  8 
 .../test/java/org/apache/hadoop/hdfs/TestFileAppend4.java|  5 +++--
 .../src/test/java/org/apache/hadoop/hdfs/TestLease.java  |  5 +++--
 .../test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java |  6 +++---
 .../apache/hadoop/hdfs/server/namenode/TestFileTruncate.java |  3 ++-
 .../snapshot/TestINodeFileUnderConstructionWithSnapshot.java |  5 +++--
 13 files changed, 52 insertions(+), 27 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
old mode 100644
new mode 100755
index d5c8294..77ee893
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -565,10 +565,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   } catch (IOException e) {
 // Abort if the lease has already expired.
 final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
-if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
+if (elapsed > dfsClientConf.getleaseHardLimitPeriod()) {
   LOG.warn("Failed to renew lease for " + clientName + " for "
   + (elapsed/1000) + " seconds (>= hard-limit ="
-  + (HdfsConstants.LEASE_HARDLIMIT_PERIOD / 1000) + " seconds.) "
+  + (dfsClientConf.getleaseHardLimitPeriod() / 1000) + " seconds.) 
"
   + "Closing all files being written ...", e);
   closeAllFilesBeingWritten(true);
 } else {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
old mode 100644
new mode 100755
index 44662d5..8122693
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -206,6 +206,9 @@ public interface HdfsClientConfigKeys {
   "dfs.namenode.snapshot.capture.openfiles";
   boolean DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES_DEFAULT = false;
 
+  String DFS_LEASE_HARDLIMIT_KEY = "dfs.namenode.lease-hard-limit-sec";
+  long DFS_LEASE_HARDLIMIT_DEFAULT = 20 * 60;
+
   /**
* These are deprecated config keys to client code.
*/
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index da97be5..df2fd48 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/

[hadoop] branch branch-3.1 updated: HDFS-14758. Make lease hard limit configurable and reduce the default. Contributed by hemanthboyina.

2020-02-11 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new bf09bfa  HDFS-14758. Make lease hard limit configurable and reduce the 
default. Contributed by hemanthboyina.
bf09bfa is described below

commit bf09bfa2db7c0790f72d6d7d357e0b80c9eff1f2
Author: Kihwal Lee 
AuthorDate: Tue Feb 11 14:57:49 2020 -0600

HDFS-14758. Make lease hard limit configurable and reduce the default.
Contributed by hemanthboyina.

(cherry picked from commit 9b8a78d97bfd825ce840c6033371c7f10e49a5b8)
(cherry picked from commit f1840669cfc92c033521bf95989479523e2a649d)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
---
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java  |  4 ++--
 .../org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java  |  3 +++
 .../org/apache/hadoop/hdfs/client/impl/DfsClientConf.java| 12 
 .../java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java  | 11 ---
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java  |  4 
 .../org/apache/hadoop/hdfs/server/namenode/LeaseManager.java |  7 ++-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  |  8 
 .../test/java/org/apache/hadoop/hdfs/TestFileAppend4.java|  5 +++--
 .../src/test/java/org/apache/hadoop/hdfs/TestLease.java  |  5 +++--
 .../test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java |  6 +++---
 .../apache/hadoop/hdfs/server/namenode/TestFileTruncate.java |  3 ++-
 .../snapshot/TestINodeFileUnderConstructionWithSnapshot.java |  5 +++--
 12 files changed, 49 insertions(+), 24 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
old mode 100644
new mode 100755
index 39e5702..f5faef5
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -572,10 +572,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   } catch (IOException e) {
 // Abort if the lease has already expired.
 final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
-if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
+if (elapsed > dfsClientConf.getleaseHardLimitPeriod()) {
   LOG.warn("Failed to renew lease for " + clientName + " for "
   + (elapsed/1000) + " seconds (>= hard-limit ="
-  + (HdfsConstants.LEASE_HARDLIMIT_PERIOD / 1000) + " seconds.) "
+  + (dfsClientConf.getleaseHardLimitPeriod() / 1000) + " seconds.) 
"
   + "Closing all files being written ...", e);
   closeAllFilesBeingWritten(true);
 } else {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
old mode 100644
new mode 100755
index 4196dde..2d886dd
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -197,6 +197,9 @@ public interface HdfsClientConfigKeys {
   "dfs.namenode.snapshot.capture.openfiles";
   boolean DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES_DEFAULT = false;
 
+  String DFS_LEASE_HARDLIMIT_KEY = "dfs.namenode.lease-hard-limit-sec";
+  long DFS_LEASE_HARDLIMIT_DEFAULT = 20 * 60;
+
   /**
* These are deprecated config keys to client code.
*/
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index 49709f5..cb49295 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -146,6 +146,7 @@ public class DfsClientConf {
   private final int stripedReadThreadpoolSize;
 
   private final boolean dataTransferTcpNoDelay;
+  private final long leaseHardLimitPeriod;
 
   public DfsClientConf(Configuration conf) {
 // The hdfsTimeout is currently the same as the ipc timeout
@@ -273,6 +274,10 @@ public class DfsClientConf {
 HdfsClientConfigKeys.StripedRead.THREA

[hadoop] branch branch-3.2 updated: HDFS-14758. Make lease hard limit configurable and reduce the default. Contributed by hemanthboyina.

2020-02-11 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new f184066  HDFS-14758. Make lease hard limit configurable and reduce the 
default. Contributed by hemanthboyina.
f184066 is described below

commit f1840669cfc92c033521bf95989479523e2a649d
Author: Kihwal Lee 
AuthorDate: Tue Feb 11 14:50:10 2020 -0600

HDFS-14758. Make lease hard limit configurable and reduce the default.
Contributed by hemanthboyina.

(cherry picked from commit 9b8a78d97bfd825ce840c6033371c7f10e49a5b8)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
---
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java  |  4 ++--
 .../org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java  |  3 +++
 .../org/apache/hadoop/hdfs/client/impl/DfsClientConf.java| 12 
 .../java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java  | 11 ---
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java  |  4 
 .../org/apache/hadoop/hdfs/server/namenode/LeaseManager.java |  7 ++-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  |  8 
 .../test/java/org/apache/hadoop/hdfs/TestFileAppend4.java|  5 +++--
 .../src/test/java/org/apache/hadoop/hdfs/TestLease.java  |  5 +++--
 .../test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java |  6 +++---
 .../apache/hadoop/hdfs/server/namenode/TestFileTruncate.java |  3 ++-
 .../snapshot/TestINodeFileUnderConstructionWithSnapshot.java |  3 ++-
 12 files changed, 48 insertions(+), 23 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
old mode 100644
new mode 100755
index 614fc68..187143e
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -572,10 +572,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   } catch (IOException e) {
 // Abort if the lease has already expired.
 final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
-if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
+if (elapsed > dfsClientConf.getleaseHardLimitPeriod()) {
   LOG.warn("Failed to renew lease for " + clientName + " for "
   + (elapsed/1000) + " seconds (>= hard-limit ="
-  + (HdfsConstants.LEASE_HARDLIMIT_PERIOD / 1000) + " seconds.) "
+  + (dfsClientConf.getleaseHardLimitPeriod() / 1000) + " seconds.) "
   + "Closing all files being written ...", e);
   closeAllFilesBeingWritten(true);
 } else {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
old mode 100644
new mode 100755
index b596c40..6a7e0a8
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -200,6 +200,9 @@ public interface HdfsClientConfigKeys {
   String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS =
   "dfs.provided.aliasmap.inmemory.dnrpc-address";
 
+  String DFS_LEASE_HARDLIMIT_KEY = "dfs.namenode.lease-hard-limit-sec";
+  long DFS_LEASE_HARDLIMIT_DEFAULT = 20 * 60;
+
   /**
* These are deprecated config keys to client code.
*/
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index 49709f5..cb49295 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -146,6 +146,7 @@ public class DfsClientConf {
   private final int stripedReadThreadpoolSize;
 
   private final boolean dataTransferTcpNoDelay;
+  private final long leaseHardLimitPer

[hadoop] branch trunk updated: HDFS-14758. Make lease hard limit configurable and reduce the default. Contributed by hemanthboyina.

2020-02-11 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9b8a78d  HDFS-14758. Make lease hard limit configurable and reduce the 
default. Contributed by hemanthboyina.
9b8a78d is described below

commit 9b8a78d97bfd825ce840c6033371c7f10e49a5b8
Author: Kihwal Lee 
AuthorDate: Tue Feb 11 12:40:00 2020 -0600

HDFS-14758. Make lease hard limit configurable and reduce the default.
Contributed by hemanthboyina.
---
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java  |  4 ++--
 .../org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java  |  3 +++
 .../org/apache/hadoop/hdfs/client/impl/DfsClientConf.java| 12 
 .../java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java  | 11 ---
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java  |  6 ++
 .../org/apache/hadoop/hdfs/server/namenode/LeaseManager.java |  7 ++-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  |  8 
 .../test/java/org/apache/hadoop/hdfs/TestFileAppend4.java|  5 +++--
 .../src/test/java/org/apache/hadoop/hdfs/TestLease.java  |  5 +++--
 .../test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java |  6 +++---
 .../apache/hadoop/hdfs/server/namenode/TestFileTruncate.java |  3 ++-
 .../snapshot/TestINodeFileUnderConstructionWithSnapshot.java |  3 ++-
 12 files changed, 50 insertions(+), 23 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
old mode 100644
new mode 100755
index e0eaa19..0309cbd
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -577,10 +577,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   } catch (IOException e) {
 // Abort if the lease has already expired.
 final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
-if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
+if (elapsed > dfsClientConf.getleaseHardLimitPeriod()) {
   LOG.warn("Failed to renew lease for " + clientName + " for "
   + (elapsed/1000) + " seconds (>= hard-limit ="
-  + (HdfsConstants.LEASE_HARDLIMIT_PERIOD / 1000) + " seconds.) "
+  + (dfsClientConf.getleaseHardLimitPeriod() / 1000) + " seconds.) "
   + "Closing all files being written ...", e);
   closeAllFilesBeingWritten(true);
 } else {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
old mode 100644
new mode 100755
index 38f0016..228d7ac
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -242,6 +242,9 @@ public interface HdfsClientConfigKeys {
   String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS =
   "dfs.provided.aliasmap.inmemory.dnrpc-address";
 
+  String DFS_LEASE_HARDLIMIT_KEY = "dfs.namenode.lease-hard-limit-sec";
+  long DFS_LEASE_HARDLIMIT_DEFAULT = 20 * 60;
+
   /**
* These are deprecated config keys to client code.
*/
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index d6194f4..07f0eee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -151,6 +151,7 @@ public class DfsClientConf {
   private final boolean dataTransferTcpNoDelay;
 
   private final boolean deadNodeDetectionEnabled;
+  private final long leaseHardLimitPeriod;
 
   public DfsClientConf(Configuration conf) {
 // The hdfsTimeout is currently the same as the ipc timeout
@@ -285,6 +286,10 @@ public class DfsClientConf {
 HdfsClientConfigKeys.StripedRead.THREADPOOL_SIZE_KEY +
 " must be greater than 0.");
 replicaAccessorBuilderClasses = loadReplicaAccessorBuilderClasses(conf);
+
+leaseHardLimitPeriod =
+conf.getLong(HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
+HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_DEFAUL
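
The change is the same in spirit on branch-3.1, branch-3.2 and trunk: the hard limit
moves from the HdfsConstants.LEASE_HARDLIMIT_PERIOD constant to the new
dfs.namenode.lease-hard-limit-sec key. As a purely illustrative sketch (not part of
any of the commits above; the 5-minute value and the class name are made up), a
client could tighten the limit like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class LeaseHardLimitExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Lower the lease hard limit from the new 20-minute default to 5 minutes.
    // The key is expressed in seconds, per its "-sec" suffix.
    conf.setLong(HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_KEY, 5 * 60);
    // Assuming fs.defaultFS points at an HDFS cluster, writers created from
    // this FileSystem are aborted by DFSClient.renewLease() once renewal has
    // failed for longer than the configured hard limit.
    try (FileSystem fs = FileSystem.get(conf)) {
      fs.create(new Path("/tmp/lease-hard-limit-example")).close();
    }
  }
}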

[hadoop] branch branch-2.8 updated: HADOOP-15787. TestIPC.testRTEDuringConnectionSetup fails with jdk8u242. Contributed by Zsolt Venczel.

2020-02-05 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
 new c5cc75a  HADOOP-15787. TestIPC.testRTEDuringConnectionSetup fails with 
jdk8u242. Contributed by Zsolt Venczel.
c5cc75a is described below

commit c5cc75a8550d5600b0f7d516d3c3490870b8847b
Author: Kihwal Lee 
AuthorDate: Wed Feb 5 13:48:32 2020 -0600

HADOOP-15787. TestIPC.testRTEDuringConnectionSetup fails with jdk8u242. 
Contributed by Zsolt Venczel.

(cherry picked from commit a463cf75a0ab1f0dbb8cfa16c39a4e698bc1a625)
(cherry picked from commit 0b19c8e0860bf558d995951ab9ac4cd8a1674759)

Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
---
 .../src/test/java/org/apache/hadoop/ipc/TestIPC.java   | 18 +-
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 61e1d8c..d28d95b 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
@@ -592,6 +593,16 @@ public class TestIPC {
   }
 
   /**
+   * Mock socket class to help inject an exception for HADOOP-7428.
+   */
+  static class MockSocket extends Socket {
+@Override
+public synchronized void setSoTimeout(int timeout) {
+  throw new RuntimeException("Injected fault");
+}
+  }
+
+  /**
* Test that, if a RuntimeException is thrown after creating a socket
* but before successfully connecting to the IPC server, that the
* failure is handled properly. This is a regression test for
@@ -604,11 +615,8 @@ public class TestIPC {
 SocketFactory spyFactory = spy(NetUtils.getDefaultSocketFactory(conf));
 Mockito.doAnswer(new Answer() {
   @Override
-  public Socket answer(InvocationOnMock invocation) throws Throwable {
-Socket s = spy((Socket)invocation.callRealMethod());
-doThrow(new RuntimeException("Injected fault")).when(s)
-  .setSoTimeout(anyInt());
-return s;
+  public Socket answer(InvocationOnMock invocation) {
+return new MockSocket();
   }
 }).when(spyFactory).createSocket();
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
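
The same fault-injection pattern, extracted into a standalone sketch (hypothetical,
not part of the patch): a plain Socket subclass plus a SocketFactory replace the
Mockito spy-based injection that no longer worked under jdk8u242.

import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import javax.net.SocketFactory;

public class FaultySocketFactoryExample {
  /** A socket whose setSoTimeout always fails, mirroring TestIPC.MockSocket. */
  static class MockSocket extends Socket {
    @Override
    public synchronized void setSoTimeout(int timeout) {
      throw new RuntimeException("Injected fault");
    }
  }

  /** A factory that hands out the faulty socket; no spy required. */
  static class MockSocketFactory extends SocketFactory {
    @Override
    public Socket createSocket() {
      return new MockSocket();
    }
    @Override
    public Socket createSocket(String host, int port) {
      return new MockSocket();
    }
    @Override
    public Socket createSocket(String host, int port, InetAddress localHost,
        int localPort) {
      return new MockSocket();
    }
    @Override
    public Socket createSocket(InetAddress host, int port) {
      return new MockSocket();
    }
    @Override
    public Socket createSocket(InetAddress address, int port,
        InetAddress localAddress, int localPort) {
      return new MockSocket();
    }
  }

  public static void main(String[] args) throws IOException {
    try (Socket s = new MockSocketFactory().createSocket()) {
      s.setSoTimeout(1000);  // throws the injected RuntimeException
    } catch (RuntimeException expected) {
      System.out.println("Caught: " + expected.getMessage());
    }
  }
}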



[hadoop] branch branch-2.10 updated: HADOOP-15787. TestIPC.testRTEDuringConnectionSetup fails with jdk8u242. Contributed by Zsolt Venczel.

2020-02-05 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 9cc63d1  HADOOP-15787. TestIPC.testRTEDuringConnectionSetup fails with 
jdk8u242. Contributed by Zsolt Venczel.
9cc63d1 is described below

commit 9cc63d1f637823760b205ea2a6e31daad64caf84
Author: Kihwal Lee 
AuthorDate: Wed Feb 5 13:33:10 2020 -0600

HADOOP-15787. TestIPC.testRTEDuringConnectionSetup fails with jdk8u242. 
Contributed by Zsolt Venczel.

(cherry picked from commit a463cf75a0ab1f0dbb8cfa16c39a4e698bc1a625)
(cherry picked from commit 0b19c8e0860bf558d995951ab9ac4cd8a1674759)
---
 .../src/test/java/org/apache/hadoop/ipc/TestIPC.java   | 18 --
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 9e42690..3501117 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Matchers.anyInt;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
@@ -642,6 +641,16 @@ public class TestIPC {
   }
 
   /**
+   * Mock socket class to help inject an exception for HADOOP-7428.
+   */
+  static class MockSocket extends Socket {
+@Override
+public synchronized void setSoTimeout(int timeout) {
+  throw new RuntimeException("Injected fault");
+}
+  }
+
+  /**
* Test that, if a RuntimeException is thrown after creating a socket
* but before successfully connecting to the IPC server, that the
* failure is handled properly. This is a regression test for
@@ -654,11 +663,8 @@ public class TestIPC {
 SocketFactory spyFactory = spy(NetUtils.getDefaultSocketFactory(conf));
 Mockito.doAnswer(new Answer() {
   @Override
-  public Socket answer(InvocationOnMock invocation) throws Throwable {
-Socket s = spy((Socket)invocation.callRealMethod());
-doThrow(new RuntimeException("Injected fault")).when(s)
-  .setSoTimeout(anyInt());
-return s;
+  public Socket answer(InvocationOnMock invocation) {
+return new MockSocket();
   }
 }).when(spyFactory).createSocket();
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: HADOOP-15787. TestIPC.testRTEDuringConnectionSetup fails with jdk8u242. Contributed by Zsolt Venczel.

2020-02-05 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new a628ab0  HADOOP-15787. TestIPC.testRTEDuringConnectionSetup fails with 
jdk8u242. Contributed by Zsolt Venczel.
a628ab0 is described below

commit a628ab0863111c65e5fe5526176403177f63e698
Author: Kihwal Lee 
AuthorDate: Wed Feb 5 13:16:34 2020 -0600

HADOOP-15787. TestIPC.testRTEDuringConnectionSetup fails with jdk8u242. 
Contributed by Zsolt Venczel.

(cherry picked from commit a463cf75a0ab1f0dbb8cfa16c39a4e698bc1a625)
(cherry picked from commit 0b19c8e0860bf558d995951ab9ac4cd8a1674759)
---
 .../src/test/java/org/apache/hadoop/ipc/TestIPC.java   | 18 --
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index f275f97..830a826 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Matchers.anyInt;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
@@ -642,6 +641,16 @@ public class TestIPC {
   }
 
   /**
+   * Mock socket class to help inject an exception for HADOOP-7428.
+   */
+  static class MockSocket extends Socket {
+@Override
+public synchronized void setSoTimeout(int timeout) {
+  throw new RuntimeException("Injected fault");
+}
+  }
+
+  /**
* Test that, if a RuntimeException is thrown after creating a socket
* but before successfully connecting to the IPC server, that the
* failure is handled properly. This is a regression test for
@@ -654,11 +663,8 @@ public class TestIPC {
 SocketFactory spyFactory = spy(NetUtils.getDefaultSocketFactory(conf));
 Mockito.doAnswer(new Answer() {
   @Override
-  public Socket answer(InvocationOnMock invocation) throws Throwable {
-Socket s = spy((Socket)invocation.callRealMethod());
-doThrow(new RuntimeException("Injected fault")).when(s)
-  .setSoTimeout(anyInt());
-return s;
+  public Socket answer(InvocationOnMock invocation) {
+return new MockSocket();
   }
 }).when(spyFactory).createSocket();
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HADOOP-15787. TestIPC.testRTEDuringConnectionSetup fails with jdk8u242. Contributed by Zsolt Venczel.

2020-02-05 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 0b19c8e  HADOOP-15787. TestIPC.testRTEDuringConnectionSetup fails with 
jdk8u242. Contributed by Zsolt Venczel.
0b19c8e is described below

commit 0b19c8e0860bf558d995951ab9ac4cd8a1674759
Author: Kihwal Lee 
AuthorDate: Wed Feb 5 13:02:21 2020 -0600

HADOOP-15787. TestIPC.testRTEDuringConnectionSetup fails with jdk8u242. 
Contributed by Zsolt Venczel.

(cherry picked from commit a463cf75a0ab1f0dbb8cfa16c39a4e698bc1a625)
---
 .../src/test/java/org/apache/hadoop/ipc/TestIPC.java   | 18 --
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 7ba4e53..6d2f9c7 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Matchers.anyInt;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
@@ -642,6 +641,16 @@ public class TestIPC {
   }
 
   /**
+   * Mock socket class to help inject an exception for HADOOP-7428.
+   */
+  static class MockSocket extends Socket {
+@Override
+public synchronized void setSoTimeout(int timeout) {
+  throw new RuntimeException("Injected fault");
+}
+  }
+
+  /**
* Test that, if a RuntimeException is thrown after creating a socket
* but before successfully connecting to the IPC server, that the
* failure is handled properly. This is a regression test for
@@ -654,11 +663,8 @@ public class TestIPC {
 SocketFactory spyFactory = spy(NetUtils.getDefaultSocketFactory(conf));
 Mockito.doAnswer(new Answer() {
   @Override
-  public Socket answer(InvocationOnMock invocation) throws Throwable {
-Socket s = spy((Socket)invocation.callRealMethod());
-doThrow(new RuntimeException("Injected fault")).when(s)
-  .setSoTimeout(anyInt());
-return s;
+  public Socket answer(InvocationOnMock invocation) {
+return new MockSocket();
   }
 }).when(spyFactory).createSocket();
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2.10 updated: HDFS-12491. Support wildcard in CLASSPATH for libhdfs. Contributed by Muhammad Samir Khan.

2020-02-04 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new fb8ee32  HDFS-12491. Support wildcard in CLASSPATH for libhdfs. 
Contributed by Muhammad Samir Khan.
fb8ee32 is described below

commit fb8ee3228e7658adcf10c35ee0142885029783fd
Author: Kihwal Lee 
AuthorDate: Tue Feb 4 12:38:05 2020 -0600

HDFS-12491. Support wildcard in CLASSPATH for libhdfs. Contributed by 
Muhammad Samir Khan.

(cherry picked from commit 10a60fbe20bb08cdd71076ea9bf2ebb3a2f6226e)
---
 .../src/main/native/libhdfs/jni_helper.c   | 277 -
 .../src/main/native/libhdfs/jni_helper.h   |  10 +-
 .../hadoop-hdfs/src/site/markdown/LibHdfs.md   |   3 +-
 3 files changed, 287 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
index 50d9681..0ceb98e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
@@ -24,6 +24,8 @@
 #include "os/mutexes.h"
 #include "os/thread_local_storage.h"
 
+#include <dirent.h>
+#include <errno.h>
 #include <stdio.h>
 #include <string.h>
 
@@ -358,6 +360,277 @@ done:
 
 
 /**
+ * For the given path, expand it by filling in with all *.jar or *.JAR files,
+ * separated by PATH_SEPARATOR. Assumes that expanded is big enough to hold the
+ * string, eg allocated after using this function with expanded=NULL to get the
+ * right size. Also assumes that the path ends with a "/.". The length of the
+ * expanded path is returned, which includes space at the end for either a
+ * PATH_SEPARATOR or null terminator.
+ */
+static ssize_t wildcard_expandPath(const char* path, char* expanded)
+{
+struct dirent* file;
+char* dest = expanded;
+ssize_t length = 0;
+size_t pathLength = strlen(path);
+DIR* dir;
+
+dir = opendir(path);
+if (dir != NULL) {
+// can open dir so try to match with all *.jar and *.JAR entries
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+printf("wildcard_expandPath: %s\n", path);
+#endif
+
+errno = 0;
+while ((file = readdir(dir)) != NULL) {
+const char* filename = file->d_name;
+const size_t filenameLength = strlen(filename);
+const char* jarExtension;
+
+// If filename is smaller than 4 characters then it can not possibly
+// have extension ".jar" or ".JAR"
+if (filenameLength < 4) {
+continue;
+}
+
+jarExtension = &filename[filenameLength-4];
+if ((strcmp(jarExtension, ".jar") == 0) ||
+(strcmp(jarExtension, ".JAR") == 0)) {
+
+// pathLength includes an extra '.' which we'll use for either
+// separator or null termination
+length += pathLength + filenameLength;
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+printf("wildcard_scanPath:\t%s\t:\t%zd\n", filename, length);
+#endif
+
+if (expanded != NULL) {
+// pathLength includes an extra '.'
+strncpy(dest, path, pathLength-1);
+dest += pathLength - 1;
+strncpy(dest, filename, filenameLength);
+dest += filenameLength;
+*dest = PATH_SEPARATOR;
+dest++;
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+printf("wildcard_expandPath:\t%s\t:\t%s\n",
+  filename, expanded);
+#endif
+}
+}
+}
+
+if (errno != 0) {
+fprintf(stderr, "wildcard_expandPath: on readdir %s: %s\n",
+  path, strerror(errno));
+length = -1;
+}
+
+if (closedir(dir) != 0) {
+fprintf(stderr, "wildcard_expandPath: on closedir %s: %s\n",
+path, strerror(errno));
+}
+} else if ((errno != EACCES) && (errno != ENOENT) && (errno != ENOTDIR)) {
+// can not opendir due to an error we can not handle
+fprintf(stderr, "wildcard_expandPath: on opendir %s: %s\n", path,
+strerror(errno));
+length = -1;
+}
+
+if (length == 0) {
+// either we failed to open dir due to EACCESS, ENOENT, or ENOTDIR, or
+// we did not find any file that matches *.jar or *.JAR
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+fprintf(stderr, "wildcard_expandPath: can not expand %.*s*: %s\n",
+(int)(pathLength-1), p

[hadoop] branch branch-3.1 updated: HDFS-12491. Support wildcard in CLASSPATH for libhdfs. Contributed by Muhammad Samir Khan.

2020-02-04 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new a55a0a1  HDFS-12491. Support wildcard in CLASSPATH for libhdfs. 
Contributed by Muhammad Samir Khan.
a55a0a1 is described below

commit a55a0a1f6d4361ab28b919410eddc0a2daa8d34a
Author: Kihwal Lee 
AuthorDate: Tue Feb 4 12:24:58 2020 -0600

HDFS-12491. Support wildcard in CLASSPATH for libhdfs. Contributed by 
Muhammad Samir Khan.

(cherry picked from commit 10a60fbe20bb08cdd71076ea9bf2ebb3a2f6226e)
---
 .../src/main/native/libhdfs/jni_helper.c   | 277 -
 .../src/main/native/libhdfs/jni_helper.h   |  10 +-
 .../hadoop-hdfs/src/site/markdown/LibHdfs.md   |   3 +-
 3 files changed, 287 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
index c45d598..91a3c1c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
@@ -24,6 +24,8 @@
 #include "os/mutexes.h"
 #include "os/thread_local_storage.h"
 
+#include <dirent.h>
+#include <errno.h>
 #include <stdio.h>
 #include <string.h>
 
@@ -358,6 +360,277 @@ done:
 
 
 /**
+ * For the given path, expand it by filling in with all *.jar or *.JAR files,
+ * separated by PATH_SEPARATOR. Assumes that expanded is big enough to hold the
+ * string, eg allocated after using this function with expanded=NULL to get the
+ * right size. Also assumes that the path ends with a "/.". The length of the
+ * expanded path is returned, which includes space at the end for either a
+ * PATH_SEPARATOR or null terminator.
+ */
+static ssize_t wildcard_expandPath(const char* path, char* expanded)
+{
+struct dirent* file;
+char* dest = expanded;
+ssize_t length = 0;
+size_t pathLength = strlen(path);
+DIR* dir;
+
+dir = opendir(path);
+if (dir != NULL) {
+// can open dir so try to match with all *.jar and *.JAR entries
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+printf("wildcard_expandPath: %s\n", path);
+#endif
+
+errno = 0;
+while ((file = readdir(dir)) != NULL) {
+const char* filename = file->d_name;
+const size_t filenameLength = strlen(filename);
+const char* jarExtension;
+
+// If filename is smaller than 4 characters then it can not possibly
+// have extension ".jar" or ".JAR"
+if (filenameLength < 4) {
+continue;
+}
+
+jarExtension = &filename[filenameLength-4];
+if ((strcmp(jarExtension, ".jar") == 0) ||
+(strcmp(jarExtension, ".JAR") == 0)) {
+
+// pathLength includes an extra '.' which we'll use for either
+// separator or null termination
+length += pathLength + filenameLength;
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+printf("wildcard_scanPath:\t%s\t:\t%zd\n", filename, length);
+#endif
+
+if (expanded != NULL) {
+// pathLength includes an extra '.'
+strncpy(dest, path, pathLength-1);
+dest += pathLength - 1;
+strncpy(dest, filename, filenameLength);
+dest += filenameLength;
+*dest = PATH_SEPARATOR;
+dest++;
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+printf("wildcard_expandPath:\t%s\t:\t%s\n",
+  filename, expanded);
+#endif
+}
+}
+}
+
+if (errno != 0) {
+fprintf(stderr, "wildcard_expandPath: on readdir %s: %s\n",
+  path, strerror(errno));
+length = -1;
+}
+
+if (closedir(dir) != 0) {
+fprintf(stderr, "wildcard_expandPath: on closedir %s: %s\n",
+path, strerror(errno));
+}
+} else if ((errno != EACCES) && (errno != ENOENT) && (errno != ENOTDIR)) {
+// can not opendir due to an error we can not handle
+fprintf(stderr, "wildcard_expandPath: on opendir %s: %s\n", path,
+strerror(errno));
+length = -1;
+}
+
+if (length == 0) {
+// either we failed to open dir due to EACCESS, ENOENT, or ENOTDIR, or
+// we did not find any file that matches *.jar or *.JAR
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+fprintf(stderr, "wildcard_expandPath: can not expand %.*s*: %s\n",
+(int)(pathLength-1), p

[hadoop] branch branch-3.2 updated: HDFS-12491. Support wildcard in CLASSPATH for libhdfs. Contributed by Muhammad Samir Khan.

2020-02-04 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 5257afb  HDFS-12491. Support wildcard in CLASSPATH for libhdfs. 
Contributed by Muhammad Samir Khan.
5257afb is described below

commit 5257afb1531be9ee72865d16ce3a383ff203cb5c
Author: Kihwal Lee 
AuthorDate: Tue Feb 4 12:24:12 2020 -0600

HDFS-12491. Support wildcard in CLASSPATH for libhdfs. Contributed by 
Muhammad Samir Khan.

(cherry picked from commit 10a60fbe20bb08cdd71076ea9bf2ebb3a2f6226e)
---
 .../src/main/native/libhdfs/jni_helper.c   | 277 -
 .../src/main/native/libhdfs/jni_helper.h   |  10 +-
 .../hadoop-hdfs/src/site/markdown/LibHdfs.md   |   3 +-
 3 files changed, 287 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
index c45d598..91a3c1c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
@@ -24,6 +24,8 @@
 #include "os/mutexes.h"
 #include "os/thread_local_storage.h"
 
+#include <dirent.h>
+#include <errno.h>
 #include <stdio.h>
 #include <string.h>
 
@@ -358,6 +360,277 @@ done:
 
 
 /**
+ * For the given path, expand it by filling in with all *.jar or *.JAR files,
+ * separated by PATH_SEPARATOR. Assumes that expanded is big enough to hold the
+ * string, eg allocated after using this function with expanded=NULL to get the
+ * right size. Also assumes that the path ends with a "/.". The length of the
+ * expanded path is returned, which includes space at the end for either a
+ * PATH_SEPARATOR or null terminator.
+ */
+static ssize_t wildcard_expandPath(const char* path, char* expanded)
+{
+struct dirent* file;
+char* dest = expanded;
+ssize_t length = 0;
+size_t pathLength = strlen(path);
+DIR* dir;
+
+dir = opendir(path);
+if (dir != NULL) {
+// can open dir so try to match with all *.jar and *.JAR entries
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+printf("wildcard_expandPath: %s\n", path);
+#endif
+
+errno = 0;
+while ((file = readdir(dir)) != NULL) {
+const char* filename = file->d_name;
+const size_t filenameLength = strlen(filename);
+const char* jarExtension;
+
+// If filename is smaller than 4 characters then it can not possibly
+// have extension ".jar" or ".JAR"
+if (filenameLength < 4) {
+continue;
+}
+
+jarExtension = &filename[filenameLength-4];
+if ((strcmp(jarExtension, ".jar") == 0) ||
+(strcmp(jarExtension, ".JAR") == 0)) {
+
+// pathLength includes an extra '.' which we'll use for either
+// separator or null termination
+length += pathLength + filenameLength;
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+printf("wildcard_scanPath:\t%s\t:\t%zd\n", filename, length);
+#endif
+
+if (expanded != NULL) {
+// pathLength includes an extra '.'
+strncpy(dest, path, pathLength-1);
+dest += pathLength - 1;
+strncpy(dest, filename, filenameLength);
+dest += filenameLength;
+*dest = PATH_SEPARATOR;
+dest++;
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+printf("wildcard_expandPath:\t%s\t:\t%s\n",
+  filename, expanded);
+#endif
+}
+}
+}
+
+if (errno != 0) {
+fprintf(stderr, "wildcard_expandPath: on readdir %s: %s\n",
+  path, strerror(errno));
+length = -1;
+}
+
+if (closedir(dir) != 0) {
+fprintf(stderr, "wildcard_expandPath: on closedir %s: %s\n",
+path, strerror(errno));
+}
+} else if ((errno != EACCES) && (errno != ENOENT) && (errno != ENOTDIR)) {
+// can not opendir due to an error we can not handle
+fprintf(stderr, "wildcard_expandPath: on opendir %s: %s\n", path,
+strerror(errno));
+length = -1;
+}
+
+if (length == 0) {
+// either we failed to open dir due to EACCESS, ENOENT, or ENOTDIR, or
+// we did not find any file that matches *.jar or *.JAR
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+fprintf(stderr, "wildcard_expandPath: can not expand %.*s*: %s\n",
+(int)(pathLength-1), p

[hadoop] branch trunk updated: HDFS-12491. Support wildcard in CLASSPATH for libhdfs. Contributed by Muhammad Samir Khan.

2020-02-04 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 10a60fbe HDFS-12491. Support wildcard in CLASSPATH for libhdfs. 
Contributed by Muhammad Samir Khan.
10a60fbe is described below

commit 10a60fbe20bb08cdd71076ea9bf2ebb3a2f6226e
Author: Kihwal Lee 
AuthorDate: Tue Feb 4 12:22:35 2020 -0600

HDFS-12491. Support wildcard in CLASSPATH for libhdfs. Contributed by 
Muhammad Samir Khan.
---
 .../src/main/native/libhdfs/jni_helper.c   | 277 -
 .../src/main/native/libhdfs/jni_helper.h   |  10 +-
 .../hadoop-hdfs/src/site/markdown/LibHdfs.md   |   3 +-
 3 files changed, 287 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
index 837c7e0..4efb3b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c
@@ -24,6 +24,8 @@
 #include "os/mutexes.h"
 #include "os/thread_local_storage.h"
 
+#include <dirent.h>
+#include <errno.h>
 #include <stdio.h>
 #include <string.h>
 
@@ -371,6 +373,277 @@ done:
 }
 
 /**
+ * For the given path, expand it by filling in with all *.jar or *.JAR files,
+ * separated by PATH_SEPARATOR. Assumes that expanded is big enough to hold the
+ * string, eg allocated after using this function with expanded=NULL to get the
+ * right size. Also assumes that the path ends with a "/.". The length of the
+ * expanded path is returned, which includes space at the end for either a
+ * PATH_SEPARATOR or null terminator.
+ */
+static ssize_t wildcard_expandPath(const char* path, char* expanded)
+{
+struct dirent* file;
+char* dest = expanded;
+ssize_t length = 0;
+size_t pathLength = strlen(path);
+DIR* dir;
+
+dir = opendir(path);
+if (dir != NULL) {
+// can open dir so try to match with all *.jar and *.JAR entries
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+printf("wildcard_expandPath: %s\n", path);
+#endif
+
+errno = 0;
+while ((file = readdir(dir)) != NULL) {
+const char* filename = file->d_name;
+const size_t filenameLength = strlen(filename);
+const char* jarExtension;
+
+// If filename is smaller than 4 characters then it can not possibly
+// have extension ".jar" or ".JAR"
+if (filenameLength < 4) {
+continue;
+}
+
+jarExtension = &filename[filenameLength-4];
+if ((strcmp(jarExtension, ".jar") == 0) ||
+(strcmp(jarExtension, ".JAR") == 0)) {
+
+// pathLength includes an extra '.' which we'll use for either
+// separator or null termination
+length += pathLength + filenameLength;
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+printf("wildcard_scanPath:\t%s\t:\t%zd\n", filename, length);
+#endif
+
+if (expanded != NULL) {
+// pathLength includes an extra '.'
+strncpy(dest, path, pathLength-1);
+dest += pathLength - 1;
+strncpy(dest, filename, filenameLength);
+dest += filenameLength;
+*dest = PATH_SEPARATOR;
+dest++;
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+printf("wildcard_expandPath:\t%s\t:\t%s\n",
+  filename, expanded);
+#endif
+}
+}
+}
+
+if (errno != 0) {
+fprintf(stderr, "wildcard_expandPath: on readdir %s: %s\n",
+  path, strerror(errno));
+length = -1;
+}
+
+if (closedir(dir) != 0) {
+fprintf(stderr, "wildcard_expandPath: on closedir %s: %s\n",
+path, strerror(errno));
+}
+} else if ((errno != EACCES) && (errno != ENOENT) && (errno != ENOTDIR)) {
+// can not opendir due to an error we can not handle
+fprintf(stderr, "wildcard_expandPath: on opendir %s: %s\n", path,
+strerror(errno));
+length = -1;
+}
+
+if (length == 0) {
+// either we failed to open dir due to EACCESS, ENOENT, or ENOTDIR, or
+// we did not find any file that matches *.jar or *.JAR
+
+#ifdef _LIBHDFS_JNI_HELPER_DEBUGGING_ON_
+fprintf(stderr, "wildcard_expandPath: can not expand %.*s*: %s\n",
+(int)(pathLength-1), path, strerror(errno));
+#endif
+
+// in this case, the w
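
For readers more at home on the Java side, the behaviour the C helper adds is roughly
the sketch below (purely illustrative; the real change is the C code in jni_helper.c,
and the class and method names here are invented):

import java.io.File;
import java.util.StringJoiner;

public class WildcardClasspathSketch {
  /**
   * Expand a classpath entry ending in "/*" into the individual *.jar / *.JAR
   * files it contains, joined with the platform path separator, roughly what
   * wildcard_expandPath() does in jni_helper.c.
   */
  static String expandWildcard(String entry) {
    String dir = entry.endsWith("/*")
        ? entry.substring(0, entry.length() - 2) : entry;
    File[] jars = new File(dir).listFiles(
        (d, name) -> name.endsWith(".jar") || name.endsWith(".JAR"));
    if (jars == null || jars.length == 0) {
      // Unreadable, missing, or empty directory: expand to nothing,
      // mirroring the EACCES/ENOENT/ENOTDIR handling in the C code.
      return "";
    }
    StringJoiner joined = new StringJoiner(File.pathSeparator);
    for (File jar : jars) {
      joined.add(jar.getPath());
    }
    return joined.toString();
  }

  public static void main(String[] args) {
    System.out.println(expandWildcard(args.length > 0 ? args[0] : "./*"));
  }
}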

[hadoop] branch branch-2.10 updated: HDFS-15146. TestBalancerRPCDelay.testBalancerRPCDelay fails intermittently. Contributed by Ahmed Hussein.

2020-01-29 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new d9198b2  HDFS-15146. TestBalancerRPCDelay.testBalancerRPCDelay fails 
intermittently. Contributed by Ahmed Hussein.
d9198b2 is described below

commit d9198b2d1bcd1a269ba929ad03420dc58b0fcb81
Author: Kihwal Lee 
AuthorDate: Wed Jan 29 11:07:50 2020 -0600

HDFS-15146. TestBalancerRPCDelay.testBalancerRPCDelay fails
intermittently. Contributed by Ahmed Hussein.
---
 .../java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index c8456b1..830dc0e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -844,6 +844,7 @@ public class TestBalancer {
 cluster.startDataNodes(conf, 1, true, null,
 new String[]{newRack}, null,new long[]{newCapacity});
 totalCapacity += newCapacity;
+cluster.triggerHeartbeats();
   } else {
 //if running a test with "include list", include original nodes as well
 if (nodes.getNumberofIncludeNodes()>0) {
@@ -860,11 +861,13 @@ public class TestBalancer {
 if (nodes.getNames() != null) {
   cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null,
   newRacks, nodes.getNames(), newCapacities);
-  totalCapacity += newCapacity*nodes.getNumberofNewNodes();
+  cluster.triggerHeartbeats();
+  totalCapacity += newCapacity * nodes.getNumberofNewNodes();
 } else {  // host names are not specified
   cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null,
   newRacks, null, newCapacities);
-  totalCapacity += newCapacity*nodes.getNumberofNewNodes();
+  cluster.triggerHeartbeats();
+  totalCapacity += newCapacity * nodes.getNumberofNewNodes();
   //populate the include nodes
   if (nodes.getNumberofIncludeNodes() > 0) {
 int totalNodes = cluster.getDataNodes().size();
@@ -1781,6 +1784,7 @@ public class TestBalancer {
 // start up an empty node with the same capacity and on the same rack
 cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
 new long[] { newCapacity });
+cluster.triggerHeartbeats();
 
 // Case1: Simulate first balancer by creating 'balancer.id' file. It
 // will keep this file until the balancing operation is completed.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: HDFS-15146. TestBalancerRPCDelay.testBalancerRPCDelay fails intermittently. Contributed by Ahmed Hussein.

2020-01-29 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new b8d87cf  HDFS-15146. TestBalancerRPCDelay.testBalancerRPCDelay fails 
intermittently. Contributed by Ahmed Hussein.
b8d87cf is described below

commit b8d87cfc321ace7ad4d74a67c6770579bd83104a
Author: Kihwal Lee 
AuthorDate: Wed Jan 29 11:01:51 2020 -0600

HDFS-15146. TestBalancerRPCDelay.testBalancerRPCDelay fails
intermittently. Contributed by Ahmed Hussein.

(cherry picked from commit 799d4c1cf4e8fe78eb9ab607a0449cdd075041fb)
---
 .../java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 4b0d653..cafb67b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -870,6 +870,7 @@ public class TestBalancer {
 cluster.startDataNodes(conf, 1, true, null,
 new String[]{newRack}, null,new long[]{newCapacity});
 totalCapacity += newCapacity;
+cluster.triggerHeartbeats();
   } else {
 //if running a test with "include list", include original nodes as well
 if (nodes.getNumberofIncludeNodes()>0) {
@@ -886,11 +887,13 @@ public class TestBalancer {
 if (nodes.getNames() != null) {
   cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null,
   newRacks, nodes.getNames(), newCapacities);
-  totalCapacity += newCapacity*nodes.getNumberofNewNodes();
+  cluster.triggerHeartbeats();
+  totalCapacity += newCapacity * nodes.getNumberofNewNodes();
 } else {  // host names are not specified
   cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null,
   newRacks, null, newCapacities);
-  totalCapacity += newCapacity*nodes.getNumberofNewNodes();
+  cluster.triggerHeartbeats();
+  totalCapacity += newCapacity * nodes.getNumberofNewNodes();
   //populate the include nodes
   if (nodes.getNumberofIncludeNodes() > 0) {
 int totalNodes = cluster.getDataNodes().size();
@@ -1905,6 +1908,7 @@ public class TestBalancer {
 // start up an empty node with the same capacity and on the same rack
 cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
 new long[] { newCapacity });
+cluster.triggerHeartbeats();
 
 // Case1: Simulate first balancer by creating 'balancer.id' file. It
 // will keep this file until the balancing operation is completed.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HDFS-15146. TestBalancerRPCDelay.testBalancerRPCDelay fails intermittently. Contributed by Ahmed Hussein.

2020-01-29 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 7034482  HDFS-15146. TestBalancerRPCDelay.testBalancerRPCDelay fails 
intermittently. Contributed by Ahmed Hussein.
7034482 is described below

commit 7034487fdaee0d22d8cad5068bacf8a8a570
Author: Kihwal Lee 
AuthorDate: Wed Jan 29 11:01:12 2020 -0600

HDFS-15146. TestBalancerRPCDelay.testBalancerRPCDelay fails
intermittently. Contributed by Ahmed Hussein.

(cherry picked from commit 799d4c1cf4e8fe78eb9ab607a0449cdd075041fb)
---
 .../java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index c6c17e0..d4ca119 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -870,6 +870,7 @@ public class TestBalancer {
 cluster.startDataNodes(conf, 1, true, null,
 new String[]{newRack}, null,new long[]{newCapacity});
 totalCapacity += newCapacity;
+cluster.triggerHeartbeats();
   } else {
 //if running a test with "include list", include original nodes as well
 if (nodes.getNumberofIncludeNodes()>0) {
@@ -886,11 +887,13 @@ public class TestBalancer {
 if (nodes.getNames() != null) {
   cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null,
   newRacks, nodes.getNames(), newCapacities);
-  totalCapacity += newCapacity*nodes.getNumberofNewNodes();
+  cluster.triggerHeartbeats();
+  totalCapacity += newCapacity * nodes.getNumberofNewNodes();
 } else {  // host names are not specified
   cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null,
   newRacks, null, newCapacities);
-  totalCapacity += newCapacity*nodes.getNumberofNewNodes();
+  cluster.triggerHeartbeats();
+  totalCapacity += newCapacity * nodes.getNumberofNewNodes();
   //populate the include nodes
   if (nodes.getNumberofIncludeNodes() > 0) {
 int totalNodes = cluster.getDataNodes().size();
@@ -1905,6 +1908,7 @@ public class TestBalancer {
 // start up an empty node with the same capacity and on the same rack
 cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
 new long[] { newCapacity });
+cluster.triggerHeartbeats();
 
 // Case1: Simulate first balancer by creating 'balancer.id' file. It
 // will keep this file until the balancing operation is completed.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-15146. TestBalancerRPCDelay.testBalancerRPCDelay fails intermittently. Contributed by Ahmed Hussein.

2020-01-29 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 799d4c1  HDFS-15146. TestBalancerRPCDelay.testBalancerRPCDelay fails 
intermittently. Contributed by Ahmed Hussein.
799d4c1 is described below

commit 799d4c1cf4e8fe78eb9ab607a0449cdd075041fb
Author: Kihwal Lee 
AuthorDate: Wed Jan 29 10:59:36 2020 -0600

HDFS-15146. TestBalancerRPCDelay.testBalancerRPCDelay fails
intermittently. Contributed by Ahmed Hussein.
---
 .../java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index c6c17e0..d4ca119 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -870,6 +870,7 @@ public class TestBalancer {
 cluster.startDataNodes(conf, 1, true, null,
 new String[]{newRack}, null,new long[]{newCapacity});
 totalCapacity += newCapacity;
+cluster.triggerHeartbeats();
   } else {
 //if running a test with "include list", include original nodes as well
 if (nodes.getNumberofIncludeNodes()>0) {
@@ -886,11 +887,13 @@ public class TestBalancer {
 if (nodes.getNames() != null) {
   cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null,
   newRacks, nodes.getNames(), newCapacities);
-  totalCapacity += newCapacity*nodes.getNumberofNewNodes();
+  cluster.triggerHeartbeats();
+  totalCapacity += newCapacity * nodes.getNumberofNewNodes();
 } else {  // host names are not specified
   cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null,
   newRacks, null, newCapacities);
-  totalCapacity += newCapacity*nodes.getNumberofNewNodes();
+  cluster.triggerHeartbeats();
+  totalCapacity += newCapacity * nodes.getNumberofNewNodes();
   //populate the include nodes
   if (nodes.getNumberofIncludeNodes() > 0) {
 int totalNodes = cluster.getDataNodes().size();
@@ -1905,6 +1908,7 @@ public class TestBalancer {
 // start up an empty node with the same capacity and on the same rack
 cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
 new long[] { newCapacity });
+cluster.triggerHeartbeats();
 
 // Case1: Simulate first balancer by creating 'balancer.id' file. It
 // will keep this file until the balancing operation is completed.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
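
The fix is the same on all four branches: after new DataNodes are started, a
heartbeat is forced so the NameNode sees their capacity before the balancer computes
its plan. A standalone sketch of that pattern against MiniDFSCluster (illustrative
only, not taken from the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class TriggerHeartbeatsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      // Add a second DataNode, then push a heartbeat so the NameNode registers
      // its capacity immediately instead of waiting for the next heartbeat
      // interval; this is the race the test fix closes.
      cluster.startDataNodes(conf, 1, true, null, null);
      cluster.triggerHeartbeats();
    } finally {
      cluster.shutdown();
    }
  }
}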



[hadoop] 02/02: HADOOP-16683. Disable retry of FailoverOnNetworkExceptionRetry in case of wrapped AccessControlException. Contributed by Adam Antal

2020-01-24 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 8d4671b611eb9f5a1a2f597f92fa6492a320997e
Author: Kihwal Lee 
AuthorDate: Fri Jan 24 11:08:24 2020 -0600

HADOOP-16683. Disable retry of FailoverOnNetworkExceptionRetry in case of 
wrapped AccessControlException. Contributed by Adam Antal

(cherry picked from commit 3d249301f42130de80ab3ecf8c783bb51b0fa8a3)
---
 .../org/apache/hadoop/io/retry/RetryPolicies.java | 12 +++-
 .../org/apache/hadoop/io/retry/TestRetryProxy.java| 19 +++
 .../hadoop/io/retry/UnreliableImplementation.java |  7 +++
 .../apache/hadoop/io/retry/UnreliableInterface.java   |  4 
 4 files changed, 41 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index a89c3a7..fcbcc86 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -690,7 +690,8 @@ public class RetryPolicies {
   } else if (e instanceof InvalidToken) {
 return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
 "Invalid or Cancelled Token");
-  } else if (e instanceof AccessControlException) {
+  } else if (e instanceof AccessControlException ||
+  hasWrappedAccessControlException(e)) {
 return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
 "Access denied");
   } else if (e instanceof SocketException
@@ -761,4 +762,13 @@ public class RetryPolicies {
 return unwrapped instanceof RetriableException ? 
 (RetriableException) unwrapped : null;
   }
+
+  private static boolean hasWrappedAccessControlException(Exception e) {
+Throwable throwable = e;
+while (!(throwable instanceof AccessControlException) &&
+throwable.getCause() != null) {
+  throwable = throwable.getCause();
+}
+return throwable instanceof AccessControlException;
+  }
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
index 2116fb2..a1135a0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
@@ -377,4 +377,23 @@ public class TestRetryProxy {
   assertEquals(RetryDecision.FAIL, caughtRetryAction.action);
 }
   }
+
+  @Test
+  public void testWrappedAccessControlException() throws Exception {
+RetryPolicy policy = mock(RetryPolicy.class);
+RetryPolicy realPolicy = RetryPolicies.failoverOnNetworkException(5);
+setupMockPolicy(policy, realPolicy);
+
+UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(
+UnreliableInterface.class, unreliableImpl, policy);
+
+try {
+  unreliable.failsWithWrappedAccessControlException();
+  fail("Should fail");
+} catch (IOException expected) {
+  verify(policy, times(1)).shouldRetry(any(Exception.class), anyInt(),
+  anyInt(), anyBoolean());
+  assertEquals(RetryDecision.FAIL, caughtRetryAction.action);
+}
+  }
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
index 97031fd..3734474 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
@@ -139,6 +139,13 @@ class UnreliableImplementation implements 
UnreliableInterface {
 }
   }
 
+  public void failsWithWrappedAccessControlException()
+  throws IOException {
+AccessControlException ace = new AccessControlException();
+IOException ioe = new IOException(ace);
+throw new IOException(ioe);
+  }
+
   @Override
   public String succeedsOnceThenFailsReturningString()
   throws UnreliableException, IOException, StandbyException {
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
index 738a760..80bf47d 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
+++ 
b/hadoop-common-project/hadoop-common/sr

[hadoop] 01/02: HADOOP-16580. Disable retry of FailoverOnNetworkExceptionRetry in case of AccessControlException. Contributed by Adam Antal

2020-01-24 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 2873a705f4b06b80e0685b6593048d55a51d15c1
Author: Kihwal Lee 
AuthorDate: Fri Jan 24 11:07:06 2020 -0600

HADOOP-16580. Disable retry of FailoverOnNetworkExceptionRetry in case of 
AccessControlException. Contributed by Adam Antal

(cherry picked from commit c79a5f2d9930f58ad95864c59cd0a6164cd53280)
---
 .../org/apache/hadoop/io/retry/RetryPolicies.java  |  4 +++
 .../org/apache/hadoop/io/retry/TestRetryProxy.java | 29 ++
 .../hadoop/io/retry/UnreliableImplementation.java  | 16 
 .../hadoop/io/retry/UnreliableInterface.java   | 17 +
 4 files changed, 66 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 47efbbc..a89c3a7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.ConnectTimeoutException;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.ietf.jgss.GSSException;
 
@@ -689,6 +690,9 @@ public class RetryPolicies {
   } else if (e instanceof InvalidToken) {
 return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
 "Invalid or Cancelled Token");
+  } else if (e instanceof AccessControlException) {
+return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
+"Access denied");
   } else if (e instanceof SocketException
   || (e instanceof IOException && !(e instanceof RemoteException))) {
 if (isIdempotentOrAtMostOnce) {
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
index 1accb0a0..2116fb2 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
@@ -25,6 +25,7 @@ import 
org.apache.hadoop.io.retry.UnreliableInterface.FatalException;
 import org.apache.hadoop.io.retry.UnreliableInterface.UnreliableException;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.AccessControlException;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
@@ -48,6 +49,14 @@ import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyInt;
 import static org.mockito.Mockito.*;
 
+/**
+ * TestRetryProxy tests the behaviour of the {@link RetryPolicy} class using
+ * a certain method of {@link UnreliableInterface} implemented by
+ * {@link UnreliableImplementation}.
+ *
+ * Some methods may be sensitive to the {@link Idempotent} annotation
+ * (annotated in {@link UnreliableInterface}).
+ */
 public class TestRetryProxy {
   
   private UnreliableImplementation unreliableImpl;
@@ -348,4 +357,24 @@ public class TestRetryProxy {
   assertEquals(RetryDecision.FAIL, caughtRetryAction.action);
 }
   }
+
+  @Test
+  public void testNoRetryOnAccessControlException() throws Exception {
+RetryPolicy policy = mock(RetryPolicy.class);
+RetryPolicy realPolicy = RetryPolicies.failoverOnNetworkException(5);
+setupMockPolicy(policy, realPolicy);
+
+UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(
+UnreliableInterface.class, unreliableImpl, policy);
+
+try {
+  unreliable.failsWithAccessControlExceptionEightTimes();
+  fail("Should fail");
+} catch (AccessControlException e) {
+  // expected
+  verify(policy, times(1)).shouldRetry(any(Exception.class), anyInt(),
+  anyInt(), anyBoolean());
+  assertEquals(RetryDecision.FAIL, caughtRetryAction.action);
+}
+  }
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
index 85bd598..97031fd 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImpleme

[hadoop] branch branch-2.10 updated (296786a -> 8d4671b)

2020-01-24 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a change to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 296786a  YARN-9790. Failed to set default-application-lifetime if 
maximum-application-lifetime is less than or equal to zero. Contributed by 
kyungwan nam.
 new 2873a70  HADOOP-16580. Disable retry of 
FailoverOnNetworkExceptionRetry in case of AccessControlException. Contributed 
by Adam Antal
 new 8d4671b  HADOOP-16683. Disable retry of 
FailoverOnNetworkExceptionRetry in case of wrapped AccessControlException. 
Contributed by Adam Antal

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../org/apache/hadoop/io/retry/RetryPolicies.java  | 14 +++
 .../org/apache/hadoop/io/retry/TestRetryProxy.java | 48 ++
 .../hadoop/io/retry/UnreliableImplementation.java  | 23 +++
 .../hadoop/io/retry/UnreliableInterface.java   | 21 ++
 4 files changed, 106 insertions(+)





[hadoop] branch branch-3.1 updated: HDFS-15119. Allow expiration of cached locations in DFSInputStream. Contributed by Ahmed Hussein.

2020-01-24 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 7dd43ce  HDFS-15119. Allow expiration of cached locations in 
DFSInputStream. Contributed by Ahmed Hussein.
7dd43ce is described below

commit 7dd43cec7f7f737e4998f5ca569eeac3f4919d9c
Author: Kihwal Lee 
AuthorDate: Fri Jan 24 09:53:26 2020 -0600

HDFS-15119. Allow expiration of cached locations in DFSInputStream.
Contributed by Ahmed Hussein.

(cherry picked from commit d10f77e3c91225f86ed9c0f0e6a9adf2e1434674)
(cherry picked from commit 1bb9667137bc80e63dea3e53bb2746be011a2a9a)
---
 .../java/org/apache/hadoop/hdfs/DFSClient.java |   4 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java |  92 ++-
 .../hadoop/hdfs/client/HdfsClientConfigKeys.java   |   5 +
 .../hadoop/hdfs/client/impl/DfsClientConf.java |  15 ++
 .../src/main/resources/hdfs-default.xml|   8 +
 .../hdfs/TestDFSInputStreamBlockLocations.java | 290 +
 6 files changed, 408 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index ddfe98f..39e5702 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -837,6 +837,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 namenode.reportBadBlocks(blocks);
   }
 
+  public long getRefreshReadBlkLocationsInterval() {
+return dfsClientConf.getRefreshReadBlockLocationsMS();
+  }
+
   public LocatedBlocks getLocatedBlocks(String src, long start)
   throws IOException {
 return getLocatedBlocks(src, start, dfsClientConf.getPrefetchSize());
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 8355cec..2aad175 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -83,6 +83,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.IdentityHashStore;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.htrace.core.SpanId;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -132,6 +133,10 @@ public class DFSInputStream extends FSInputStream
   //   (it's OK to acquire this lock when the lock on  is held)
   protected final Object infoLock = new Object();
 
+  // refresh locatedBlocks periodically
+  private final long refreshReadBlockIntervals;
+  /** timeStamp of the last time a block location was refreshed. */
+  private long locatedBlocksTimeStamp;
   /**
* Track the ByteBuffers that we have handed out to readers.
*
@@ -148,6 +153,10 @@ public class DFSInputStream extends FSInputStream
 return extendedReadBuffers;
   }
 
+  private boolean isPeriodicRefreshEnabled() {
+return (refreshReadBlockIntervals > 0L);
+  }
+
   /**
* This variable tracks the number of failures since the start of the
* most recent user-facing operation. That is to say, it should be reset
@@ -161,7 +170,7 @@ public class DFSInputStream extends FSInputStream
*/
   protected int failures = 0;
 
-  /* XXX Use of CocurrentHashMap is temp fix. Need to fix
+  /* XXX Use of ConcurrentHashMap is temp fix. Need to fix
* parallel accesses to DFSInputStream (through ptreads) properly */
   private final ConcurrentHashMap deadNodes =
  new ConcurrentHashMap<>();
@@ -175,6 +184,9 @@ public class DFSInputStream extends FSInputStream
   DFSInputStream(DFSClient dfsClient, String src, boolean verifyChecksum,
   LocatedBlocks locatedBlocks) throws IOException {
 this.dfsClient = dfsClient;
+this.refreshReadBlockIntervals =
+this.dfsClient.getRefreshReadBlkLocationsInterval();
+setLocatedBlocksTimeStamp();
 this.verifyChecksum = verifyChecksum;
 this.src = src;
 synchronized (infoLock) {
@@ -185,10 +197,28 @@ public class DFSInputStream extends FSInputStream
   }
 
   @VisibleForTesting
-  public long getlastBlockBeingWrittenLengthForTesting() {
+  long getlastBlockBeingWrittenLengthForTesting() {
 return lastBlockBeingWrittenLength;
   }
 
+  @VisibleForTesting
+  boolean deadNodesContain(DatanodeInfo nodeInfo) {
+return deadNodes.containsKey(nodeInfo);
+  }
+
+  @VisibleForTesting
+  void setReadTimeStampsForTesting(long 
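
A minimal sketch of the refresh decision implied by the DFSInputStream hunks above, assuming only what the diff shows: locations are refreshed when the feature is enabled (interval > 0) and the configured interval has elapsed on the monotonic clock. The wrapper class and helper method names here are illustrative, not the committed code.

import org.apache.hadoop.util.Time;

// Illustrative only: condenses the refreshReadBlockIntervals /
// locatedBlocksTimeStamp bookkeeping added above into one predicate.
public class LocationRefreshSketch {
  private final long refreshReadBlockIntervals;   // <= 0 means disabled
  private long locatedBlocksTimeStamp = Time.monotonicNow();

  public LocationRefreshSketch(long refreshIntervalMs) {
    this.refreshReadBlockIntervals = refreshIntervalMs;
  }

  boolean needsRefresh() {
    if (refreshReadBlockIntervals <= 0L) {
      return false;                               // periodic refresh disabled
    }
    return Time.monotonicNow() - locatedBlocksTimeStamp
        > refreshReadBlockIntervals;
  }

  void markRefreshed() {
    locatedBlocksTimeStamp = Time.monotonicNow();
  }
}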

[hadoop] branch branch-3.2 updated: HDFS-15119. Allow expiration of cached locations in DFSInputStream. Contributed by Ahmed Hussein.

2020-01-24 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 1bb9667  HDFS-15119. Allow expiration of cached locations in 
DFSInputStream. Contributed by Ahmed Hussein.
1bb9667 is described below

commit 1bb9667137bc80e63dea3e53bb2746be011a2a9a
Author: Kihwal Lee 
AuthorDate: Fri Jan 24 09:21:09 2020 -0600

HDFS-15119. Allow expiration of cached locations in DFSInputStream.
Contributed by Ahmed Hussein.

(cherry picked from commit d10f77e3c91225f86ed9c0f0e6a9adf2e1434674)
---
 .../java/org/apache/hadoop/hdfs/DFSClient.java |   4 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java |  92 ++-
 .../hadoop/hdfs/client/HdfsClientConfigKeys.java   |   5 +
 .../hadoop/hdfs/client/impl/DfsClientConf.java |  15 ++
 .../src/main/resources/hdfs-default.xml|   8 +
 .../hdfs/TestDFSInputStreamBlockLocations.java | 290 +
 6 files changed, 408 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index fac0577..614fc68 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -837,6 +837,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 namenode.reportBadBlocks(blocks);
   }
 
+  public long getRefreshReadBlkLocationsInterval() {
+return dfsClientConf.getRefreshReadBlockLocationsMS();
+  }
+
   public LocatedBlocks getLocatedBlocks(String src, long start)
   throws IOException {
 return getLocatedBlocks(src, start, dfsClientConf.getPrefetchSize());
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index d211e47..a4bf454 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -84,6 +84,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.IdentityHashStore;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.htrace.core.SpanId;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -133,6 +134,10 @@ public class DFSInputStream extends FSInputStream
   //   (it's OK to acquire this lock when the lock on  is held)
   protected final Object infoLock = new Object();
 
+  // refresh locatedBlocks periodically
+  private final long refreshReadBlockIntervals;
+  /** timeStamp of the last time a block location was refreshed. */
+  private long locatedBlocksTimeStamp;
   /**
* Track the ByteBuffers that we have handed out to readers.
*
@@ -149,6 +154,10 @@ public class DFSInputStream extends FSInputStream
 return extendedReadBuffers;
   }
 
+  private boolean isPeriodicRefreshEnabled() {
+return (refreshReadBlockIntervals > 0L);
+  }
+
   /**
* This variable tracks the number of failures since the start of the
* most recent user-facing operation. That is to say, it should be reset
@@ -162,7 +171,7 @@ public class DFSInputStream extends FSInputStream
*/
   protected int failures = 0;
 
-  /* XXX Use of CocurrentHashMap is temp fix. Need to fix
+  /* XXX Use of ConcurrentHashMap is temp fix. Need to fix
* parallel accesses to DFSInputStream (through ptreads) properly */
   private final ConcurrentHashMap deadNodes =
  new ConcurrentHashMap<>();
@@ -176,6 +185,9 @@ public class DFSInputStream extends FSInputStream
   DFSInputStream(DFSClient dfsClient, String src, boolean verifyChecksum,
   LocatedBlocks locatedBlocks) throws IOException {
 this.dfsClient = dfsClient;
+this.refreshReadBlockIntervals =
+this.dfsClient.getRefreshReadBlkLocationsInterval();
+setLocatedBlocksTimeStamp();
 this.verifyChecksum = verifyChecksum;
 this.src = src;
 synchronized (infoLock) {
@@ -186,10 +198,28 @@ public class DFSInputStream extends FSInputStream
   }
 
   @VisibleForTesting
-  public long getlastBlockBeingWrittenLengthForTesting() {
+  long getlastBlockBeingWrittenLengthForTesting() {
 return lastBlockBeingWrittenLength;
   }
 
+  @VisibleForTesting
+  boolean deadNodesContain(DatanodeInfo nodeInfo) {
+return deadNodes.containsKey(nodeInfo);
+  }
+
+  @VisibleForTesting
+  void setReadTimeStampsForTesting(long timeStamp) {
+setLocatedBlocksTimeStamp(timeStamp);
+  }
+
+  pri

[hadoop] branch trunk updated: HDFS-15119. Allow expiration of cached locations in DFSInputStream. Contributed by Ahmed Hussein.

2020-01-24 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d10f77e  HDFS-15119. Allow expiration of cached locations in 
DFSInputStream. Contributed by Ahmed Hussein.
d10f77e is described below

commit d10f77e3c91225f86ed9c0f0e6a9adf2e1434674
Author: Kihwal Lee 
AuthorDate: Fri Jan 24 09:15:27 2020 -0600

HDFS-15119. Allow expiration of cached locations in DFSInputStream.
Contributed by Ahmed Hussein.
---
 .../java/org/apache/hadoop/hdfs/DFSClient.java |   4 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java |  92 ++-
 .../hadoop/hdfs/client/HdfsClientConfigKeys.java   |   5 +
 .../hadoop/hdfs/client/impl/DfsClientConf.java |  15 ++
 .../src/main/resources/hdfs-default.xml|   8 +
 .../hdfs/TestDFSInputStreamBlockLocations.java | 290 +
 6 files changed, 408 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 9bb28f1..e0eaa19 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -844,6 +844,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 namenode.reportBadBlocks(blocks);
   }
 
+  public long getRefreshReadBlkLocationsInterval() {
+return dfsClientConf.getRefreshReadBlockLocationsMS();
+  }
+
   public LocatedBlocks getLocatedBlocks(String src, long start)
   throws IOException {
 return getLocatedBlocks(src, start, dfsClientConf.getPrefetchSize());
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 9827534..af9891a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.IdentityHashStore;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -135,6 +136,10 @@ public class DFSInputStream extends FSInputStream
   //   (it's OK to acquire this lock when the lock on  is held)
   protected final Object infoLock = new Object();
 
+  // refresh locatedBlocks periodically
+  private final long refreshReadBlockIntervals;
+  /** timeStamp of the last time a block location was refreshed. */
+  private long locatedBlocksTimeStamp;
   /**
* Track the ByteBuffers that we have handed out to readers.
*
@@ -151,6 +156,10 @@ public class DFSInputStream extends FSInputStream
 return extendedReadBuffers;
   }
 
+  private boolean isPeriodicRefreshEnabled() {
+return (refreshReadBlockIntervals > 0L);
+  }
+
   /**
* This variable tracks the number of failures since the start of the
* most recent user-facing operation. That is to say, it should be reset
@@ -164,7 +173,7 @@ public class DFSInputStream extends FSInputStream
*/
   protected int failures = 0;
 
-  /* XXX Use of CocurrentHashMap is temp fix. Need to fix
+  /* XXX Use of ConcurrentHashMap is temp fix. Need to fix
* parallel accesses to DFSInputStream (through ptreads) properly */
   private final ConcurrentHashMap deadNodes =
  new ConcurrentHashMap<>();
@@ -194,6 +203,9 @@ public class DFSInputStream extends FSInputStream
   DFSInputStream(DFSClient dfsClient, String src, boolean verifyChecksum,
   LocatedBlocks locatedBlocks) throws IOException {
 this.dfsClient = dfsClient;
+this.refreshReadBlockIntervals =
+this.dfsClient.getRefreshReadBlkLocationsInterval();
+setLocatedBlocksTimeStamp();
 this.verifyChecksum = verifyChecksum;
 this.src = src;
 synchronized (infoLock) {
@@ -204,10 +216,28 @@ public class DFSInputStream extends FSInputStream
   }
 
   @VisibleForTesting
-  public long getlastBlockBeingWrittenLengthForTesting() {
+  long getlastBlockBeingWrittenLengthForTesting() {
 return lastBlockBeingWrittenLength;
   }
 
+  @VisibleForTesting
+  boolean deadNodesContain(DatanodeInfo nodeInfo) {
+return deadNodes.containsKey(nodeInfo);
+  }
+
+  @VisibleForTesting
+  void setReadTimeStampsForTesting(long timeStamp) {
+setLocatedBlocksTimeStamp(timeStamp);
+  }
+
+  private void setLocatedBlocksTimeStamp() {
+setLocatedBlocksTimeStamp(Time.monotonicNow());
+  }
+
+  pri

[hadoop] branch branch-2.10 updated: HDFS-14968. Add ability to log stale datanodes. Contributed by Ahmed Hussein.

2020-01-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 0ead38b  HDFS-14968. Add ability to log stale datanodes. Contributed 
by Ahmed Hussein.
0ead38b is described below

commit 0ead38b59a31100880bed0c3ad68dc355a53e145
Author: Kihwal Lee 
AuthorDate: Wed Jan 22 09:30:41 2020 -0600

HDFS-14968. Add ability to log stale datanodes. Contributed by Ahmed 
Hussein.

(cherry picked from commit bd03053ea2f32ef982e37fbf2ffd679cb7dda797)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java

(cherry picked from commit 484270832064c84122348e663aaf5927ed411ebb)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |   5 +
 .../server/blockmanagement/HeartbeatManager.java   | 105 +++--
 .../src/main/resources/hdfs-default.xml|   8 ++
 3 files changed, 108 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index aff6b8b..e0b3a77 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -364,6 +364,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   // Whether to enable datanode's stale state detection and usage for writes
   public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY = 
"dfs.namenode.avoid.write.stale.datanode";
   public static final boolean 
DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT = false;
+  // enable and disable logging datanode staleness. Disabled by default.
+  public static final String DFS_NAMENODE_ENABLE_LOG_STALE_DATANODE_KEY =
+  "dfs.namenode.enable.log.stale.datanode";
+  public static final boolean DFS_NAMENODE_ENABLE_LOG_STALE_DATANODE_DEFAULT =
+  false;
   // The default value of the time interval for marking datanodes as stale
   public static final String DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY = 
"dfs.namenode.stale.datanode.interval";
   public static final long DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT = 30 * 
1000; // 30s
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
index a72ad64..09d015d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
@@ -18,8 +18,10 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
@@ -43,7 +45,15 @@ import com.google.common.annotations.VisibleForTesting;
  */
 class HeartbeatManager implements DatanodeStatistics {
   static final Logger LOG = LoggerFactory.getLogger(HeartbeatManager.class);
-
+  private static final String REPORT_DELTA_STALE_DN_HEADER =
+  "StaleNodes Report: [New Stale Nodes]: %d";
+  private static final String REPORT_STALE_DN_LINE_ENTRY = "%n\t %s";
+  private static final String REPORT_STALE_DN_LINE_TAIL = ", %s";
+  private static final String REPORT_REMOVE_DEAD_NODE_ENTRY =
+  "StaleNodes Report: [Remove DeadNode]: %s";
+  private static final String REPORT_REMOVE_STALE_NODE_ENTRY =
+  "StaleNodes Report: [Remove StaleNode]: %s";
+  private static final int REPORT_STALE_NODE_NODES_PER_LINE = 10;
   /**
* Stores a subset of the datanodeMap in DatanodeManager,
* containing nodes that are considered alive.
@@ -56,14 +66,19 @@ class HeartbeatManager implements DatanodeStatistics {
   /** Statistics, which are synchronized by the heartbeat manager lock. */
   private final DatanodeStats stats = new DatanodeStats();
 
-  /** The time period to check for expired datanodes */
+  /** The time period to check for expired datanodes. */
   private final long heartbeatRecheckInterval;
-  /** Heartbeat monitor thread */
+  /** Heartbeat monitor thread. */
   private final Daemon heartbeatThread = new Daemon(new Monitor());
   private final StopWatch heartbeatStopWatch = new StopWatch();
 
   final Namesystem namesystem;
   final BlockManager blockManager;
+  /** Enable log for datanode staleness. */
+  private final boolean en
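
The staleness reporting added above is disabled by default. A hedged sketch of turning it on with the key introduced in this change (equivalent to setting the property in the NameNode's hdfs-site.xml); the expected log shape is only inferred from the REPORT_* format strings above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class EnableStaleNodeLoggingSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Key added by this change; the default is false (see DFSConfigKeys above).
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_ENABLE_LOG_STALE_DATANODE_KEY, true);
    // With this enabled on the NameNode, HeartbeatManager is expected to log
    // lines shaped roughly like "StaleNodes Report: [New Stale Nodes]: 2"
    // followed by the affected datanodes, per the format strings above.
    System.out.println(conf.getBoolean(
        DFSConfigKeys.DFS_NAMENODE_ENABLE_LOG_STALE_DATANODE_KEY, false));
  }
}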

[hadoop] branch branch-3.1 updated: HDFS-14968. Add ability to log stale datanodes. Contributed by Ahmed Hussein.

2020-01-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new b4e9725  HDFS-14968. Add ability to log stale datanodes. Contributed 
by Ahmed Hussein.
b4e9725 is described below

commit b4e97259555c46b6fdc396063e5b07c79843c3ee
Author: Kihwal Lee 
AuthorDate: Wed Jan 22 09:28:01 2020 -0600

HDFS-14968. Add ability to log stale datanodes. Contributed by Ahmed 
Hussein.

(cherry picked from commit bd03053ea2f32ef982e37fbf2ffd679cb7dda797)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java

(cherry picked from commit 484270832064c84122348e663aaf5927ed411ebb)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |   5 +
 .../server/blockmanagement/HeartbeatManager.java   | 105 +++--
 .../src/main/resources/hdfs-default.xml|   8 ++
 3 files changed, 108 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 5c018d4..55de325 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -451,6 +451,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   // Whether to enable datanode's stale state detection and usage for writes
   public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY = 
"dfs.namenode.avoid.write.stale.datanode";
   public static final boolean 
DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT = false;
+  // enable and disable logging datanode staleness. Disabled by default.
+  public static final String DFS_NAMENODE_ENABLE_LOG_STALE_DATANODE_KEY =
+  "dfs.namenode.enable.log.stale.datanode";
+  public static final boolean DFS_NAMENODE_ENABLE_LOG_STALE_DATANODE_DEFAULT =
+  false;
   // The default value of the time interval for marking datanodes as stale
   public static final String DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY = 
"dfs.namenode.stale.datanode.interval";
   public static final long DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT = 30 * 
1000; // 30s
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
index d2c279f..6a52b97 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
@@ -18,8 +18,10 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
@@ -43,7 +45,15 @@ import com.google.common.annotations.VisibleForTesting;
  */
 class HeartbeatManager implements DatanodeStatistics {
   static final Logger LOG = LoggerFactory.getLogger(HeartbeatManager.class);
-
+  private static final String REPORT_DELTA_STALE_DN_HEADER =
+  "StaleNodes Report: [New Stale Nodes]: %d";
+  private static final String REPORT_STALE_DN_LINE_ENTRY = "%n\t %s";
+  private static final String REPORT_STALE_DN_LINE_TAIL = ", %s";
+  private static final String REPORT_REMOVE_DEAD_NODE_ENTRY =
+  "StaleNodes Report: [Remove DeadNode]: %s";
+  private static final String REPORT_REMOVE_STALE_NODE_ENTRY =
+  "StaleNodes Report: [Remove StaleNode]: %s";
+  private static final int REPORT_STALE_NODE_NODES_PER_LINE = 10;
   /**
* Stores a subset of the datanodeMap in DatanodeManager,
* containing nodes that are considered alive.
@@ -56,14 +66,19 @@ class HeartbeatManager implements DatanodeStatistics {
   /** Statistics, which are synchronized by the heartbeat manager lock. */
   private final DatanodeStats stats = new DatanodeStats();
 
-  /** The time period to check for expired datanodes */
+  /** The time period to check for expired datanodes. */
   private final long heartbeatRecheckInterval;
-  /** Heartbeat monitor thread */
+  /** Heartbeat monitor thread. */
   private final Daemon heartbeatThread = new Daemon(new Monitor());
   private final StopWatch heartbeatStopWatch = new StopWatch();
 
   final Namesystem namesystem;
   final BlockManager blockManager;
+  /** Enable log for datanode staleness. */
+  private final boolean en

[hadoop] branch branch-3.2 updated: HDFS-14968. Add ability to log stale datanodes. Contributed by Ahmed Hussein.

2020-01-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 4842708  HDFS-14968. Add ability to log stale datanodes. Contributed 
by Ahmed Hussein.
4842708 is described below

commit 484270832064c84122348e663aaf5927ed411ebb
Author: Kihwal Lee 
AuthorDate: Wed Jan 22 09:21:14 2020 -0600

HDFS-14968. Add ability to log stale datanodes. Contributed by Ahmed 
Hussein.

(cherry picked from commit bd03053ea2f32ef982e37fbf2ffd679cb7dda797)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |   5 +
 .../server/blockmanagement/HeartbeatManager.java   | 105 +++--
 .../src/main/resources/hdfs-default.xml|   8 ++
 3 files changed, 108 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e3236ca..dcfe401 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -464,6 +464,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   // Whether to enable datanode's stale state detection and usage for writes
   public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY = 
"dfs.namenode.avoid.write.stale.datanode";
   public static final boolean 
DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT = false;
+  // enable and disable logging datanode staleness. Disabled by default.
+  public static final String DFS_NAMENODE_ENABLE_LOG_STALE_DATANODE_KEY =
+  "dfs.namenode.enable.log.stale.datanode";
+  public static final boolean DFS_NAMENODE_ENABLE_LOG_STALE_DATANODE_DEFAULT =
+  false;
   // The default value of the time interval for marking datanodes as stale
   public static final String DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY = 
"dfs.namenode.stale.datanode.interval";
   public static final long DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT = 30 * 
1000; // 30s
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
index d2c279f..6a52b97 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
@@ -18,8 +18,10 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
@@ -43,7 +45,15 @@ import com.google.common.annotations.VisibleForTesting;
  */
 class HeartbeatManager implements DatanodeStatistics {
   static final Logger LOG = LoggerFactory.getLogger(HeartbeatManager.class);
-
+  private static final String REPORT_DELTA_STALE_DN_HEADER =
+  "StaleNodes Report: [New Stale Nodes]: %d";
+  private static final String REPORT_STALE_DN_LINE_ENTRY = "%n\t %s";
+  private static final String REPORT_STALE_DN_LINE_TAIL = ", %s";
+  private static final String REPORT_REMOVE_DEAD_NODE_ENTRY =
+  "StaleNodes Report: [Remove DeadNode]: %s";
+  private static final String REPORT_REMOVE_STALE_NODE_ENTRY =
+  "StaleNodes Report: [Remove StaleNode]: %s";
+  private static final int REPORT_STALE_NODE_NODES_PER_LINE = 10;
   /**
* Stores a subset of the datanodeMap in DatanodeManager,
* containing nodes that are considered alive.
@@ -56,14 +66,19 @@ class HeartbeatManager implements DatanodeStatistics {
   /** Statistics, which are synchronized by the heartbeat manager lock. */
   private final DatanodeStats stats = new DatanodeStats();
 
-  /** The time period to check for expired datanodes */
+  /** The time period to check for expired datanodes. */
   private final long heartbeatRecheckInterval;
-  /** Heartbeat monitor thread */
+  /** Heartbeat monitor thread. */
   private final Daemon heartbeatThread = new Daemon(new Monitor());
   private final StopWatch heartbeatStopWatch = new StopWatch();
 
   final Namesystem namesystem;
   final BlockManager blockManager;
+  /** Enable log for datanode staleness. */
+  private final boolean enableLogStaleNodes;
+
+  /** reports for stale datanodes. */
+  private final Set sta

[hadoop] branch trunk updated: HDFS-14968. Add ability to log stale datanodes. Contributed by Ahmed Hussein.

2020-01-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new bd03053  HDFS-14968. Add ability to log stale datanodes. Contributed 
by Ahmed Hussein.
bd03053 is described below

commit bd03053ea2f32ef982e37fbf2ffd679cb7dda797
Author: Kihwal Lee 
AuthorDate: Wed Jan 22 09:14:38 2020 -0600

HDFS-14968. Add ability to log stale datanodes. Contributed by Ahmed 
Hussein.
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |   5 +
 .../server/blockmanagement/HeartbeatManager.java   | 109 ++---
 .../src/main/resources/hdfs-default.xml|   8 ++
 3 files changed, 109 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index c8f031e..7856419 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -483,6 +483,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   // Whether to enable datanode's stale state detection and usage for writes
   public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY = 
"dfs.namenode.avoid.write.stale.datanode";
   public static final boolean 
DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT = false;
+  // enable and disable logging datanode staleness. Disabled by default.
+  public static final String DFS_NAMENODE_ENABLE_LOG_STALE_DATANODE_KEY =
+  "dfs.namenode.enable.log.stale.datanode";
+  public static final boolean DFS_NAMENODE_ENABLE_LOG_STALE_DATANODE_DEFAULT =
+  false;
   // The default value of the time interval for marking datanodes as stale
   public static final String DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY = 
"dfs.namenode.stale.datanode.interval";
   public static final long DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT = 30 * 
1000; // 30s
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
index 46444bc..9e4d867 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
@@ -18,8 +18,10 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
@@ -43,7 +45,15 @@ import com.google.common.annotations.VisibleForTesting;
  */
 class HeartbeatManager implements DatanodeStatistics {
   static final Logger LOG = LoggerFactory.getLogger(HeartbeatManager.class);
-
+  private static final String REPORT_DELTA_STALE_DN_HEADER =
+  "StaleNodes Report: [New Stale Nodes]: %d";
+  private static final String REPORT_STALE_DN_LINE_ENTRY = "%n\t %s";
+  private static final String REPORT_STALE_DN_LINE_TAIL = ", %s";
+  private static final String REPORT_REMOVE_DEAD_NODE_ENTRY =
+  "StaleNodes Report: [Remove DeadNode]: %s";
+  private static final String REPORT_REMOVE_STALE_NODE_ENTRY =
+  "StaleNodes Report: [Remove StaleNode]: %s";
+  private static final int REPORT_STALE_NODE_NODES_PER_LINE = 10;
   /**
* Stores a subset of the datanodeMap in DatanodeManager,
* containing nodes that are considered alive.
@@ -56,14 +66,19 @@ class HeartbeatManager implements DatanodeStatistics {
   /** Statistics, which are synchronized by the heartbeat manager lock. */
   private final DatanodeStats stats = new DatanodeStats();
 
-  /** The time period to check for expired datanodes */
+  /** The time period to check for expired datanodes. */
   private final long heartbeatRecheckInterval;
-  /** Heartbeat monitor thread */
+  /** Heartbeat monitor thread. */
   private final Daemon heartbeatThread = new Daemon(new Monitor());
   private final StopWatch heartbeatStopWatch = new StopWatch();
 
   final Namesystem namesystem;
   final BlockManager blockManager;
+  /** Enable log for datanode staleness. */
+  private final boolean enableLogStaleNodes;
+
+  /** reports for stale datanodes. */
+  private final Set staleDataNodes = new HashSet<>();
 
   HeartbeatManager(final Namesystem namesystem,
   final BlockManager blockManager, final Configuration conf) {
@@ -78,6 +93,9 @@ class H

[hadoop] branch branch-2.10 updated: HDFS-15125. Pull back HDFS-11353, HDFS-13993, HDFS-13945, and HDFS-14324 to branch-2.10. Contributed by Jim Brennan.

2020-01-21 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new c5d43b6  HDFS-15125. Pull back HDFS-11353, HDFS-13993, HDFS-13945, and 
HDFS-14324 to branch-2.10. Contributed by Jim Brennan.
c5d43b6 is described below

commit c5d43b65a904d3b86909b7e3509336d7b4f07a67
Author: Kihwal Lee 
AuthorDate: Tue Jan 21 09:59:14 2020 -0600

HDFS-15125. Pull back HDFS-11353, HDFS-13993, HDFS-13945, and HDFS-14324
to branch-2.10. Contributed by Jim Brennan.
---
 .../datanode/TestDataNodeHotSwapVolumes.java   |  9 +--
 .../server/datanode/TestDataNodeVolumeFailure.java | 73 ++
 .../TestDataNodeVolumeFailureReporting.java| 12 +++-
 .../TestDataNodeVolumeFailureToleration.java   |  6 ++
 4 files changed, 50 insertions(+), 50 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index ea28ea4..93c1242 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -939,8 +939,7 @@ public class TestDataNodeHotSwapVolumes {
*/
   @Test(timeout=6)
   public void testDirectlyReloadAfterCheckDiskError()
-  throws IOException, TimeoutException, InterruptedException,
-  ReconfigurationException {
+  throws Exception {
 // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
 // volume failures which is currently not supported on Windows.
 assumeTrue(!Path.WINDOWS);
@@ -959,11 +958,7 @@ public class TestDataNodeHotSwapVolumes {
 
 DataNodeTestUtils.injectDataDirFailure(dirToFail);
 // Call and wait DataNode to detect disk failure.
-long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
-dn.checkDiskErrorAsync(failedVolume);
-while (dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
-  Thread.sleep(100);
-}
+DataNodeTestUtils.waitForDiskError(dn, failedVolume);
 
 createFile(new Path("/test1"), 32, (short)2);
 assertEquals(used, failedVolume.getDfsUsed());
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index bafc7e0..a0ffe20 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -35,16 +35,15 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
 
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.filefilter.TrueFileFilter;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
-import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory;
 import org.apache.hadoop.hdfs.ClientContext;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -52,6 +51,7 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
+import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -75,20 +75,17 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.filefilter.TrueFileFilter;
-
-import com.google.common.base.Supplier;
-
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.internal.AssumptionViolatedException;
-
+import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Supplier;
+
 /**
  * Fine-grain testing of block files and locations after volume failure.
  */
@@ -114,6 +111,10 @@ public class TestDataNodeVol

[hadoop] branch branch-3.1 updated: HDFS-15095. Fix TestDecommissioningStatus. Contributed by Ahmed Hussein.

2020-01-10 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 82fa8d8  HDFS-15095. Fix TestDecommissioningStatus. Contributed by 
Ahmed Hussein.
82fa8d8 is described below

commit 82fa8d88ef1f5b8daf08ae80c6e47e2151882a76
Author: Kihwal Lee 
AuthorDate: Fri Jan 10 12:47:49 2020 -0600

HDFS-15095. Fix TestDecommissioningStatus. Contributed by Ahmed Hussein.

(cherry picked from commit 5fb901ac4017b4f13b089ecd920e864cd53ad3a6)
---
 .../server/namenode/TestDecommissioningStatus.java | 31 --
 1 file changed, 29 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index cfebff7..d9d1f47 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -27,6 +27,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import java.util.concurrent.TimeoutException;
 import org.apache.commons.io.output.ByteArrayOutputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -55,6 +56,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.junit.After;
@@ -199,6 +201,23 @@ public class TestDecommissioningStatus {
   }
 
   /**
+   * Allows the main thread to block until the decommission is checked by the
+   * admin manager.
+   * @param dnAdminMgr admin instance in the datanode manager.
+   * @param trackedNumber number of nodes expected to be DECOMMISSIONED or
+   *IN_MAINTENANCE.
+   * @throws TimeoutException
+   * @throws InterruptedException
+   */
+  private void waitForDecommissionedNodes(final DatanodeAdminManager 
dnAdminMgr,
+  final int trackedNumber)
+  throws TimeoutException, InterruptedException {
+GenericTestUtils
+.waitFor(() -> dnAdminMgr.getNumTrackedNodes() == trackedNumber,
+100, 2000);
+  }
+
+  /**
* Tests Decommissioning Status in DFS.
*/
   @Test
@@ -233,6 +252,8 @@ public class TestDecommissioningStatus {
   dm.refreshNodes(conf);
   decommissionedNodes.add(downnode);
   BlockManagerTestUtil.recheckDecommissionState(dm);
+  // Block until the admin's monitor updates the number of tracked nodes.
+  waitForDecommissionedNodes(dm.getDatanodeAdminManager(), iteration + 1);
   final List decommissioningNodes = 
dm.getDecommissioningNodes();
   if (iteration == 0) {
 assertEquals(decommissioningNodes.size(), 1);
@@ -309,11 +330,11 @@ public class TestDecommissioningStatus {
 
 // Force DatanodeManager to check decommission state.
 BlockManagerTestUtil.recheckDecommissionState(dm);
-
+// Block until the admin's monitor updates the number of tracked nodes.
+waitForDecommissionedNodes(dm.getDatanodeAdminManager(), 1);
 // Verify that the DN remains in DECOMMISSION_INPROGRESS state.
 assertTrue("the node should be DECOMMISSION_IN_PROGRESSS",
 dead.get(0).isDecommissionInProgress());
-
 // Check DatanodeManager#getDecommissionNodes, make sure it returns
 // the node as decommissioning, even if it's dead
 List decomlist = dm.getDecommissioningNodes();
@@ -323,6 +344,8 @@ public class TestDecommissioningStatus {
 // DECOMMISSION_IN_PROGRESS node become DECOMMISSIONED
 AdminStatesBaseTest.cleanupFile(fileSys, f);
 BlockManagerTestUtil.recheckDecommissionState(dm);
+// Block until the admin's monitor updates the number of tracked nodes.
+waitForDecommissionedNodes(dm.getDatanodeAdminManager(), 0);
 assertTrue("the node should be decommissioned",
 dead.get(0).isDecommissioned());
 
@@ -357,6 +380,8 @@ public class TestDecommissioningStatus {
 decommissionNode(dnName);
 dm.refreshNodes(conf);
 BlockManagerTestUtil.recheckDecommissionState(dm);
+// Block until the admin's monitor updates the number of tracked nodes.
+waitForDecommissionedNodes(dm.getDatanodeAdminManager(), 0);
 assertTrue(dnDescriptor.isDecommissioned());
 
 // Add the node back
@@ -405,6 +430,8 @@ public class TestDecommissioningStatus {
 hostsFileWriter.initExcludeHosts(nodes);
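
For readers unfamiliar with the helper added above: GenericTestUtils.waitFor re-evaluates a condition on a fixed interval and throws TimeoutException once the overall timeout expires (100 ms / 2000 ms in the call above). A self-contained toy illustration of the same idiom follows; the background thread and flag are purely illustrative.

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.test.GenericTestUtils;

public class WaitForSketch {
  public static void main(String[] args)
      throws TimeoutException, InterruptedException {
    AtomicBoolean done = new AtomicBoolean(false);
    // Flip the condition from another thread after a short delay.
    new Thread(() -> {
      try {
        Thread.sleep(500);
      } catch (InterruptedException ignored) {
        Thread.currentThread().interrupt();
      }
      done.set(true);
    }).start();

    // Same idiom as waitForDecommissionedNodes above: re-check every 100 ms,
    // give up with a TimeoutException if still false after 2000 ms.
    GenericTestUtils.waitFor(() -> done.get(), 100, 2000);
    System.out.println("condition satisfied");
  }
}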
  

[hadoop] branch branch-3.2 updated: HDFS-15095. Fix TestDecommissioningStatus. Contributed by Ahmed Hussein.

2020-01-10 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 59260a0  HDFS-15095. Fix TestDecommissioningStatus. Contributed by 
Ahmed Hussein.
59260a0 is described below

commit 59260a0e0e0b99f233a86b9b3342f1e6b75fa7c2
Author: Kihwal Lee 
AuthorDate: Fri Jan 10 12:46:49 2020 -0600

HDFS-15095. Fix TestDecommissioningStatus. Contributed by Ahmed Hussein.

(cherry picked from commit 5fb901ac4017b4f13b089ecd920e864cd53ad3a6)
---
 .../server/namenode/TestDecommissioningStatus.java | 31 --
 1 file changed, 29 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index cfebff7..d9d1f47 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -27,6 +27,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import java.util.concurrent.TimeoutException;
 import org.apache.commons.io.output.ByteArrayOutputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -55,6 +56,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.junit.After;
@@ -199,6 +201,23 @@ public class TestDecommissioningStatus {
   }
 
   /**
+   * Allows the main thread to block until the decommission is checked by the
+   * admin manager.
+   * @param dnAdminMgr admin instance in the datanode manager.
+   * @param trackedNumber number of nodes expected to be DECOMMISSIONED or
+   *IN_MAINTENANCE.
+   * @throws TimeoutException
+   * @throws InterruptedException
+   */
+  private void waitForDecommissionedNodes(final DatanodeAdminManager 
dnAdminMgr,
+  final int trackedNumber)
+  throws TimeoutException, InterruptedException {
+GenericTestUtils
+.waitFor(() -> dnAdminMgr.getNumTrackedNodes() == trackedNumber,
+100, 2000);
+  }
+
+  /**
* Tests Decommissioning Status in DFS.
*/
   @Test
@@ -233,6 +252,8 @@ public class TestDecommissioningStatus {
   dm.refreshNodes(conf);
   decommissionedNodes.add(downnode);
   BlockManagerTestUtil.recheckDecommissionState(dm);
+  // Block until the admin's monitor updates the number of tracked nodes.
+  waitForDecommissionedNodes(dm.getDatanodeAdminManager(), iteration + 1);
   final List decommissioningNodes = 
dm.getDecommissioningNodes();
   if (iteration == 0) {
 assertEquals(decommissioningNodes.size(), 1);
@@ -309,11 +330,11 @@ public class TestDecommissioningStatus {
 
 // Force DatanodeManager to check decommission state.
 BlockManagerTestUtil.recheckDecommissionState(dm);
-
+// Block until the admin's monitor updates the number of tracked nodes.
+waitForDecommissionedNodes(dm.getDatanodeAdminManager(), 1);
 // Verify that the DN remains in DECOMMISSION_INPROGRESS state.
 assertTrue("the node should be DECOMMISSION_IN_PROGRESSS",
 dead.get(0).isDecommissionInProgress());
-
 // Check DatanodeManager#getDecommissionNodes, make sure it returns
 // the node as decommissioning, even if it's dead
 List decomlist = dm.getDecommissioningNodes();
@@ -323,6 +344,8 @@ public class TestDecommissioningStatus {
 // DECOMMISSION_IN_PROGRESS node become DECOMMISSIONED
 AdminStatesBaseTest.cleanupFile(fileSys, f);
 BlockManagerTestUtil.recheckDecommissionState(dm);
+// Block until the admin's monitor updates the number of tracked nodes.
+waitForDecommissionedNodes(dm.getDatanodeAdminManager(), 0);
 assertTrue("the node should be decommissioned",
 dead.get(0).isDecommissioned());
 
@@ -357,6 +380,8 @@ public class TestDecommissioningStatus {
 decommissionNode(dnName);
 dm.refreshNodes(conf);
 BlockManagerTestUtil.recheckDecommissionState(dm);
+// Block until the admin's monitor updates the number of tracked nodes.
+waitForDecommissionedNodes(dm.getDatanodeAdminManager(), 0);
 assertTrue(dnDescriptor.isDecommissioned());
 
 // Add the node back
@@ -405,6 +430,8 @@ public class TestDecommissioningStatus {
 hostsFileWriter.initExcludeHosts(nodes);
  

[hadoop] branch trunk updated: HDFS-15095. Fix TestDecommissioningStatus. Contributed by Ahmed Hussein.

2020-01-10 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5fb901a  HDFS-15095. Fix TestDecommissioningStatus. Contributed by 
Ahmed Hussein.
5fb901a is described below

commit 5fb901ac4017b4f13b089ecd920e864cd53ad3a6
Author: Kihwal Lee 
AuthorDate: Fri Jan 10 12:43:21 2020 -0600

HDFS-15095. Fix TestDecommissioningStatus. Contributed by Ahmed Hussein.
---
 .../server/namenode/TestDecommissioningStatus.java | 33 --
 1 file changed, 30 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index ad99c11..800f273 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -27,6 +27,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import java.util.concurrent.TimeoutException;
 import org.apache.commons.io.output.ByteArrayOutputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -55,6 +56,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.junit.After;
@@ -220,6 +222,23 @@ public class TestDecommissioningStatus {
   }
 
   /**
+   * Allows the main thread to block until the decommission is checked by the
+   * admin manager.
+   * @param dnAdminMgr admin instance in the datanode manager.
+   * @param trackedNumber number of nodes expected to be DECOMMISSIONED or
+   *IN_MAINTENANCE.
+   * @throws TimeoutException
+   * @throws InterruptedException
+   */
+  private void waitForDecommissionedNodes(final DatanodeAdminManager 
dnAdminMgr,
+  final int trackedNumber)
+  throws TimeoutException, InterruptedException {
+GenericTestUtils
+.waitFor(() -> dnAdminMgr.getNumTrackedNodes() == trackedNumber,
+100, 2000);
+  }
+
+  /**
* Tests Decommissioning Status in DFS.
*/
   @Test
@@ -254,11 +273,13 @@ public class TestDecommissioningStatus {
   dm.refreshNodes(conf);
   decommissionedNodes.add(downnode);
   BlockManagerTestUtil.recheckDecommissionState(dm);
+  // Block until the admin's monitor updates the number of tracked nodes.
+  waitForDecommissionedNodes(dm.getDatanodeAdminManager(), iteration + 1);
   final List decommissioningNodes = 
dm.getDecommissioningNodes();
   if (iteration == 0) {
 assertEquals(decommissioningNodes.size(), 1);
 DatanodeDescriptor decommNode = decommissioningNodes.get(0);
-   // checkDecommissionStatus(decommNode, 3, 0, 1);
+checkDecommissionStatus(decommNode, 3, 0, 1);
 checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1),
 fileSys, admin);
   } else {
@@ -330,11 +351,11 @@ public class TestDecommissioningStatus {
 
 // Force DatanodeManager to check decommission state.
 BlockManagerTestUtil.recheckDecommissionState(dm);
-
+// Block until the admin's monitor updates the number of tracked nodes.
+waitForDecommissionedNodes(dm.getDatanodeAdminManager(), 1);
 // Verify that the DN remains in DECOMMISSION_INPROGRESS state.
 assertTrue("the node should be DECOMMISSION_IN_PROGRESSS",
 dead.get(0).isDecommissionInProgress());
-
 // Check DatanodeManager#getDecommissionNodes, make sure it returns
 // the node as decommissioning, even if it's dead
 List decomlist = dm.getDecommissioningNodes();
@@ -344,6 +365,8 @@ public class TestDecommissioningStatus {
 // DECOMMISSION_IN_PROGRESS node become DECOMMISSIONED
 AdminStatesBaseTest.cleanupFile(fileSys, f);
 BlockManagerTestUtil.recheckDecommissionState(dm);
+// Block until the admin's monitor updates the number of tracked nodes.
+waitForDecommissionedNodes(dm.getDatanodeAdminManager(), 0);
 assertTrue("the node should be decommissioned",
 dead.get(0).isDecommissioned());
 
@@ -378,6 +401,8 @@ public class TestDecommissioningStatus {
 decommissionNode(dnName);
 dm.refreshNodes(conf);
 BlockManagerTestUtil.recheckDecommissionState(dm);
+// Block until the admin's monitor updates the number of tracked nodes.
+waitForDecommissionedNodes(dm.getDatanodeAdminManager

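The fix above avoids a race: after BlockManagerTestUtil.recheckDecommissionState(dm) the test now polls until the DatanodeAdminManager reports the expected number of tracked nodes instead of asserting immediately. Below is a minimal, JDK-only sketch of the same wait-until-condition pattern; WaitUtil and its signature are hypothetical stand-ins, not Hadoop's GenericTestUtils.

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public final class WaitUtil {
  private WaitUtil() {
  }

  // Polls the condition every checkEveryMillis until it becomes true, or
  // fails with TimeoutException once waitForMillis has elapsed. This is the
  // shape of the waitFor(() -> ..., 100, 2000) call used in the patch.
  public static void waitFor(BooleanSupplier condition,
      long checkEveryMillis, long waitForMillis)
      throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + waitForMillis;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() >= deadline) {
        throw new TimeoutException(
            "condition not met within " + waitForMillis + " ms");
      }
      Thread.sleep(checkEveryMillis);
    }
  }
}

A test would then wait with something like WaitUtil.waitFor(() -> trackedNodes() == expected, 100, 2000) before asserting on decommission state, where trackedNodes() is whatever accessor the test has for the tracked count.
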
[hadoop] branch branch-2.8 updated: HDFS-12749. DN may not send block report to NN after NN restart. Contributed by Xiaoqiao He.

2019-10-21 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
 new eec4407  HDFS-12749. DN may not send block report to NN after NN 
restart. Contributed by Xiaoqiao He.
eec4407 is described below

commit eec4407314c7ae2e68ad4301ec1b0e4cbd2dab34
Author: Kihwal Lee 
AuthorDate: Mon Oct 21 16:52:37 2019 -0500

HDFS-12749. DN may not send block report to NN after NN restart. 
Contributed by Xiaoqiao He.

(cherry picked from commit c4e27ef7735acd6f91b73d2ecb0227f8dd75a2e4)
---
 .../hadoop/hdfs/server/datanode/BPServiceActor.java| 18 --
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 297a4a7..fcbe7f9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -706,11 +706,16 @@ class BPServiceActor implements Runnable {
   } catch(EOFException e) {  // namenode might have just restarted
 LOG.info("Problem connecting to server: " + nnAddr + " :"
 + e.getLocalizedMessage());
-sleepAndLogInterrupts(1000, "connecting to server");
   } catch(SocketTimeoutException e) {  // namenode is busy
 LOG.info("Problem connecting to server: " + nnAddr);
-sleepAndLogInterrupts(1000, "connecting to server");
+  } catch(RemoteException e) {
+LOG.warn("RemoteException in register", e);
+throw e;
+  } catch(IOException e) {
+LOG.warn("Problem connecting to server: " + nnAddr);
   }
+  // Try again in a second
+  sleepAndLogInterrupts(1000, "connecting to server");
 }
 
 LOG.info("Block pool " + this + " successfully registered with NN");
@@ -809,6 +814,15 @@ class BPServiceActor implements Runnable {
   if (bpos.processCommandFromActor(cmd, this) == false) {
 return false;
   }
+} catch (RemoteException re) {
+  String reClass = re.getClassName();
+  if (UnregisteredNodeException.class.getName().equals(reClass) ||
+  DisallowedDatanodeException.class.getName().equals(reClass) ||
+  IncorrectVersionException.class.getName().equals(reClass)) {
+LOG.warn(this + " is shutting down", re);
+shouldServiceRun = false;
+return false;
+  }
 } catch (IOException ioe) {
   LOG.warn("Error processing datanode Command", ioe);
 }





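The register() change above restructures the retry loop so the one-second back-off runs after any recoverable connection failure, including the newly added generic IOException catch, while a RemoteException is logged and rethrown. A standalone sketch of that control flow, using hypothetical names and plain JDK exceptions only (the real code also handles RemoteException and uses sleepAndLogInterrupts):

import java.io.EOFException;
import java.io.IOException;
import java.net.SocketTimeoutException;

public class RegisterRetrySketch {

  // Stand-in for the NameNode registration RPC; hypothetical interface.
  interface NameNodeProxy {
    void register() throws IOException;
  }

  static void registerUntilSuccess(NameNodeProxy nn)
      throws InterruptedException {
    while (true) {
      try {
        nn.register();
        return;                                  // registered, stop retrying
      } catch (EOFException e) {
        System.out.println("NameNode may have just restarted: " + e);
      } catch (SocketTimeoutException e) {
        System.out.println("NameNode busy: " + e);
      } catch (IOException e) {
        // the patch adds this generic catch so other connection problems
        // are retried instead of escaping the loop
        System.out.println("Problem connecting: " + e);
      }
      Thread.sleep(1000);                        // try again in a second
    }
  }
}
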
[hadoop] branch branch-2.9 updated: HDFS-12749. DN may not send block report to NN after NN restart. Contributed by Xiaoqiao He.

2019-10-21 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
 new d112bf8  HDFS-12749. DN may not send block report to NN after NN 
restart. Contributed by Xiaoqiao He.
d112bf8 is described below

commit d112bf8c34815399196480b040ab940542f4d052
Author: Kihwal Lee 
AuthorDate: Mon Oct 21 16:51:47 2019 -0500

HDFS-12749. DN may not send block report to NN after NN restart. 
Contributed by Xiaoqiao He.

(cherry picked from commit c4e27ef7735acd6f91b73d2ecb0227f8dd75a2e4)
---
 .../hadoop/hdfs/server/datanode/BPServiceActor.java| 18 --
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 84dbd06..15f5c0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -773,11 +773,16 @@ class BPServiceActor implements Runnable {
   } catch(EOFException e) {  // namenode might have just restarted
 LOG.info("Problem connecting to server: " + nnAddr + " :"
 + e.getLocalizedMessage());
-sleepAndLogInterrupts(1000, "connecting to server");
   } catch(SocketTimeoutException e) {  // namenode is busy
 LOG.info("Problem connecting to server: " + nnAddr);
-sleepAndLogInterrupts(1000, "connecting to server");
+  } catch(RemoteException e) {
+LOG.warn("RemoteException in register", e);
+throw e;
+  } catch(IOException e) {
+LOG.warn("Problem connecting to server: " + nnAddr);
   }
+  // Try again in a second
+  sleepAndLogInterrupts(1000, "connecting to server");
 }
 
 LOG.info("Block pool " + this + " successfully registered with NN");
@@ -880,6 +885,15 @@ class BPServiceActor implements Runnable {
   if (bpos.processCommandFromActor(cmd, this) == false) {
 return false;
   }
+} catch (RemoteException re) {
+  String reClass = re.getClassName();
+  if (UnregisteredNodeException.class.getName().equals(reClass) ||
+  DisallowedDatanodeException.class.getName().equals(reClass) ||
+  IncorrectVersionException.class.getName().equals(reClass)) {
+LOG.warn(this + " is shutting down", re);
+shouldServiceRun = false;
+return false;
+  }
 } catch (IOException ioe) {
   LOG.warn("Error processing datanode Command", ioe);
 }





[hadoop] branch branch-2.10 updated: HDFS-12749. DN may not send block report to NN after NN restart. Contributed by Xiaoqiao He.

2019-10-21 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new ddc8673  HDFS-12749. DN may not send block report to NN after NN 
restart. Contributed by Xiaoqiao He.
ddc8673 is described below

commit ddc8673ebd2b94955ac640b5408adc8013d088ff
Author: Kihwal Lee 
AuthorDate: Mon Oct 21 16:51:02 2019 -0500

HDFS-12749. DN may not send block report to NN after NN restart. 
Contributed by Xiaoqiao He.

(cherry picked from commit c4e27ef7735acd6f91b73d2ecb0227f8dd75a2e4)
---
 .../hadoop/hdfs/server/datanode/BPServiceActor.java| 18 --
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index e99023d..c977f62 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -773,11 +773,16 @@ class BPServiceActor implements Runnable {
   } catch(EOFException e) {  // namenode might have just restarted
 LOG.info("Problem connecting to server: " + nnAddr + " :"
 + e.getLocalizedMessage());
-sleepAndLogInterrupts(1000, "connecting to server");
   } catch(SocketTimeoutException e) {  // namenode is busy
 LOG.info("Problem connecting to server: " + nnAddr);
-sleepAndLogInterrupts(1000, "connecting to server");
+  } catch(RemoteException e) {
+LOG.warn("RemoteException in register", e);
+throw e;
+  } catch(IOException e) {
+LOG.warn("Problem connecting to server: " + nnAddr);
   }
+  // Try again in a second
+  sleepAndLogInterrupts(1000, "connecting to server");
 }
 
 LOG.info("Block pool " + this + " successfully registered with NN");
@@ -880,6 +885,15 @@ class BPServiceActor implements Runnable {
   if (bpos.processCommandFromActor(cmd, this) == false) {
 return false;
   }
+} catch (RemoteException re) {
+  String reClass = re.getClassName();
+  if (UnregisteredNodeException.class.getName().equals(reClass) ||
+  DisallowedDatanodeException.class.getName().equals(reClass) ||
+  IncorrectVersionException.class.getName().equals(reClass)) {
+LOG.warn(this + " is shutting down", re);
+shouldServiceRun = false;
+return false;
+  }
 } catch (IOException ioe) {
   LOG.warn("Error processing datanode Command", ioe);
 }





[hadoop] branch branch-2 updated: HDFS-12749. DN may not send block report to NN after NN restart. Contributed by Xiaoqiao He.

2019-10-21 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new b634bd2  HDFS-12749. DN may not send block report to NN after NN 
restart. Contributed by Xiaoqiao He.
b634bd2 is described below

commit b634bd225b50b106e5afcb0eb21f7d95261e9d89
Author: Kihwal Lee 
AuthorDate: Mon Oct 21 16:36:42 2019 -0500

HDFS-12749. DN may not send block report to NN after NN restart. 
Contributed by Xiaoqiao He.

(cherry picked from commit c4e27ef7735acd6f91b73d2ecb0227f8dd75a2e4)
---
 .../hadoop/hdfs/server/datanode/BPServiceActor.java| 18 --
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index e99023d..c977f62 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -773,11 +773,16 @@ class BPServiceActor implements Runnable {
   } catch(EOFException e) {  // namenode might have just restarted
 LOG.info("Problem connecting to server: " + nnAddr + " :"
 + e.getLocalizedMessage());
-sleepAndLogInterrupts(1000, "connecting to server");
   } catch(SocketTimeoutException e) {  // namenode is busy
 LOG.info("Problem connecting to server: " + nnAddr);
-sleepAndLogInterrupts(1000, "connecting to server");
+  } catch(RemoteException e) {
+LOG.warn("RemoteException in register", e);
+throw e;
+  } catch(IOException e) {
+LOG.warn("Problem connecting to server: " + nnAddr);
   }
+  // Try again in a second
+  sleepAndLogInterrupts(1000, "connecting to server");
 }
 
 LOG.info("Block pool " + this + " successfully registered with NN");
@@ -880,6 +885,15 @@ class BPServiceActor implements Runnable {
   if (bpos.processCommandFromActor(cmd, this) == false) {
 return false;
   }
+} catch (RemoteException re) {
+  String reClass = re.getClassName();
+  if (UnregisteredNodeException.class.getName().equals(reClass) ||
+  DisallowedDatanodeException.class.getName().equals(reClass) ||
+  IncorrectVersionException.class.getName().equals(reClass)) {
+LOG.warn(this + " is shutting down", re);
+shouldServiceRun = false;
+return false;
+  }
 } catch (IOException ioe) {
   LOG.warn("Error processing datanode Command", ioe);
 }





[hadoop] branch branch-3.1 updated: HDFS-12749. DN may not send block report to NN after NN restart. Contributed by Xiaoqiao He.

2019-10-21 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 362657c  HDFS-12749. DN may not send block report to NN after NN 
restart. Contributed by Xiaoqiao He.
362657c is described below

commit 362657c1a3518d703085d0a3909c74d8a7f22c93
Author: Kihwal Lee 
AuthorDate: Mon Oct 21 16:35:50 2019 -0500

HDFS-12749. DN may not send block report to NN after NN restart. 
Contributed by Xiaoqiao He.

(cherry picked from commit c4e27ef7735acd6f91b73d2ecb0227f8dd75a2e4)
---
 .../hadoop/hdfs/server/datanode/BPServiceActor.java| 18 --
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index fe931fc..d4e32da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -774,11 +774,16 @@ class BPServiceActor implements Runnable {
   } catch(EOFException e) {  // namenode might have just restarted
 LOG.info("Problem connecting to server: " + nnAddr + " :"
 + e.getLocalizedMessage());
-sleepAndLogInterrupts(1000, "connecting to server");
   } catch(SocketTimeoutException e) {  // namenode is busy
 LOG.info("Problem connecting to server: " + nnAddr);
-sleepAndLogInterrupts(1000, "connecting to server");
+  } catch(RemoteException e) {
+LOG.warn("RemoteException in register", e);
+throw e;
+  } catch(IOException e) {
+LOG.warn("Problem connecting to server: " + nnAddr);
   }
+  // Try again in a second
+  sleepAndLogInterrupts(1000, "connecting to server");
 }
 
 LOG.info("Block pool " + this + " successfully registered with NN");
@@ -881,6 +886,15 @@ class BPServiceActor implements Runnable {
   if (bpos.processCommandFromActor(cmd, this) == false) {
 return false;
   }
+} catch (RemoteException re) {
+  String reClass = re.getClassName();
+  if (UnregisteredNodeException.class.getName().equals(reClass) ||
+  DisallowedDatanodeException.class.getName().equals(reClass) ||
+  IncorrectVersionException.class.getName().equals(reClass)) {
+LOG.warn(this + " is shutting down", re);
+shouldServiceRun = false;
+return false;
+  }
 } catch (IOException ioe) {
   LOG.warn("Error processing datanode Command", ioe);
 }





[hadoop] branch branch-3.2 updated: HDFS-12749. DN may not send block report to NN after NN restart. Contributed by Xiaoqiao He.

2019-10-21 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 2d1f361  HDFS-12749. DN may not send block report to NN after NN 
restart. Contributed by Xiaoqiao He.
2d1f361 is described below

commit 2d1f3611cdb3cac3678cb8dd76da8b6dbdc99215
Author: Kihwal Lee 
AuthorDate: Mon Oct 21 16:34:17 2019 -0500

HDFS-12749. DN may not send block report to NN after NN restart. 
Contributed by Xiaoqiao He.

(cherry picked from commit c4e27ef7735acd6f91b73d2ecb0227f8dd75a2e4)
---
 .../hadoop/hdfs/server/datanode/BPServiceActor.java| 18 --
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 93af5ec..96977a9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -776,11 +776,16 @@ class BPServiceActor implements Runnable {
   } catch(EOFException e) {  // namenode might have just restarted
 LOG.info("Problem connecting to server: " + nnAddr + " :"
 + e.getLocalizedMessage());
-sleepAndLogInterrupts(1000, "connecting to server");
   } catch(SocketTimeoutException e) {  // namenode is busy
 LOG.info("Problem connecting to server: " + nnAddr);
-sleepAndLogInterrupts(1000, "connecting to server");
+  } catch(RemoteException e) {
+LOG.warn("RemoteException in register", e);
+throw e;
+  } catch(IOException e) {
+LOG.warn("Problem connecting to server: " + nnAddr);
   }
+  // Try again in a second
+  sleepAndLogInterrupts(1000, "connecting to server");
 }
 
 LOG.info("Block pool " + this + " successfully registered with NN");
@@ -883,6 +888,15 @@ class BPServiceActor implements Runnable {
   if (bpos.processCommandFromActor(cmd, this) == false) {
 return false;
   }
+} catch (RemoteException re) {
+  String reClass = re.getClassName();
+  if (UnregisteredNodeException.class.getName().equals(reClass) ||
+  DisallowedDatanodeException.class.getName().equals(reClass) ||
+  IncorrectVersionException.class.getName().equals(reClass)) {
+LOG.warn(this + " is shutting down", re);
+shouldServiceRun = false;
+return false;
+  }
 } catch (IOException ioe) {
   LOG.warn("Error processing datanode Command", ioe);
 }





[hadoop] branch trunk updated: HDFS-12749. DN may not send block report to NN after NN restart. Contributed by Xiaoqiao He.

2019-10-21 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c4e27ef  HDFS-12749. DN may not send block report to NN after NN 
restart. Contributed by Xiaoqiao He.
c4e27ef is described below

commit c4e27ef7735acd6f91b73d2ecb0227f8dd75a2e4
Author: Kihwal Lee 
AuthorDate: Mon Oct 21 16:32:28 2019 -0500

HDFS-12749. DN may not send block report to NN after NN restart. 
Contributed by Xiaoqiao He.
---
 .../hadoop/hdfs/server/datanode/BPServiceActor.java| 18 --
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 1dafdd0..495035e 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -797,11 +797,16 @@ class BPServiceActor implements Runnable {
   } catch(EOFException e) {  // namenode might have just restarted
 LOG.info("Problem connecting to server: " + nnAddr + " :"
 + e.getLocalizedMessage());
-sleepAndLogInterrupts(1000, "connecting to server");
   } catch(SocketTimeoutException e) {  // namenode is busy
 LOG.info("Problem connecting to server: " + nnAddr);
-sleepAndLogInterrupts(1000, "connecting to server");
+  } catch(RemoteException e) {
+LOG.warn("RemoteException in register", e);
+throw e;
+  } catch(IOException e) {
+LOG.warn("Problem connecting to server: " + nnAddr);
   }
+  // Try again in a second
+  sleepAndLogInterrupts(1000, "connecting to server");
 }
 
 if (bpRegistration == null) {
@@ -908,6 +913,15 @@ class BPServiceActor implements Runnable {
   if (bpos.processCommandFromActor(cmd, this) == false) {
 return false;
   }
+} catch (RemoteException re) {
+  String reClass = re.getClassName();
+  if (UnregisteredNodeException.class.getName().equals(reClass) ||
+  DisallowedDatanodeException.class.getName().equals(reClass) ||
+  IncorrectVersionException.class.getName().equals(reClass)) {
+LOG.warn(this + " is shutting down", re);
+shouldServiceRun = false;
+return false;
+  }
 } catch (IOException ioe) {
   LOG.warn("Error processing datanode Command", ioe);
 }





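The other half of the patch classifies a RemoteException seen while processing a NameNode command by the exception class name reported by the server, and stops the actor only for a small fatal set. A JDK-only sketch of that classification step; the org.example names are placeholders for the real UnregisteredNodeException, DisallowedDatanodeException and IncorrectVersionException checks:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class RemoteErrorClassifier {

  // Remote exception class names that should shut the service down.
  // Placeholder names; see the patch for the actual classes compared.
  private static final Set<String> FATAL_CLASSES = new HashSet<>(Arrays.asList(
      "org.example.UnregisteredNodeException",
      "org.example.DisallowedDatanodeException",
      "org.example.IncorrectVersionException"));

  // Returns true if the actor should stop serving; anything else is
  // logged and the actor keeps running.
  static boolean isFatal(String remoteExceptionClassName) {
    return FATAL_CLASSES.contains(remoteExceptionClassName);
  }

  public static void main(String[] args) {
    System.out.println(isFatal("org.example.DisallowedDatanodeException")); // true
    System.out.println(isFatal("java.io.IOException"));                     // false
  }
}
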
[hadoop] branch branch-2.8 updated: HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

2019-09-19 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
 new dce2678  HADOOP-16582. LocalFileSystem's mkdirs() does not work as 
expected under viewfs. Contributed by Kihwal Lee
dce2678 is described below

commit dce2678ee5e2a590925c805e0f249a9d77c33dbb
Author: Kihwal Lee 
AuthorDate: Thu Sep 19 08:28:48 2019 -0500

HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under 
viewfs. Contributed by Kihwal Lee

(cherry picked from commit d4205dce176287e863f567b333e0d408bf51ae6d)
---
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  4 
 .../org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 17 +++--
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java |  1 -
 4 files changed, 24 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 41429ac..a338b53 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -320,6 +320,10 @@ public class FilterFileSystem extends FileSystem {
 return fs.mkdirs(f, permission);
   }
 
+  @Override
+  public boolean mkdirs(Path f) throws IOException {
+return fs.mkdirs(f);
+  }
 
   /**
* The src file is on the local disk.  Add it to FS at
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 943846d..bffda59 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -256,6 +256,11 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public boolean mkdirs(final Path f) throws IOException {
+return super.mkdirs(fullPath(f));
+  }
+
+  @Override
   public FSDataInputStream open(final Path f, final int bufferSize) 
 throws IOException {
 return super.open(fullPath(f), bufferSize);
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 813642f..f1d37ed 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -448,11 +448,18 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
+  public boolean mkdirs(Path dir) throws IOException {
+InodeTree.ResolveResult res =
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath);
+  }
+
+  @Override
   public boolean mkdirs(final Path dir, final FsPermission permission)
   throws IOException {
 InodeTree.ResolveResult res = 
-  fsState.resolve(getUriPath(dir), false);
-   return  res.targetFileSystem.mkdirs(res.remainingPath, permission);
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath, permission);
   }
 
   @Override
@@ -944,6 +951,12 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
+public boolean mkdirs(Path dir)
+throws AccessControlException, FileAlreadyExistsException {
+  return mkdirs(dir, null);
+}
+
+@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index 3cf0f0e..88d290b 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -77,7 +77,6 @@ public class TestFilterFileSystem {
 boolean overwrite, int bufferSize, short replication, long blockSize,
 Progressable progress) throws IOException;
 
-public boolean mkdirs(Path f);
 public FSDataInputStream open(Path f);
 public FSDataOutputStream create(Path f);
 public FSDataOutputStream create(Path f, boolean overwrite

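The patch adds single-argument mkdirs(Path) overrides so that FilterFileSystem, ChRootedFileSystem and ViewFileSystem forward that call to the wrapped file system rather than letting the FileSystem base class expand it with its own default permission. A minimal self-contained model of the gap being closed (hypothetical BaseFs/WrapperFs classes, not the Hadoop API):

public class DelegationSketch {

  static class BaseFs {
    boolean mkdirs(String path) {
      // base class fills in its own default permission
      return mkdirs(path, "rwxr-xr-x");
    }
    boolean mkdirs(String path, String permission) {
      System.out.println("base mkdirs " + path + " perm=" + permission);
      return true;
    }
  }

  static class WrapperFs extends BaseFs {
    private final BaseFs inner;
    WrapperFs(BaseFs inner) {
      this.inner = inner;
    }

    @Override
    boolean mkdirs(String path, String permission) {
      return inner.mkdirs(path, permission);
    }

    // the fix: forward the one-argument form as well, so the inner file
    // system applies its own default-permission behaviour
    @Override
    boolean mkdirs(String path) {
      return inner.mkdirs(path);
    }
  }

  public static void main(String[] args) {
    new WrapperFs(new BaseFs()).mkdirs("/tmp/newdir");
  }
}

Without the second override, mkdirs("/tmp/newdir") on the wrapper routes through the base class and hands the inner file system an outer default permission, which lines up with the LocalFileSystem-under-viewfs symptom the subject line describes.
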
[hadoop] branch branch-2.9 updated: HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

2019-09-19 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
 new c5cbef8  HADOOP-16582. LocalFileSystem's mkdirs() does not work as 
expected under viewfs. Contributed by Kihwal Lee
c5cbef8 is described below

commit c5cbef88e478cd2bb0c42e87327a80eed6cc73af
Author: Kihwal Lee 
AuthorDate: Thu Sep 19 08:28:09 2019 -0500

HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under 
viewfs. Contributed by Kihwal Lee

(cherry picked from commit d4205dce176287e863f567b333e0d408bf51ae6d)
---
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  4 
 .../org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 17 +++--
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java |  1 -
 4 files changed, 24 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 954a041..1c38df8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -320,6 +320,10 @@ public class FilterFileSystem extends FileSystem {
 return fs.mkdirs(f, permission);
   }
 
+  @Override
+  public boolean mkdirs(Path f) throws IOException {
+return fs.mkdirs(f);
+  }
 
   /**
* The src file is on the local disk.  Add it to FS at
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 5eeff15..3d8f03a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -256,6 +256,11 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public boolean mkdirs(final Path f) throws IOException {
+return super.mkdirs(fullPath(f));
+  }
+
+  @Override
   public FSDataInputStream open(final Path f, final int bufferSize) 
 throws IOException {
 return super.open(fullPath(f), bufferSize);
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 27d8c0b..4c73eae 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -448,11 +448,18 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
+  public boolean mkdirs(Path dir) throws IOException {
+InodeTree.ResolveResult res =
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath);
+  }
+
+  @Override
   public boolean mkdirs(final Path dir, final FsPermission permission)
   throws IOException {
 InodeTree.ResolveResult res = 
-  fsState.resolve(getUriPath(dir), false);
-   return  res.targetFileSystem.mkdirs(res.remainingPath, permission);
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath, permission);
   }
 
   @Override
@@ -944,6 +951,12 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
+public boolean mkdirs(Path dir)
+throws AccessControlException, FileAlreadyExistsException {
+  return mkdirs(dir, null);
+}
+
+@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index c72f579..7c0115e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -77,7 +77,6 @@ public class TestFilterFileSystem {
 boolean overwrite, int bufferSize, short replication, long blockSize,
 Progressable progress) throws IOException;
 
-public boolean mkdirs(Path f);
 public FSDataInputStream open(Path f);
 public FSDataOutputStream create(Path f);
 public FSDataOutputStream create(Path f, boolean overwrite

[hadoop] branch branch-2 updated: HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

2019-09-19 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new ca93156  HADOOP-16582. LocalFileSystem's mkdirs() does not work as 
expected under viewfs. Contributed by Kihwal Lee
ca93156 is described below

commit ca93156cc5eb5a06ad025dc5ce87c3c49599bd79
Author: Kihwal Lee 
AuthorDate: Thu Sep 19 08:27:23 2019 -0500

HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under 
viewfs. Contributed by Kihwal Lee

(cherry picked from commit d4205dce176287e863f567b333e0d408bf51ae6d)
---
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  4 
 .../org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 17 +++--
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java |  1 -
 4 files changed, 24 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 954a041..1c38df8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -320,6 +320,10 @@ public class FilterFileSystem extends FileSystem {
 return fs.mkdirs(f, permission);
   }
 
+  @Override
+  public boolean mkdirs(Path f) throws IOException {
+return fs.mkdirs(f);
+  }
 
   /**
* The src file is on the local disk.  Add it to FS at
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 5eeff15..3d8f03a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -256,6 +256,11 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public boolean mkdirs(final Path f) throws IOException {
+return super.mkdirs(fullPath(f));
+  }
+
+  @Override
   public FSDataInputStream open(final Path f, final int bufferSize) 
 throws IOException {
 return super.open(fullPath(f), bufferSize);
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 27d8c0b..4c73eae 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -448,11 +448,18 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
+  public boolean mkdirs(Path dir) throws IOException {
+InodeTree.ResolveResult res =
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath);
+  }
+
+  @Override
   public boolean mkdirs(final Path dir, final FsPermission permission)
   throws IOException {
 InodeTree.ResolveResult res = 
-  fsState.resolve(getUriPath(dir), false);
-   return  res.targetFileSystem.mkdirs(res.remainingPath, permission);
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath, permission);
   }
 
   @Override
@@ -944,6 +951,12 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
+public boolean mkdirs(Path dir)
+throws AccessControlException, FileAlreadyExistsException {
+  return mkdirs(dir, null);
+}
+
+@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index c72f579..7c0115e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -77,7 +77,6 @@ public class TestFilterFileSystem {
 boolean overwrite, int bufferSize, short replication, long blockSize,
 Progressable progress) throws IOException;
 
-public boolean mkdirs(Path f);
 public FSDataInputStream open(Path f);
 public FSDataOutputStream create(Path f);
 public FSDataOutputStream create(Path f, boolean overwrite

[hadoop] branch branch-3.1 updated: HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

2019-09-19 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 56562b9  HADOOP-16582. LocalFileSystem's mkdirs() does not work as 
expected under viewfs. Contributed by Kihwal Lee
56562b9 is described below

commit 56562b911785806bbba5d5e7b897fc0149bfe271
Author: Kihwal Lee 
AuthorDate: Thu Sep 19 08:26:36 2019 -0500

HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under 
viewfs. Contributed by Kihwal Lee

(cherry picked from commit d4205dce176287e863f567b333e0d408bf51ae6d)
---
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  4 
 .../org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 17 +++--
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java |  1 -
 4 files changed, 24 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index f9bbfb1..9cab0f2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -332,6 +332,10 @@ public class FilterFileSystem extends FileSystem {
 return fs.mkdirs(f, permission);
   }
 
+  @Override
+  public boolean mkdirs(Path f) throws IOException {
+return fs.mkdirs(f);
+  }
 
   /**
* The src file is on the local disk.  Add it to FS at
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 8b90f53..2341fe4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -268,6 +268,11 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public boolean mkdirs(final Path f) throws IOException {
+return super.mkdirs(fullPath(f));
+  }
+
+  @Override
   public FSDataInputStream open(final Path f, final int bufferSize) 
 throws IOException {
 return super.open(fullPath(f), bufferSize);
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 9523070..ddc1190 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -462,11 +462,18 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
+  public boolean mkdirs(Path dir) throws IOException {
+InodeTree.ResolveResult res =
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath);
+  }
+
+  @Override
   public boolean mkdirs(final Path dir, final FsPermission permission)
   throws IOException {
 InodeTree.ResolveResult res = 
-  fsState.resolve(getUriPath(dir), false);
-   return  res.targetFileSystem.mkdirs(res.remainingPath, permission);
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath, permission);
   }
 
   @Override
@@ -1077,6 +1084,12 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
+public boolean mkdirs(Path dir)
+throws AccessControlException, FileAlreadyExistsException {
+  return mkdirs(dir, null);
+}
+
+@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index 9e01aef..7c4dfe5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -78,7 +78,6 @@ public class TestFilterFileSystem {
 boolean overwrite, int bufferSize, short replication, long blockSize,
 Progressable progress) throws IOException;
 
-public boolean mkdirs(Path f);
 public FSDataInputStream open(Path f);
 public FSDataInputStream open(PathHandle f);
 public FSDataOutputStream create(Path f

[hadoop] branch branch-3.2 updated: HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

2019-09-19 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 7477f8d  HADOOP-16582. LocalFileSystem's mkdirs() does not work as 
expected under viewfs. Contributed by Kihwal Lee
7477f8d is described below

commit 7477f8d2e9259bd4bb3e9bffe7d805c898ffe332
Author: Kihwal Lee 
AuthorDate: Thu Sep 19 08:24:39 2019 -0500

HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under 
viewfs. Contributed by Kihwal Lee

(cherry picked from commit d4205dce176287e863f567b333e0d408bf51ae6d)
---
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  4 
 .../org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 17 +++--
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java |  1 -
 4 files changed, 24 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index f9bbfb1..9cab0f2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -332,6 +332,10 @@ public class FilterFileSystem extends FileSystem {
 return fs.mkdirs(f, permission);
   }
 
+  @Override
+  public boolean mkdirs(Path f) throws IOException {
+return fs.mkdirs(f);
+  }
 
   /**
* The src file is on the local disk.  Add it to FS at
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 8b90f53..2341fe4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -268,6 +268,11 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public boolean mkdirs(final Path f) throws IOException {
+return super.mkdirs(fullPath(f));
+  }
+
+  @Override
   public FSDataInputStream open(final Path f, final int bufferSize) 
 throws IOException {
 return super.open(fullPath(f), bufferSize);
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 9523070..ddc1190 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -462,11 +462,18 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
+  public boolean mkdirs(Path dir) throws IOException {
+InodeTree.ResolveResult res =
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath);
+  }
+
+  @Override
   public boolean mkdirs(final Path dir, final FsPermission permission)
   throws IOException {
 InodeTree.ResolveResult res = 
-  fsState.resolve(getUriPath(dir), false);
-   return  res.targetFileSystem.mkdirs(res.remainingPath, permission);
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath, permission);
   }
 
   @Override
@@ -1077,6 +1084,12 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
+public boolean mkdirs(Path dir)
+throws AccessControlException, FileAlreadyExistsException {
+  return mkdirs(dir, null);
+}
+
+@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index 9e01aef..7c4dfe5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -78,7 +78,6 @@ public class TestFilterFileSystem {
 boolean overwrite, int bufferSize, short replication, long blockSize,
 Progressable progress) throws IOException;
 
-public boolean mkdirs(Path f);
 public FSDataInputStream open(Path f);
 public FSDataInputStream open(PathHandle f);
 public FSDataOutputStream create(Path f

[hadoop] branch trunk updated: HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

2019-09-19 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d4205dc  HADOOP-16582. LocalFileSystem's mkdirs() does not work as 
expected under viewfs. Contributed by Kihwal Lee
d4205dc is described below

commit d4205dce176287e863f567b333e0d408bf51ae6d
Author: Kihwal Lee 
AuthorDate: Thu Sep 19 08:22:19 2019 -0500

HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under 
viewfs. Contributed by Kihwal Lee
---
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  4 
 .../org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 17 +++--
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java |  1 -
 4 files changed, 24 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 99c18b6..e05c574 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -334,6 +334,10 @@ public class FilterFileSystem extends FileSystem {
 return fs.mkdirs(f, permission);
   }
 
+  @Override
+  public boolean mkdirs(Path f) throws IOException {
+return fs.mkdirs(f);
+  }
 
   /**
* The src file is on the local disk.  Add it to FS at
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index cec1891..c93225f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -280,6 +280,11 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public boolean mkdirs(final Path f) throws IOException {
+return super.mkdirs(fullPath(f));
+  }
+
+  @Override
   public FSDataInputStream open(final Path f, final int bufferSize) 
 throws IOException {
 return super.open(fullPath(f), bufferSize);
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index f127d8d..6bc469c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -550,11 +550,18 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
+  public boolean mkdirs(Path dir) throws IOException {
+InodeTree.ResolveResult res =
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath);
+  }
+
+  @Override
   public boolean mkdirs(final Path dir, final FsPermission permission)
   throws IOException {
 InodeTree.ResolveResult res = 
-  fsState.resolve(getUriPath(dir), false);
-   return  res.targetFileSystem.mkdirs(res.remainingPath, permission);
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath, permission);
   }
 
   @Override
@@ -1172,6 +1179,12 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
+public boolean mkdirs(Path dir)
+throws AccessControlException, FileAlreadyExistsException {
+  return mkdirs(dir, null);
+}
+
+@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index 6de4f07..f0057a6 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -77,7 +77,6 @@ public class TestFilterFileSystem {
 boolean overwrite, int bufferSize, short replication, long blockSize,
 Progressable progress) throws IOException;
 
-public boolean mkdirs(Path f);
 public FSDataInputStream open(Path f);
 public FSDataInputStream open(PathHandle f);
 public FSDataOutputStream create(Path f);



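For ViewFileSystem in particular, the new override first resolves the path against the mount table and then delegates mkdirs to the target file system with the remaining path, matching the existing two-argument variant. A rough standalone sketch of that resolve-then-delegate step, with an assumed in-memory mount table rather than Hadoop's InodeTree:

import java.util.Map;
import java.util.TreeMap;

public class MountResolveSketch {

  // Mount table assumed purely for illustration.
  private static final TreeMap<String, String> MOUNTS = new TreeMap<>(Map.of(
      "/user", "hdfs://nn1/user",
      "/tmp", "file:///tmp"));

  static boolean mkdirs(String viewPath) {
    // Simplified longest-prefix match; the real InodeTree walks a mount tree.
    for (String prefix : MOUNTS.descendingKeySet()) {
      if (viewPath.startsWith(prefix)) {
        String target = MOUNTS.get(prefix);
        String remaining = viewPath.substring(prefix.length());
        System.out.println("delegating mkdirs(" + remaining + ") to " + target);
        return true;  // a real implementation would call mkdirs on the target
      }
    }
    throw new IllegalArgumentException("no mount point for " + viewPath);
  }

  public static void main(String[] args) {
    mkdirs("/user/alice/newdir");
  }
}
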
[hadoop] branch branch-2 updated: HDFS-13959. TestUpgradeDomainBlockPlacementPolicy is flaky. Contributed by Ayush Saxena.

2019-09-18 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new d4038e7  HDFS-13959. TestUpgradeDomainBlockPlacementPolicy is flaky. 
Contributed by Ayush Saxena.
d4038e7 is described below

commit d4038e7a3957e291fef71d2e00768f1cd2d6638b
Author: Kihwal Lee 
AuthorDate: Wed Sep 18 16:51:21 2019 -0500

HDFS-13959. TestUpgradeDomainBlockPlacementPolicy is flaky. Contributed by 
Ayush Saxena.

(cherry picked from commit 1851d06eb3b70f39f3054a7c06f0ad2bc664aaec)
---
 .../TestUpgradeDomainBlockPlacementPolicy.java | 22 +-
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
index 8460b6f..3383c4e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
@@ -65,14 +65,8 @@ public class TestUpgradeDomainBlockPlacementPolicy {
   private static final int DEFAULT_BLOCK_SIZE = 1024;
   static final String[] racks =
   { "/RACK1", "/RACK1", "/RACK1", "/RACK2", "/RACK2", "/RACK2" };
-  /**
-   *  Use host names that can be resolved (
-   *  InetSocketAddress#isUnresolved == false). Otherwise,
-   *  CombinedHostFileManager won't allow those hosts.
-   */
   static final String[] hosts =
-  {"127.0.0.1", "127.0.0.1", "127.0.0.1", "127.0.0.1",
-  "127.0.0.1", "127.0.0.1"};
+  {"host1", "host2", "host3", "host4", "host5", "host6"};
   static final String[] upgradeDomains =
   {"ud5", "ud2", "ud3", "ud1", "ud2", "ud4"};
   static final Set expectedDatanodeIDs = new HashSet<>();
@@ -134,7 +128,12 @@ public class TestUpgradeDomainBlockPlacementPolicy {
 for (int i = 0; i < hosts.length; i++) {
   datanodes[i] = new DatanodeAdminProperties();
   DatanodeID datanodeID = cluster.getDataNodes().get(i).getDatanodeId();
-  datanodes[i].setHostName(datanodeID.getHostName());
+  /*
+   *  Use host names that can be resolved (
+   *  InetSocketAddress#isUnresolved == false). Otherwise,
+   *  CombinedHostFileManager won't allow those hosts.
+   */
+  datanodes[i].setHostName(datanodeID.getIpAddr());
   datanodes[i].setPort(datanodeID.getXferPort());
   datanodes[i].setUpgradeDomain(upgradeDomains[i]);
 }
@@ -168,7 +167,12 @@ public class TestUpgradeDomainBlockPlacementPolicy {
 for (int i = 0; i < hosts.length; i++) {
   datanodes[i] = new DatanodeAdminProperties();
   DatanodeID datanodeID = cluster.getDataNodes().get(i).getDatanodeId();
-  datanodes[i].setHostName(datanodeID.getHostName());
+  /*
+   *  Use host names that can be resolved (
+   *  InetSocketAddress#isUnresolved == false). Otherwise,
+   *  CombinedHostFileManager won't allow those hosts.
+   */
+  datanodes[i].setHostName(datanodeID.getIpAddr());
   datanodes[i].setPort(datanodeID.getXferPort());
   datanodes[i].setUpgradeDomain(upgradeDomains[i]);
 }

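The test change works because CombinedHostFileManager ignores entries whose socket address remains unresolved, so the datanode admin properties are now keyed by IP address (always resolvable) rather than made-up host names. A quick JDK-only illustration of that resolvability check; the port is an arbitrary example value:

import java.net.InetSocketAddress;

public class ResolvableHostCheck {
  public static void main(String[] args) {
    // "host1" is a placeholder name that normally does not resolve
    InetSocketAddress fake = new InetSocketAddress("host1", 9866);
    InetSocketAddress loopback = new InetSocketAddress("127.0.0.1", 9866);
    System.out.println("host1 unresolved?     " + fake.isUnresolved());     // usually true
    System.out.println("127.0.0.1 unresolved? " + loopback.isUnresolved()); // false
  }
}
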



