[hadoop] branch branch-3.1 updated: HDFS-15622. Deleted blocks linger in the replications queue. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 7a3085d  HDFS-15622. Deleted blocks linger in the replications queue. 
Contributed by Ahmed Hussein.
7a3085d is described below

commit 7a3085d552c4b24cdf23da201a300928ada7b8fd
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 21:01:09 2020 -0500

HDFS-15622. Deleted blocks linger in the replications queue. Contributed by 
Ahmed Hussein.

(cherry picked from commit da1b6e3cc286db00b385f3280627d2b2063b4e59)
---
 .../blockmanagement/LowRedundancyBlocks.java   | 30 ++
 .../TestLowRedundancyBlockQueues.java  | 47 +-
 .../blockmanagement/TestReplicationPolicy.java | 17 ++--
 3 files changed, 81 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
index f6ef248..d719e93 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -500,6 +501,8 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
* the block count is met or iteration reaches the end of the lowest priority
* list, in which case bookmarks for each block list are reset to the heads
* of their respective lists.
+   * If a block is deleted (has invalid bcId), it will be removed from the low
+   * redundancy queues.
*
* @param blocksToProcess - number of blocks to fetch from low redundancy
*  blocks.
@@ -515,21 +518,32 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
 
     int count = 0;
     int priority = 0;
+    HashSet<BlockInfo> toRemove = new HashSet<>();
     for (; count < blocksToProcess && priority < LEVEL; priority++) {
-      if (priority == QUEUE_WITH_CORRUPT_BLOCKS) {
-        // do not choose corrupted blocks.
-        continue;
-      }
-
       // Go through all blocks that need reconstructions with current priority.
       // Set the iterator to the first unprocessed block at this priority level
+      // We do not want to skip QUEUE_WITH_CORRUPT_BLOCKS because we still need
+      // to look for deleted blocks if any.
+      final boolean inCorruptLevel = (QUEUE_WITH_CORRUPT_BLOCKS == priority);
       final Iterator<BlockInfo> i = priorityQueues.get(priority).getBookmark();
       final List<BlockInfo> blocks = new LinkedList<>();
-      blocksToReconstruct.add(blocks);
-      // Loop through all remaining blocks in the list.
+      if (!inCorruptLevel) {
+        blocksToReconstruct.add(blocks);
+      }
       for(; count < blocksToProcess && i.hasNext(); count++) {
-        blocks.add(i.next());
+        BlockInfo block = i.next();
+        if (block.isDeleted()) {
+          toRemove.add(block);
+          continue;
+        }
+        if (!inCorruptLevel) {
+          blocks.add(block);
+        }
+      }
+      for (BlockInfo bInfo : toRemove) {
+        remove(bInfo, priority);
       }
+      toRemove.clear();
     }
 
 if (priority == LEVEL || resetIterators) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
index e63a8d8..ef614fb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -41,6 +42,7 @@ import static org.junit.Assert.fail;
 public class TestLowRedundancyBlockQueues {
 
   private final ErasureCodingPolicy ecPolicy;
+  private static AtomicLong mockINodeId = new AtomicLong(0);
 
   public TestLowRedundancyBlockQueues(ErasureCodingPolicy policy) {
 ecPolicy = policy;
@@ -52,7 +54,15 @@ public class TestLowRedundancyBlockQueues {
   }
 
   private BlockInfo genBlockInfo(long id) {
-return new BlockInfoContiguous(new 

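As a reading aid for the LowRedundancyBlocks change above: the loop now visits every priority level (including QUEUE_WITH_CORRUPT_BLOCKS), diverts blocks whose isDeleted() returns true into a removal set instead of handing them to the replication monitor, and purges that set from the queue before moving to the next level. Below is a minimal, self-contained sketch of that pattern with stand-in types; Entry, choose() and the queue layout are illustrative only, not the HDFS BlockInfo/LowRedundancyBlocks API.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;

public class DeletedBlockFilterSketch {

  /** Stand-in for a block entry; 'deleted' mirrors BlockInfo.isDeleted(). */
  static class Entry {
    final long id;
    final boolean deleted;
    Entry(long id, boolean deleted) { this.id = id; this.deleted = deleted; }
  }

  /**
   * Walk every priority level, divert deleted entries into a removal set,
   * and only queue live entries from non-corrupt levels for reconstruction.
   */
  static List<List<Entry>> choose(List<List<Entry>> queues, int corruptLevel,
      int blocksToProcess) {
    List<List<Entry>> chosen = new ArrayList<>();
    Set<Entry> toRemove = new HashSet<>();
    int count = 0;
    for (int level = 0; count < blocksToProcess && level < queues.size(); level++) {
      boolean inCorruptLevel = (level == corruptLevel);
      List<Entry> picked = new LinkedList<>();
      if (!inCorruptLevel) {
        chosen.add(picked);
      }
      Iterator<Entry> it = queues.get(level).iterator();
      for (; count < blocksToProcess && it.hasNext(); count++) {
        Entry e = it.next();
        if (e.deleted) {
          toRemove.add(e);              // deleted entries never get replicated
          continue;
        }
        if (!inCorruptLevel) {
          picked.add(e);
        }
      }
      queues.get(level).removeAll(toRemove);  // purge lingering deleted blocks
      toRemove.clear();
    }
    return chosen;
  }

  public static void main(String[] args) {
    List<List<Entry>> queues = new ArrayList<>();
    queues.add(new LinkedList<>(
        Arrays.asList(new Entry(1, false), new Entry(2, true))));
    queues.add(new LinkedList<>(
        Arrays.asList(new Entry(3, true))));   // treat level 1 as "corrupt"
    List<List<Entry>> out = choose(queues, 1, 10);
    // Prints one chosen level holding one live entry; the deleted ones are purged.
    System.out.println(out.size() + " level(s), "
        + out.get(0).size() + " live entry");
  }
}
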
[hadoop] branch branch-3.2 updated: HDFS-15622. Deleted blocks linger in the replications queue. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new f363c3b  HDFS-15622. Deleted blocks linger in the replications queue. 
Contributed by Ahmed Hussein.
f363c3b is described below

commit f363c3b315472e45fd1d0f8225e31184b6c353a1
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 19:48:42 2020 -0500

HDFS-15622. Deleted blocks linger in the replications queue. Contributed by 
Ahmed Hussein.

(cherry picked from commit da1b6e3cc286db00b385f3280627d2b2063b4e59)
---
 .../blockmanagement/LowRedundancyBlocks.java   | 30 ++
 .../TestLowRedundancyBlockQueues.java  | 47 +-
 .../blockmanagement/TestReplicationPolicy.java | 17 ++--
 3 files changed, 81 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
index f6ef248..d719e93 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -500,6 +501,8 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
* the block count is met or iteration reaches the end of the lowest priority
* list, in which case bookmarks for each block list are reset to the heads
* of their respective lists.
+   * If a block is deleted (has invalid bcId), it will be removed from the low
+   * redundancy queues.
*
* @param blocksToProcess - number of blocks to fetch from low redundancy
*  blocks.
@@ -515,21 +518,32 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
 
     int count = 0;
     int priority = 0;
+    HashSet<BlockInfo> toRemove = new HashSet<>();
     for (; count < blocksToProcess && priority < LEVEL; priority++) {
-      if (priority == QUEUE_WITH_CORRUPT_BLOCKS) {
-        // do not choose corrupted blocks.
-        continue;
-      }
-
       // Go through all blocks that need reconstructions with current priority.
       // Set the iterator to the first unprocessed block at this priority level
+      // We do not want to skip QUEUE_WITH_CORRUPT_BLOCKS because we still need
+      // to look for deleted blocks if any.
+      final boolean inCorruptLevel = (QUEUE_WITH_CORRUPT_BLOCKS == priority);
       final Iterator<BlockInfo> i = priorityQueues.get(priority).getBookmark();
       final List<BlockInfo> blocks = new LinkedList<>();
-      blocksToReconstruct.add(blocks);
-      // Loop through all remaining blocks in the list.
+      if (!inCorruptLevel) {
+        blocksToReconstruct.add(blocks);
+      }
       for(; count < blocksToProcess && i.hasNext(); count++) {
-        blocks.add(i.next());
+        BlockInfo block = i.next();
+        if (block.isDeleted()) {
+          toRemove.add(block);
+          continue;
+        }
+        if (!inCorruptLevel) {
+          blocks.add(block);
+        }
+      }
+      for (BlockInfo bInfo : toRemove) {
+        remove(bInfo, priority);
       }
+      toRemove.clear();
     }
 
 if (priority == LEVEL || resetIterators) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
index e63a8d8..ef614fb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -41,6 +42,7 @@ import static org.junit.Assert.fail;
 public class TestLowRedundancyBlockQueues {
 
   private final ErasureCodingPolicy ecPolicy;
+  private static AtomicLong mockINodeId = new AtomicLong(0);
 
   public TestLowRedundancyBlockQueues(ErasureCodingPolicy policy) {
 ecPolicy = policy;
@@ -52,7 +54,15 @@ public class TestLowRedundancyBlockQueues {
   }
 
   private BlockInfo genBlockInfo(long id) {
-return new BlockInfoContiguous(new 

[hadoop] branch branch-3.3 updated: HDFS-15622. Deleted blocks linger in the replications queue. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 02709cb  HDFS-15622. Deleted blocks linger in the replications queue. 
Contributed by Ahmed Hussein.
02709cb is described below

commit 02709cb054509677795cff5de92e1cbb0edc1c88
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 19:41:38 2020 -0500

HDFS-15622. Deleted blocks linger in the replications queue. Contributed by 
Ahmed Hussein.

(cherry picked from commit da1b6e3cc286db00b385f3280627d2b2063b4e59)
---
 .../blockmanagement/LowRedundancyBlocks.java   | 30 ++
 .../TestLowRedundancyBlockQueues.java  | 47 +-
 .../blockmanagement/TestReplicationPolicy.java | 17 ++--
 3 files changed, 81 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
index f6ef248..d719e93 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -500,6 +501,8 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
* the block count is met or iteration reaches the end of the lowest priority
* list, in which case bookmarks for each block list are reset to the heads
* of their respective lists.
+   * If a block is deleted (has invalid bcId), it will be removed from the low
+   * redundancy queues.
*
* @param blocksToProcess - number of blocks to fetch from low redundancy
*  blocks.
@@ -515,21 +518,32 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
 
     int count = 0;
     int priority = 0;
+    HashSet<BlockInfo> toRemove = new HashSet<>();
     for (; count < blocksToProcess && priority < LEVEL; priority++) {
-      if (priority == QUEUE_WITH_CORRUPT_BLOCKS) {
-        // do not choose corrupted blocks.
-        continue;
-      }
-
       // Go through all blocks that need reconstructions with current priority.
       // Set the iterator to the first unprocessed block at this priority level
+      // We do not want to skip QUEUE_WITH_CORRUPT_BLOCKS because we still need
+      // to look for deleted blocks if any.
+      final boolean inCorruptLevel = (QUEUE_WITH_CORRUPT_BLOCKS == priority);
       final Iterator<BlockInfo> i = priorityQueues.get(priority).getBookmark();
       final List<BlockInfo> blocks = new LinkedList<>();
-      blocksToReconstruct.add(blocks);
-      // Loop through all remaining blocks in the list.
+      if (!inCorruptLevel) {
+        blocksToReconstruct.add(blocks);
+      }
       for(; count < blocksToProcess && i.hasNext(); count++) {
-        blocks.add(i.next());
+        BlockInfo block = i.next();
+        if (block.isDeleted()) {
+          toRemove.add(block);
+          continue;
+        }
+        if (!inCorruptLevel) {
+          blocks.add(block);
+        }
+      }
+      for (BlockInfo bInfo : toRemove) {
+        remove(bInfo, priority);
       }
+      toRemove.clear();
     }
 
 if (priority == LEVEL || resetIterators) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
index c40f277..e33e24f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -41,6 +42,7 @@ import static org.junit.Assert.fail;
 public class TestLowRedundancyBlockQueues {
 
   private final ErasureCodingPolicy ecPolicy;
+  private static AtomicLong mockINodeId = new AtomicLong(0);
 
   public TestLowRedundancyBlockQueues(ErasureCodingPolicy policy) {
 ecPolicy = policy;
@@ -52,7 +54,15 @@ public class TestLowRedundancyBlockQueues {
   }
 
   private BlockInfo genBlockInfo(long id) {
-return new BlockInfoContiguous(new 

[hadoop] branch trunk updated: HDFS-15622. Deleted blocks linger in the replications queue. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new da1b6e3  HDFS-15622. Deleted blocks linger in the replications queue. 
Contributed by Ahmed Hussein.
da1b6e3 is described below

commit da1b6e3cc286db00b385f3280627d2b2063b4e59
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 16:59:49 2020 -0500

HDFS-15622. Deleted blocks linger in the replications queue. Contributed by 
Ahmed Hussein.
---
 .../blockmanagement/LowRedundancyBlocks.java   | 30 ++
 .../TestLowRedundancyBlockQueues.java  | 47 +-
 .../blockmanagement/TestReplicationPolicy.java | 17 ++--
 3 files changed, 81 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
index f6ef248..d719e93 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -500,6 +501,8 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
* the block count is met or iteration reaches the end of the lowest priority
* list, in which case bookmarks for each block list are reset to the heads
* of their respective lists.
+   * If a block is deleted (has invalid bcId), it will be removed from the low
+   * redundancy queues.
*
* @param blocksToProcess - number of blocks to fetch from low redundancy
*  blocks.
@@ -515,21 +518,32 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
 
     int count = 0;
     int priority = 0;
+    HashSet<BlockInfo> toRemove = new HashSet<>();
     for (; count < blocksToProcess && priority < LEVEL; priority++) {
-      if (priority == QUEUE_WITH_CORRUPT_BLOCKS) {
-        // do not choose corrupted blocks.
-        continue;
-      }
-
       // Go through all blocks that need reconstructions with current priority.
       // Set the iterator to the first unprocessed block at this priority level
+      // We do not want to skip QUEUE_WITH_CORRUPT_BLOCKS because we still need
+      // to look for deleted blocks if any.
+      final boolean inCorruptLevel = (QUEUE_WITH_CORRUPT_BLOCKS == priority);
       final Iterator<BlockInfo> i = priorityQueues.get(priority).getBookmark();
       final List<BlockInfo> blocks = new LinkedList<>();
-      blocksToReconstruct.add(blocks);
-      // Loop through all remaining blocks in the list.
+      if (!inCorruptLevel) {
+        blocksToReconstruct.add(blocks);
+      }
       for(; count < blocksToProcess && i.hasNext(); count++) {
-        blocks.add(i.next());
+        BlockInfo block = i.next();
+        if (block.isDeleted()) {
+          toRemove.add(block);
+          continue;
+        }
+        if (!inCorruptLevel) {
+          blocks.add(block);
+        }
+      }
+      for (BlockInfo bInfo : toRemove) {
+        remove(bInfo, priority);
       }
+      toRemove.clear();
     }
 
 if (priority == LEVEL || resetIterators) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
index c40f277..e33e24f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -41,6 +42,7 @@ import static org.junit.Assert.fail;
 public class TestLowRedundancyBlockQueues {
 
   private final ErasureCodingPolicy ecPolicy;
+  private static AtomicLong mockINodeId = new AtomicLong(0);
 
   public TestLowRedundancyBlockQueues(ErasureCodingPolicy policy) {
 ecPolicy = policy;
@@ -52,7 +54,15 @@ public class TestLowRedundancyBlockQueues {
   }
 
   private BlockInfo genBlockInfo(long id) {
-return new BlockInfoContiguous(new Block(id), (short) 3);
+return genBlockInfo(id, false);
+  }
+
+  private BlockInfo 

[hadoop] branch branch-3.1 updated: HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 33db7c1  HDFS-15618. Improve datanode shutdown latency. Contributed by 
Ahmed Hussein.
33db7c1 is described below

commit 33db7c140b3d8945542fb021ad222dac4a0d38a8
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 15:26:34 2020 -0500

HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.

(cherry picked from commit cf932a7e2d6182471df4eba1333737912a32534b)
(cherry picked from commit f3b2d85690aeb65da7540f48edd52ca7315d0b02)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  10 ++
 .../hadoop/hdfs/server/datanode/BlockScanner.java  |  33 -
 .../hadoop/hdfs/server/datanode/DataNode.java  |   4 +-
 .../hadoop/hdfs/server/datanode/VolumeScanner.java |   3 +
 .../server/datanode/VolumeScannerCBInjector.java   |  51 
 .../src/main/resources/hdfs-default.xml|   9 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java |  28 +++-
 .../hdfs/server/datanode/TestBlockScanner.java | 142 +
 8 files changed, 273 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index bfa35bd..5412212 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -30,6 +30,8 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalcul
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.http.HttpConfig;
 
+import java.util.concurrent.TimeUnit;
+
 /** 
  * This class contains constants for configuration keys and default values
  * used in hdfs.
@@ -731,6 +733,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 
24;  // 3 weeks.
   public static final String  DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = 
"dfs.block.scanner.volume.bytes.per.second";
   public static final long
DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L;
+  /**
+   * The amount of time in milliseconds that the BlockScanner times out waiting
+   * for the VolumeScanner thread to join during a shutdown call.
+   */
+  public static final String  DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY =
+  "dfs.block.scanner.volume.join.timeout.ms";
+  public static final long DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT =
+  TimeUnit.SECONDS.toMillis(5);
   public static final String  DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = 
"dfs.datanode.transferTo.allowed";
   public static final boolean DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT = true;
   public static final String  DFS_HEARTBEAT_INTERVAL_KEY = 
"dfs.heartbeat.interval";
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 8081895..82753e9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
 
@@ -66,6 +68,12 @@ public class BlockScanner {
*/
   private Conf conf;
 
+  /**
+   * Timeout duration in milliseconds waiting for {@link VolumeScanner} to stop
+   * inside {@link #removeAllVolumeScanners}.
+   */
+  private long joinVolumeScannersTimeOutMs;
+
   @VisibleForTesting
   void setConf(Conf conf) {
 this.conf = conf;
@@ -179,6 +187,9 @@ public class BlockScanner {
 
   public BlockScanner(DataNode datanode, Configuration conf) {
 this.datanode = datanode;
+setJoinVolumeScannersTimeOutMs(
+conf.getLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY,
+DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT));
 this.conf = new Conf(conf);
 if (isEnabled()) {
   

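The HDFS-15618 patch above amounts to bounding how long DataNode shutdown waits for each VolumeScanner thread, driven by the new dfs.block.scanner.volume.join.timeout.ms key (default 5 seconds). A rough standalone sketch of that join-with-timeout pattern follows; only the property name and default come from the diff, while ScannerShutdownSketch and its methods are illustrative, not DataNode code.

import java.util.concurrent.TimeUnit;

public class ScannerShutdownSketch {
  // Mirrors dfs.block.scanner.volume.join.timeout.ms with its 5s default.
  static final String JOIN_TIMEOUT_KEY = "dfs.block.scanner.volume.join.timeout.ms";
  static final long JOIN_TIMEOUT_DEFAULT_MS = TimeUnit.SECONDS.toMillis(5);

  private final Thread scanner;
  private final long joinTimeoutMs;

  ScannerShutdownSketch(long joinTimeoutMs) {
    this.joinTimeoutMs = joinTimeoutMs;
    this.scanner = new Thread(() -> {
      try {
        Thread.sleep(60_000);                  // simulate a long-running volume scan
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();    // exit promptly when interrupted
      }
    }, "VolumeScanner-sketch");
  }

  void start() {
    scanner.start();
  }

  /** Returns true if the scanner stopped within the configured timeout. */
  boolean shutdown() throws InterruptedException {
    scanner.interrupt();
    scanner.join(joinTimeoutMs);               // bounded wait instead of join()
    return !scanner.isAlive();
  }

  public static void main(String[] args) throws InterruptedException {
    ScannerShutdownSketch s = new ScannerShutdownSketch(JOIN_TIMEOUT_DEFAULT_MS);
    s.start();
    System.out.println("stopped in time: " + s.shutdown());
  }
}
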
[hadoop] branch branch-3.2 updated: HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new f3b2d85  HDFS-15618. Improve datanode shutdown latency. Contributed by 
Ahmed Hussein.
f3b2d85 is described below

commit f3b2d85690aeb65da7540f48edd52ca7315d0b02
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 14:59:09 2020 -0500

HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.

(cherry picked from commit cf932a7e2d6182471df4eba1333737912a32534b)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  10 ++
 .../hadoop/hdfs/server/datanode/BlockScanner.java  |  33 -
 .../hadoop/hdfs/server/datanode/DataNode.java  |   4 +-
 .../hadoop/hdfs/server/datanode/VolumeScanner.java |   3 +
 .../server/datanode/VolumeScannerCBInjector.java   |  51 
 .../src/main/resources/hdfs-default.xml|   9 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java |  28 +++-
 .../hdfs/server/datanode/TestBlockScanner.java | 142 +
 8 files changed, 273 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 2ba3850..e0d4306 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -31,6 +31,8 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalcul
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.http.HttpConfig;
 
+import java.util.concurrent.TimeUnit;
+
 /** 
  * This class contains constants for configuration keys and default values
  * used in hdfs.
@@ -787,6 +789,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 
24;  // 3 weeks.
   public static final String  DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = 
"dfs.block.scanner.volume.bytes.per.second";
   public static final long
DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L;
+  /**
+   * The amount of time in milliseconds that the BlockScanner times out waiting
+   * for the VolumeScanner thread to join during a shutdown call.
+   */
+  public static final String  DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY =
+  "dfs.block.scanner.volume.join.timeout.ms";
+  public static final long DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT =
+  TimeUnit.SECONDS.toMillis(5);
   public static final String  DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = 
"dfs.datanode.transferTo.allowed";
   public static final boolean DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT = true;
   public static final String  DFS_HEARTBEAT_INTERVAL_KEY = 
"dfs.heartbeat.interval";
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 8081895..82753e9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
 
@@ -66,6 +68,12 @@ public class BlockScanner {
*/
   private Conf conf;
 
+  /**
+   * Timeout duration in milliseconds waiting for {@link VolumeScanner} to stop
+   * inside {@link #removeAllVolumeScanners}.
+   */
+  private long joinVolumeScannersTimeOutMs;
+
   @VisibleForTesting
   void setConf(Conf conf) {
 this.conf = conf;
@@ -179,6 +187,9 @@ public class BlockScanner {
 
   public BlockScanner(DataNode datanode, Configuration conf) {
 this.datanode = datanode;
+setJoinVolumeScannersTimeOutMs(
+conf.getLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY,
+DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT));
 this.conf = new Conf(conf);
 if (isEnabled()) {
   LOG.info("Initialized block scanner with targetBytesPerSec {}",
@@ -198,6 +209,13 @@ public 

[hadoop] branch branch-3.3 updated: MAPREDUCE-7303. Fix TestJobResourceUploader failures after HADOOP-16878. Contributed by Peter Bacsko.

2020-10-22 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new c40f0f1  MAPREDUCE-7303. Fix TestJobResourceUploader failures after 
HADOOP-16878. Contributed by Peter Bacsko.
c40f0f1 is described below

commit c40f0f1eb34df64449472f698d1d68355f109e9b
Author: Akira Ajisaka 
AuthorDate: Fri Oct 23 04:34:49 2020 +0900

MAPREDUCE-7303. Fix TestJobResourceUploader failures after HADOOP-16878. 
Contributed by Peter Bacsko.

(cherry picked from commit 7bc305db5d5a50127dd7780534d6d7d0b7c683d1)
---
 .../org/apache/hadoop/mapreduce/TestJobResourceUploader.java | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
index bbfe2fb..c49d771 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
@@ -399,6 +399,7 @@ public class TestJobResourceUploader {
   Path expectedRemotePath) throws IOException, URISyntaxException {
 Path dstPath = new Path("hdfs://localhost:1234/home/hadoop/");
 DistributedFileSystem fs = mock(DistributedFileSystem.class);
+when(fs.makeQualified(any(Path.class))).thenReturn(dstPath);
 // make sure that FileUtils.copy() doesn't try to copy anything
 when(fs.mkdirs(any(Path.class))).thenReturn(false);
 when(fs.getUri()).thenReturn(dstPath.toUri());
@@ -407,6 +408,7 @@ public class TestJobResourceUploader {
 JobConf jConf = new JobConf();
 Path originalPath = spy(path);
 FileSystem localFs = mock(FileSystem.class);
+when(localFs.makeQualified(any(Path.class))).thenReturn(path);
 FileStatus fileStatus = mock(FileStatus.class);
 when(localFs.getFileStatus(any(Path.class))).thenReturn(fileStatus);
 when(fileStatus.isDirectory()).thenReturn(true);
@@ -420,8 +422,14 @@ public class TestJobResourceUploader {
 originalPath, jConf, (short) 1);
 
 ArgumentCaptor<Path> pathCaptor = ArgumentCaptor.forClass(Path.class);
-verify(fs).makeQualified(pathCaptor.capture());
-Assert.assertEquals("Path", expectedRemotePath, pathCaptor.getValue());
+verify(fs, times(2)).makeQualified(pathCaptor.capture());
+List<Path> paths = pathCaptor.getAllValues();
+// first call is invoked on a path which was created by the test,
+// but the second one is created in copyRemoteFiles()
+Assert.assertEquals("Expected remote path",
+expectedRemotePath, paths.get(0));
+Assert.assertEquals("Expected remote path",
+expectedRemotePath, paths.get(1));
   }
 
   private void testErasureCodingSetting(boolean defaultBehavior)





[hadoop] branch trunk updated: MAPREDUCE-7303. Fix TestJobResourceUploader failures after HADOOP-16878. Contributed by Peter Bacsko.

2020-10-22 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7bc305d  MAPREDUCE-7303. Fix TestJobResourceUploader failures after 
HADOOP-16878. Contributed by Peter Bacsko.
7bc305d is described below

commit 7bc305db5d5a50127dd7780534d6d7d0b7c683d1
Author: Akira Ajisaka 
AuthorDate: Fri Oct 23 04:34:49 2020 +0900

MAPREDUCE-7303. Fix TestJobResourceUploader failures after HADOOP-16878. 
Contributed by Peter Bacsko.
---
 .../org/apache/hadoop/mapreduce/TestJobResourceUploader.java | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
index bbfe2fb..c49d771 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
@@ -399,6 +399,7 @@ public class TestJobResourceUploader {
   Path expectedRemotePath) throws IOException, URISyntaxException {
 Path dstPath = new Path("hdfs://localhost:1234/home/hadoop/");
 DistributedFileSystem fs = mock(DistributedFileSystem.class);
+when(fs.makeQualified(any(Path.class))).thenReturn(dstPath);
 // make sure that FileUtils.copy() doesn't try to copy anything
 when(fs.mkdirs(any(Path.class))).thenReturn(false);
 when(fs.getUri()).thenReturn(dstPath.toUri());
@@ -407,6 +408,7 @@ public class TestJobResourceUploader {
 JobConf jConf = new JobConf();
 Path originalPath = spy(path);
 FileSystem localFs = mock(FileSystem.class);
+when(localFs.makeQualified(any(Path.class))).thenReturn(path);
 FileStatus fileStatus = mock(FileStatus.class);
 when(localFs.getFileStatus(any(Path.class))).thenReturn(fileStatus);
 when(fileStatus.isDirectory()).thenReturn(true);
@@ -420,8 +422,14 @@ public class TestJobResourceUploader {
 originalPath, jConf, (short) 1);
 
 ArgumentCaptor<Path> pathCaptor = ArgumentCaptor.forClass(Path.class);
-verify(fs).makeQualified(pathCaptor.capture());
-Assert.assertEquals("Path", expectedRemotePath, pathCaptor.getValue());
+verify(fs, times(2)).makeQualified(pathCaptor.capture());
+List<Path> paths = pathCaptor.getAllValues();
+// first call is invoked on a path which was created by the test,
+// but the second one is created in copyRemoteFiles()
+Assert.assertEquals("Expected remote path",
+expectedRemotePath, paths.get(0));
+Assert.assertEquals("Expected remote path",
+expectedRemotePath, paths.get(1));
   }
 
   private void testErasureCodingSetting(boolean defaultBehavior)


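The fix above stubs FileSystem.makeQualified() on both mocks and then verifies the two invocations through an ArgumentCaptor. A reduced sketch of that capture-and-verify pattern is below, assuming Mockito 2+ and JUnit 4 on the test classpath; the Qualifier interface and upload() method are stand-ins, not the real TestJobResourceUploader code. A Mockito mock returns null for any unstubbed call with an object return type, which is why the patch adds the when(...).thenReturn(...) stubbing.

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.ArgumentCaptor;

public class CaptorSketchTest {

  /** Stand-in for the FileSystem.makeQualified() collaborator. */
  interface Qualifier {
    String makeQualified(String path);
  }

  /** Code under test calls makeQualified twice, like the uploader does. */
  static String upload(Qualifier fs, String src) {
    String first = fs.makeQualified(src);
    return fs.makeQualified(first);
  }

  @Test
  public void capturesBothInvocations() {
    Qualifier fs = mock(Qualifier.class);
    // Without this stubbing the second call would operate on null,
    // the mock's default answer for object-returning methods.
    when(fs.makeQualified(anyString())).thenReturn("hdfs://nn/home/hadoop");

    upload(fs, "file:/tmp/job.jar");

    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
    verify(fs, times(2)).makeQualified(captor.capture());
    List<String> paths = captor.getAllValues();
    Assert.assertEquals("file:/tmp/job.jar", paths.get(0));
    Assert.assertEquals("hdfs://nn/home/hadoop", paths.get(1));
  }
}
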



[hadoop] branch trunk updated: HADOOP-17319. Update the checkstyle config to ban some guava functions. (#2400)

2020-10-22 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6ea2731  HADOOP-17319. Update the checkstyle config to ban some guava 
functions. (#2400)
6ea2731 is described below

commit 6ea2731c2b587d7c09e16c96522a9c0eca75e9da
Author: Akira Ajisaka 
AuthorDate: Fri Oct 23 04:28:17 2020 +0900

HADOOP-17319. Update the checkstyle config to ban some guava functions. 
(#2400)
---
 hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml 
b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
index 0ff47b8..51f9acc 100644
--- a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
+++ b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
@@ -122,6 +122,7 @@
 
   
   
+  
 
 
 





[hadoop] branch trunk updated: HADOOP-17175. [JDK 11] Fix javadoc errors in hadoop-common module. (#2397)

2020-10-22 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6a9ceed  HADOOP-17175. [JDK 11] Fix javadoc errors in hadoop-common 
module. (#2397)
6a9ceed is described below

commit 6a9ceedfb3ee7c2f66a44083fb8e68cca508e207
Author: Akira Ajisaka 
AuthorDate: Fri Oct 23 03:15:45 2020 +0900

HADOOP-17175. [JDK 11] Fix javadoc errors in hadoop-common module. (#2397)
---
 hadoop-common-project/hadoop-common/pom.xml|  1 -
 .../main/java/org/apache/hadoop/fs/FileUtil.java   |  2 +-
 .../java/org/apache/hadoop/fs/PartialListing.java  |  2 +-
 .../org/apache/hadoop/fs/impl/FutureIOSupport.java |  8 +-
 .../org/apache/hadoop/fs/viewfs/Constants.java |  4 +-
 .../fs/viewfs/HCFSMountTableConfigLoader.java  |  2 +-
 .../fs/viewfs/ViewFileSystemOverloadScheme.java| 95 --
 .../java/org/apache/hadoop/ipc/ProxyCombiner.java  |  4 +-
 .../hadoop/ipc/WeightedTimeCostProvider.java   |  4 +-
 .../hadoop/net/DomainNameResolverFactory.java  |  4 +-
 .../java/org/apache/hadoop/security/Groups.java| 12 +--
 .../security/ssl/DelegatingSSLSocketFactory.java   | 23 +++---
 12 files changed, 87 insertions(+), 74 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index fc0927e..cc786e8 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -35,7 +35,6 @@
 true
 ../etc/hadoop
 wsce-site.xml
-true
   
 
 
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 73ca6e6..e078a2c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -1812,7 +1812,7 @@ public class FileUtil {
* specified charset. This utility method opens the file for writing, 
creating
* the file if it does not exist, or overwrites an existing file.
*
-   * @param FileContext the file context with which to create the file
+   * @param fs the file context with which to create the file
* @param path the path to the file
* @param charseq the char sequence to write to the file
* @param cs the charset to use for encoding
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java
index 80d173e..cec5d68 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java
@@ -30,7 +30,7 @@ import java.util.List;
  * A partial listing of the children of a parent directory. Since it is a
  * partial listing, multiple PartialListing may need to be combined to obtain
  * the full listing of a parent directory.
- * 
+ * 
  * ListingBatch behaves similar to a Future, in that getting the result via
  * {@link #get()} will throw an Exception if there was a failure.
  */
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
index f13d701..84ca94e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
@@ -166,11 +166,11 @@ public final class FutureIOSupport {
* Propagate options to any builder, converting everything with the
* prefix to an option where, if there were 2+ dot-separated elements,
* it is converted to a schema.
-   * <pre>
+   * {@code
*   fs.example.s3a.option => s3a:option
*   fs.example.fs.io.policy => s3a.io.policy
*   fs.example.something => something
-   * </pre>
+   * }
* @param builder builder to modify
* @param conf configuration to read
* @param optionalPrefix prefix for optional settings
@@ -196,11 +196,11 @@ public final class FutureIOSupport {
* Propagate options to any builder, converting everything with the
* prefix to an option where, if there were 2+ dot-separated elements,
* it is converted to a schema.
-   * <pre>
+   * {@code
*   fs.example.s3a.option => s3a:option
*   fs.example.fs.io.policy => s3a.io.policy
*   fs.example.something => something
-   * </pre>
+   * }
* @param builder builder to modify
* @param conf configuration to read
* @param prefix prefix to scan/strip
diff --git 

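Most of the javadoc fixes above replace HTML markup that JDK 11's stricter doclint rejects with the inline {@code ...} tag; the tag treats its contents as literal text rather than HTML, so characters such as '<', '>' and '=>' need no entity escaping. A small illustrative example follows (hypothetical class, not taken from the patch), using the same substitution the FutureIOSupport hunk makes:

/**
 * Maps prefixed configuration options onto builder options, for example
 * {@code
 *   fs.example.s3a.option => s3a:option
 *   fs.example.something  => something
 * }
 */
public final class PrefixMappingDoc {
  private PrefixMappingDoc() {
  }
}
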
[hadoop] branch branch-3.3 updated: HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.

2020-10-22 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new cf932a7  HDFS-15618. Improve datanode shutdown latency. Contributed by 
Ahmed Hussein.
cf932a7 is described below

commit cf932a7e2d6182471df4eba1333737912a32534b
Author: Kihwal Lee 
AuthorDate: Thu Oct 22 09:55:28 2020 -0500

HDFS-15618. Improve datanode shutdown latency. Contributed by Ahmed Hussein.
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |   8 ++
 .../hadoop/hdfs/server/datanode/BlockScanner.java  |  33 -
 .../hadoop/hdfs/server/datanode/DataNode.java  |   4 +-
 .../hadoop/hdfs/server/datanode/VolumeScanner.java |   3 +
 .../server/datanode/VolumeScannerCBInjector.java   |  51 
 .../src/main/resources/hdfs-default.xml|   9 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java |  28 +++-
 .../hdfs/server/datanode/TestBlockScanner.java | 142 +
 8 files changed, 271 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 4b8c27b..5264799 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -846,6 +846,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 
24;  // 3 weeks.
   public static final String  DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = 
"dfs.block.scanner.volume.bytes.per.second";
   public static final long
DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L;
+  /**
+   * The amount of time in milliseconds that the BlockScanner times out waiting
+   * for the VolumeScanner thread to join during a shutdown call.
+   */
+  public static final String  DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY =
+  "dfs.block.scanner.volume.join.timeout.ms";
+  public static final long DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT =
+  TimeUnit.SECONDS.toMillis(5);
   public static final String  DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED =
   "dfs.block.scanner.skip.recent.accessed";
   public static final boolean DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED_DEFAULT =
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 82efcf8..dc619f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -22,6 +22,8 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_SKIP_RECENT
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
 
@@ -68,6 +70,12 @@ public class BlockScanner {
*/
   private Conf conf;
 
+  /**
+   * Timeout duration in milliseconds waiting for {@link VolumeScanner} to stop
+   * inside {@link #removeAllVolumeScanners}.
+   */
+  private long joinVolumeScannersTimeOutMs;
+
   @VisibleForTesting
   void setConf(Conf conf) {
 this.conf = conf;
@@ -185,6 +193,9 @@ public class BlockScanner {
 
   public BlockScanner(DataNode datanode, Configuration conf) {
 this.datanode = datanode;
+setJoinVolumeScannersTimeOutMs(
+conf.getLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY,
+DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT));
 this.conf = new Conf(conf);
 if (isEnabled()) {
   LOG.info("Initialized block scanner with targetBytesPerSec {}",
@@ -204,6 +215,13 @@ public class BlockScanner {
 return (conf.scanPeriodMs > 0) && (conf.targetBytesPerSec > 0);
   }
 
+  /**
+   * Returns true if there is any scanner thread registered.
+   */
+  public synchronized boolean hasAnyRegisteredScanner() {
+return !scanners.isEmpty();
+  }
+
  /**
   * Set up a scanner for the given block pool and volume.
   *
@@ -268,7 

[hadoop] branch trunk updated: YARN-10453. Add partition resource info to get-node-labels and label-mappings api responses. Contributed Akhil PB.

2020-10-22 Thread sunilg
This is an automated email from the ASF dual-hosted git repository.

sunilg pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7435604  YARN-10453. Add partition resource info to get-node-labels 
and label-mappings api responses. Contributed Akhil PB.
7435604 is described below

commit 7435604a91a49f5c5717083fbaee74dd8ec1c426
Author: Sunil G 
AuthorDate: Tue Oct 20 15:53:44 2020 +0530

YARN-10453. Add partition resource info to get-node-labels and 
label-mappings api responses. Contributed Akhil PB.
---
 .../server/resourcemanager/webapp/NodeIDsInfo.java | 16 ++
 .../resourcemanager/webapp/RMWebServices.java  | 18 +-
 .../resourcemanager/webapp/dao/NodeLabelInfo.java  | 10 
 .../{NodeIDsInfo.java => dao/PartitionInfo.java}   | 30 --
 .../webapp/TestRMWebServicesNodeLabels.java| 67 +-
 5 files changed, 118 insertions(+), 23 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
index 5f45b96..b492793 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
@@ -18,6 +18,10 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.PartitionInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
+
 import java.util.ArrayList;
 import java.util.List;
 
@@ -39,6 +43,9 @@ public class NodeIDsInfo {
   @XmlElement(name="nodes")
   protected ArrayList<String> nodeIDsList = new ArrayList<String>();
 
+  @XmlElement(name = "partitionInfo")
+  private PartitionInfo partitionInfo;
+
   public NodeIDsInfo() {
   } // JAXB needs this
 
@@ -46,7 +53,16 @@ public class NodeIDsInfo {
 this.nodeIDsList.addAll(nodeIdsList);
   }
 
+  public NodeIDsInfo(List<String> nodeIdsList, Resource resource) {
+this(nodeIdsList);
+this.partitionInfo = new PartitionInfo(new ResourceInfo(resource));
+  }
+
   public ArrayList<String> getNodeIDs() {
 return nodeIDsList;
   }
+
+  public PartitionInfo getPartitionInfo() {
+return partitionInfo;
+  }
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 02ad3ab..2a725cf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -184,6 +184,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntr
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList;
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.PartitionInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.RMQueueAclInfo;
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDefinitionInfo;
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDeleteRequestInfo;
@@ -209,6 +210,7 @@ import 
org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
 import org.apache.hadoop.yarn.util.AdHocLogDumper;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Times;
+import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 import org.apache.hadoop.yarn.webapp.ForbiddenException;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
@@ -1296,8 +1298,10 @@ public class RMWebServices extends WebServices 
implements RMWebServiceProtocol {
   for (NodeId nodeId : entry.getValue()) {
 nodeIdStrList.add(nodeId.toString());
   }
+  Resource resource = rm.getRMContext().getNodeLabelManager()
+
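The YARN-10453 change shown above threads a per-partition resource summary into the existing JAXB DAO so the node-labels REST responses carry it alongside the node list. A hedged sketch of that DAO shape follows; the element names "nodes" and "partitionInfo" come from the diff, while LabelNodesSketch, PartitionSketch and the resourceAvailable element are illustrative stand-ins, not the actual NodeIDsInfo/PartitionInfo classes.

import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;

/**
 * Sketch of the DAO shape: a labelled node set that also carries an
 * optional nested partition-resource element, serialized by JAXB.
 */
@XmlRootElement(name = "labelNodes")
@XmlAccessorType(XmlAccessType.FIELD)
public class LabelNodesSketch {

  @XmlElement(name = "nodes")
  private List<String> nodeIds = new ArrayList<>();

  // New in the patch: partition resource totals exposed alongside the nodes.
  @XmlElement(name = "partitionInfo")
  private PartitionSketch partitionInfo;

  public LabelNodesSketch() {
    // JAXB needs a no-arg constructor.
  }

  public LabelNodesSketch(List<String> nodeIds, String resourceSummary) {
    this.nodeIds.addAll(nodeIds);
    this.partitionInfo = new PartitionSketch(resourceSummary);
  }

  /** Stand-in for the real PartitionInfo/ResourceInfo pair. */
  @XmlAccessorType(XmlAccessType.FIELD)
  public static class PartitionSketch {
    @XmlElement(name = "resourceAvailable")
    private String resourceAvailable;

    public PartitionSketch() {
    }

    public PartitionSketch(String resourceAvailable) {
      this.resourceAvailable = resourceAvailable;
    }
  }
}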