[hadoop] branch branch-3.2 updated: HDFS-14861. Reset LowRedundancyBlocks Iterator periodically. Contributed by Stephen O'Donnell.

2020-02-25 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 2377649  HDFS-14861. Reset LowRedundancyBlocks Iterator periodically. Contributed by Stephen O'Donnell.
2377649 is described below

commit 2377649cdb1307a72a946b95a9a04f316c6483ef
Author: Stephen O'Donnell 
AuthorDate: Tue Feb 25 13:27:53 2020 -0800

    HDFS-14861. Reset LowRedundancyBlocks Iterator periodically. Contributed by Stephen O'Donnell.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit 900430b9907b590ed2d73a0d68f079c7f4d754b1)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  4 ++++
 .../hdfs/server/blockmanagement/BlockManager.java  | 35 +++++++++++++++++++++++++++++++++--
 .../blockmanagement/LowRedundancyBlocks.java       | 24 +++++++++++++++++++++++-
 .../src/main/resources/hdfs-default.xml            | 18 ++++++++++++++++++
 .../TestLowRedundancyBlockQueues.java              | 27 +++++++++++++++++++++++++++
 5 files changed, 105 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e3dac0a..2ba3850 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -223,6 +223,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY;
   public static final int DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT = 3;
+  public static final String DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS =
+      "dfs.namenode.redundancy.queue.restart.iterations";
+  public static final int
+      DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS_DEFAULT = 2400;
   public static final String  DFS_NAMENODE_REPLICATION_MIN_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
   public static final int DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ca0942d..e7422df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -301,6 +301,16 @@ public class BlockManager implements BlockStatsMXBean {
*/
   private final long redundancyRecheckIntervalMs;
 
+  /**
+   * Tracks how many calls have been made to chooseLowRedundancyBlocks since
+   * the queue position was last reset to the queue head. If
+   * replQueueCallsSinceReset crosses the threshold, the next call will reset
+   * the iterators. A threshold of zero means the queue position will only be
+   * reset once the end of the queue has been reached.
+   */
+  private int replQueueResetToHeadThreshold;
+  private int replQueueCallsSinceReset = 0;
+
   /** How often to check and the limit for the storageinfo efficiency. */
   private final long storageInfoDefragmentInterval;
   private final long storageInfoDefragmentTimeout;
@@ -564,6 +574,18 @@ public class BlockManager implements BlockStatsMXBean {
 }
 this.minReplicationToBeInMaintenance = (short)minMaintenanceR;
 
+    replQueueResetToHeadThreshold = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS,
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS_DEFAULT);
+    if (replQueueResetToHeadThreshold < 0) {
+      LOG.warn("{} is set to {} and it must be >= 0. Resetting to default {}",
+          DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS,
+          replQueueResetToHeadThreshold,
+          DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS_DEFAULT);
+      replQueueResetToHeadThreshold =
+          DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS_DEFAULT;
+    }
+
 long heartbeatIntervalSecs = conf.getTimeDuration(
 DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
 DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
@@ -1904,9 +1926,18 @@ public class BlockManager implements BlockStatsMXBean {
     List<List<BlockInfo>> blocksToReconstruct = null;
 namesystem.writeLock();
 try {
-      // Choose the blocks to be reconstructed
+      boolean reset = false;
+      if (replQueueResetToHeadThreshold > 0) {
+        if (replQueueCallsSinceReset >= replQueueResetToHeadThreshold) {
+          reset = true;
+          replQueueCallsSinceReset = 0;
+        } else {
+          replQueueCallsSinceReset++;
+        }
+      }
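For operators, the new knob surfaces in hdfs-site.xml (the hdfs-default.xml hunk is listed in the diffstat above but not shown here). A minimal example, assuming only the key and default visible in the DFSConfigKeys hunk; with the default redundancy interval of 3 seconds also shown there, 2400 iterations means the queue iterators restart from the head roughly every two hours:

    <property>
      <name>dfs.namenode.redundancy.queue.restart.iterations</name>
      <value>2400</value>
    </property>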

[hadoop] branch trunk updated: HDFS-14861. Reset LowRedundancyBlocks Iterator periodically. Contributed by Stephen O'Donnell.

2020-02-25 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 900430b  HDFS-14861. Reset LowRedundancyBlocks Iterator periodically. Contributed by Stephen O'Donnell.
900430b is described below

commit 900430b9907b590ed2d73a0d68f079c7f4d754b1
Author: Stephen O'Donnell 
AuthorDate: Tue Feb 25 13:27:53 2020 -0800

    HDFS-14861. Reset LowRedundancyBlocks Iterator periodically. Contributed by Stephen O'Donnell.

Signed-off-by: Wei-Chiu Chuang 
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  4 ++++
 .../hdfs/server/blockmanagement/BlockManager.java  | 35 +++++++++++++++++++++++++++++++++--
 .../blockmanagement/LowRedundancyBlocks.java       | 24 +++++++++++++++++++++++-
 .../src/main/resources/hdfs-default.xml            | 18 ++++++++++++++++++
 .../TestLowRedundancyBlockQueues.java              | 27 +++++++++++++++++++++++++++
 5 files changed, 105 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index bb8039c..51900a4 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -244,6 +244,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY;
   public static final int DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT = 3;
+  public static final String DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS =
+      "dfs.namenode.redundancy.queue.restart.iterations";
+  public static final int
+      DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS_DEFAULT = 2400;
   public static final String  DFS_NAMENODE_REPLICATION_MIN_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
   public static final int DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 626048f..e2b22d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -300,6 +300,16 @@ public class BlockManager implements BlockStatsMXBean {
*/
   private final long redundancyRecheckIntervalMs;
 
+  /**
+   * Tracks how many calls have been made to chooseLowRedundancyBlocks since
+   * the queue position was last reset to the queue head. If
+   * replQueueCallsSinceReset crosses the threshold, the next call will reset
+   * the iterators. A threshold of zero means the queue position will only be
+   * reset once the end of the queue has been reached.
+   */
+  private int replQueueResetToHeadThreshold;
+  private int replQueueCallsSinceReset = 0;
+
   /** How often to check and the limit for the storageinfo efficiency. */
   private final long storageInfoDefragmentInterval;
   private final long storageInfoDefragmentTimeout;
@@ -572,6 +582,18 @@ public class BlockManager implements BlockStatsMXBean {
 }
 this.minReplicationToBeInMaintenance = (short)minMaintenanceR;
 
+    replQueueResetToHeadThreshold = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS,
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS_DEFAULT);
+    if (replQueueResetToHeadThreshold < 0) {
+      LOG.warn("{} is set to {} and it must be >= 0. Resetting to default {}",
+          DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS,
+          replQueueResetToHeadThreshold,
+          DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS_DEFAULT);
+      replQueueResetToHeadThreshold =
+          DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS_DEFAULT;
+    }
+
 long heartbeatIntervalSecs = conf.getTimeDuration(
 DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
 DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
@@ -1912,9 +1934,18 @@ public class BlockManager implements BlockStatsMXBean {
     List<List<BlockInfo>> blocksToReconstruct = null;
 namesystem.writeLock();
 try {
-      // Choose the blocks to be reconstructed
+      boolean reset = false;
+      if (replQueueResetToHeadThreshold > 0) {
+        if (replQueueCallsSinceReset >= replQueueResetToHeadThreshold) {
+          reset = true;
+          replQueueCallsSinceReset = 0;
+        } else {
+          replQueueCallsSinceReset++;
+        }
+      }
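Condensed, the pattern the patch adds is a bounded call counter gating an iterator reset. A minimal standalone sketch of that pattern (hypothetical class and names, not the BlockManager code itself):

    // Every `threshold` calls, signal the caller to restart iteration from the
    // head; a threshold of zero means "never reset until the end is reached".
    final class PeriodicReset {
      private final int threshold;
      private int callsSinceReset = 0;

      PeriodicReset(int threshold) {
        this.threshold = threshold;
      }

      boolean shouldReset() {
        boolean reset = false;
        if (threshold > 0) {
          if (callsSinceReset >= threshold) {
            reset = true;
            callsSinceReset = 0;
          } else {
            callsSinceReset++;
          }
        }
        return reset;
      }
    }

This mirrors the hunk above: the reset fires on the call after the counter reaches the threshold, and the counter only advances while periodic resets are enabled.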

[hadoop] branch branch-3.1 updated: HDFS-14861. Reset LowRedundancyBlocks Iterator periodically. Contributed by Stephen O'Donnell.

2020-02-25 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 8aaa8d1  HDFS-14861. Reset LowRedundancyBlocks Iterator periodically. Contributed by Stephen O'Donnell.
8aaa8d1 is described below

commit 8aaa8d1b712443da991291f98d5def2dde958a89
Author: Stephen O'Donnell 
AuthorDate: Tue Feb 25 13:27:53 2020 -0800

    HDFS-14861. Reset LowRedundancyBlocks Iterator periodically. Contributed by Stephen O'Donnell.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit 900430b9907b590ed2d73a0d68f079c7f4d754b1)
(cherry picked from commit 2377649cdb1307a72a946b95a9a04f316c6483ef)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  4 ++++
 .../hdfs/server/blockmanagement/BlockManager.java  | 35 +++++++++++++++++++++++++++++++++--
 .../blockmanagement/LowRedundancyBlocks.java       | 24 +++++++++++++++++++++++-
 .../src/main/resources/hdfs-default.xml            | 18 ++++++++++++++++++
 .../TestLowRedundancyBlockQueues.java              | 27 +++++++++++++++++++++++++++
 5 files changed, 105 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 117ff36..bfa35bd 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -217,6 +217,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY;
   public static final int DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT = 3;
+  public static final String DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS =
+      "dfs.namenode.redundancy.queue.restart.iterations";
+  public static final int
+      DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS_DEFAULT = 2400;
   public static final String  DFS_NAMENODE_REPLICATION_MIN_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
   public static final int DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 571a62c..34c6fd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -298,6 +298,16 @@ public class BlockManager implements BlockStatsMXBean {
*/
   private final long redundancyRecheckIntervalMs;
 
+  /**
+   * Tracks how many calls have been made to chooseLowRedundancyBlocks since
+   * the queue position was last reset to the queue head. If
+   * replQueueCallsSinceReset crosses the threshold, the next call will reset
+   * the iterators. A threshold of zero means the queue position will only be
+   * reset once the end of the queue has been reached.
+   */
+  private int replQueueResetToHeadThreshold;
+  private int replQueueCallsSinceReset = 0;
+
   /** How often to check and the limit for the storageinfo efficiency. */
   private final long storageInfoDefragmentInterval;
   private final long storageInfoDefragmentTimeout;
@@ -553,6 +563,18 @@ public class BlockManager implements BlockStatsMXBean {
 }
 this.minReplicationToBeInMaintenance = (short)minMaintenanceR;
 
+    replQueueResetToHeadThreshold = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS,
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS_DEFAULT);
+    if (replQueueResetToHeadThreshold < 0) {
+      LOG.warn("{} is set to {} and it must be >= 0. Resetting to default {}",
+          DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS,
+          replQueueResetToHeadThreshold,
+          DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS_DEFAULT);
+      replQueueResetToHeadThreshold =
+          DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_QUEUE_RESTART_ITERATIONS_DEFAULT;
+    }
+
 long heartbeatIntervalSecs = conf.getTimeDuration(
 DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
 DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
@@ -1890,9 +1912,18 @@ public class BlockManager implements BlockStatsMXBean {
     List<List<BlockInfo>> blocksToReconstruct = null;
 namesystem.writeLock();
 try {
-      // Choose the blocks to be reconstructed
+      boolean reset = false;
+      if (replQueueResetToHeadThreshold > 0) {
+        if (replQueueCallsSinceReset >= replQueueResetToHeadThreshold) {
+          reset = true;
+          replQueueCallsSinceReset = 0;
+        } else {
+          replQueueCallsSinceReset++;
+        }
+      }

[hadoop] branch trunk updated: YARN-8767. TestStreamingStatus fails. Contributed by Andras Bokor

2020-02-25 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8dc0794  YARN-8767. TestStreamingStatus fails. Contributed by Andras Bokor
8dc0794 is described below

commit 8dc079455e38938586885bc08fb7de5d9c0f0dbc
Author: Szilard Nemeth 
AuthorDate: Tue Feb 25 21:48:16 2020 +0100

YARN-8767. TestStreamingStatus fails. Contributed by Andras Bokor
---
 .../hadoop/streaming/TestStreamingStatus.java      | 63 ++++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 36 insertions(+), 27 deletions(-)

diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingStatus.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingStatus.java
index b2af40a..5cdb0d4 100644
--- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingStatus.java
+++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingStatus.java
@@ -22,6 +22,9 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.File;
 
+import org.apache.hadoop.mapred.MiniMRClientCluster;
+import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -32,7 +35,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.Counters;
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.hadoop.mapred.TaskAttemptID;
 import org.apache.hadoop.mapred.TaskID;
 import org.apache.hadoop.mapred.TaskLog;
@@ -90,7 +92,7 @@ public class TestStreamingStatus {
 "print STDERR \"my error msg after consuming input\\n\";\n" +
 "print STDERR \"reporter:counter:myOwnCounterGroup,myOwnCounter,1\\n\";\n";
 
-  MiniMRCluster mr = null;
+  private MiniMRClientCluster mr;
   FileSystem fs = null;
   JobConf conf = null;
 
@@ -105,10 +107,10 @@ public class TestStreamingStatus {
 conf.setBoolean(JTConfig.JT_RETIREJOBS, false);
 conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, false);
 
-mr = new MiniMRCluster(1, "file:///", 3, null , null, conf);
+mr = MiniMRClientClusterFactory.create(this.getClass(), 3, conf);
 
 Path inFile = new Path(INPUT_FILE);
-fs = inFile.getFileSystem(mr.createJobConf());
+fs = inFile.getFileSystem(mr.getConfig());
 clean(fs);
 
 buildExpectedJobOutput();
@@ -118,9 +120,13 @@ public class TestStreamingStatus {
* Kill the cluster after the test is done.
*/
   @After
-  public void tearDown() {
-if (fs != null) { clean(fs); }
-if (mr != null) { mr.shutdown(); }
+  public void tearDown() throws IOException {
+if (fs != null) {
+  clean(fs);
+}
+if (mr != null) {
+  mr.stop();
+}
   }
 
   // Updates expectedOutput to have the expected job output as a string
@@ -146,21 +152,24 @@ public class TestStreamingStatus {
 file.close();
   }
 
-  protected String[] genArgs(String jobtracker, String mapper, String reducer)
+  protected String[] genArgs(String jobtracker, String rmAddress,
+ String mapper, String reducer)
   {
 return new String[] {
-  "-input", INPUT_FILE,
-  "-output", OUTPUT_DIR,
-  "-mapper", mapper,
-  "-reducer", reducer,
-  "-jobconf", MRJobConfig.NUM_MAPS + "=1",
-  "-jobconf", MRJobConfig.NUM_REDUCES + "=1",
-  "-jobconf", MRJobConfig.PRESERVE_FAILED_TASK_FILES + "=true",
-  "-jobconf", "stream.tmpdir=" + new Path(TEST_ROOT_DIR).toUri().getPath(),
-  "-jobconf", JTConfig.JT_IPC_ADDRESS + "="+jobtracker,
-  "-jobconf", "fs.default.name=file:///",
-  "-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR,
-  "-jobconf", "mapreduce.framework.name=yarn"
+"-input", INPUT_FILE,
+"-output", OUTPUT_DIR,
+"-mapper", mapper,
+"-reducer", reducer,
+"-jobconf", MRJobConfig.NUM_MAPS + "=1",
+"-jobconf", MRJobConfig.NUM_REDUCES + "=1",
+"-jobconf", MRJobConfig.PRESERVE_FAILED_TASK_FILES + "=true",
+"-jobconf", YarnConfiguration.RM_ADDRESS + "=" + rmAddress,
+"-jobconf", "stream.tmpdir=" +
+new Path(TEST_ROOT_DIR).toUri().getPath(),
+"-jobconf", JTConfig.JT_IPC_ADDRESS + "="+jobtracker,
+"-jobconf", "fs.default.name=file:///",
+"-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR,
+"-jobconf", "mapreduce.framework.name=yarn"
 };
   }
 
@@ -218,10 +227,9 @@ public class TestStreamingStatus {
* Run another streaming job with the given script as reducer and validate.
*
* @param isEmptyInput Should the input to the script be empty ?
-   * @param script The content of the script that will run as the streaming task
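The migration from the deprecated MiniMRCluster to MiniMRClientCluster reduces to three call sites, all visible above: construction via the factory, configuration via getConfig(), and shutdown via stop(). A condensed sketch of the new lifecycle, assuming only the APIs used in this diff (the class name here is illustrative):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.MiniMRClientCluster;
    import org.apache.hadoop.mapred.MiniMRClientClusterFactory;

    public class MiniClusterLifecycle {
      private MiniMRClientCluster mr;

      void start() throws IOException {
        // Factory call replaces `new MiniMRCluster(1, "file:///", 3, ...)`;
        // 3 is the number of node managers, as in the test above.
        mr = MiniMRClientClusterFactory.create(getClass(), 3, new JobConf());
      }

      Configuration clusterConf() throws IOException {
        return mr.getConfig();   // replaces mr.createJobConf()
      }

      void stop() throws IOException {
        if (mr != null) {
          mr.stop();             // replaces mr.shutdown()
        }
      }
    }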

[hadoop] branch trunk updated: YARN-10130. FS-CS converter: Do not allow output dir to be the same as input dir. Contributed by Adam Antal

2020-02-25 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d68616b  YARN-10130. FS-CS converter: Do not allow output dir to be the same as input dir. Contributed by Adam Antal
d68616b is described below

commit d68616b0453881bec8dcb917a0b7d3fbe0838965
Author: Szilard Nemeth 
AuthorDate: Tue Feb 25 21:30:04 2020 +0100

    YARN-10130. FS-CS converter: Do not allow output dir to be the same as input dir. Contributed by Adam Antal
---
 .../FSConfigToCSConfigArgumentHandler.java         | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 .../converter/FSConfigConverterTestCommons.java    |  2 +-
 .../TestFSConfigToCSConfigArgumentHandler.java     | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 112 insertions(+), 1 deletion(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java
index c751953..9121a6d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java
@@ -25,6 +25,7 @@ import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,6 +40,13 @@ public class FSConfigToCSConfigArgumentHandler {
   private static final Logger LOG =
   LoggerFactory.getLogger(FSConfigToCSConfigArgumentHandler.class);
 
+  private static final String ALREADY_CONTAINS_EXCEPTION_MSG =
+  "The %s (provided with %s|%s arguments) contains " +
+  "the %s provided with the %s|%s options.";
+  private static final String ALREADY_CONTAINS_FILE_EXCEPTION_MSG =
+  "The %s %s (provided with %s|%s arguments) already contains a file " +
+  "or directory named %s which will be the output of the conversion!";
+
   private FSConfigToCSConfigRuleHandler ruleHandler;
   private FSConfigToCSConfigConverterParams converterParams;
   private ConversionOptions conversionOptions;
@@ -213,6 +221,7 @@ public class FSConfigToCSConfigArgumentHandler {
 checkFile(CliOption.FAIR_SCHEDULER, fairSchedulerXmlFile);
 checkFile(CliOption.CONVERSION_RULES, conversionRulesFile);
 checkDirectory(CliOption.OUTPUT_DIR, outputDir);
+checkOutputDirDoesNotContainXmls(yarnSiteXmlFile, outputDir);
 
 return FSConfigToCSConfigConverterParams.Builder.create()
 .withYarnSiteXmlConfig(yarnSiteXmlFile)
@@ -225,6 +234,45 @@ public class FSConfigToCSConfigArgumentHandler {
 .build();
   }
 
+  private static void checkOutputDirDoesNotContainXmls(String yarnSiteXmlFile,
+  String outputDir) {
+if (yarnSiteXmlFile == null || outputDir == null) {
+  return;
+}
+
+// check whether yarn-site.xml is not in the output folder
+File xmlFile = new File(yarnSiteXmlFile);
+File xmlParentFolder = xmlFile.getParentFile();
+File output = new File(outputDir);
+if (output.equals(xmlParentFolder)) {
+  throw new IllegalArgumentException(
+  String.format(ALREADY_CONTAINS_EXCEPTION_MSG,
+  CliOption.OUTPUT_DIR.name, CliOption.OUTPUT_DIR.shortSwitch,
+  CliOption.OUTPUT_DIR.longSwitch, CliOption.YARN_SITE.name,
+  CliOption.YARN_SITE.shortSwitch,
+  CliOption.YARN_SITE.longSwitch));
+}
+
+    // check that the output folder contains neither yarn-site.xml
+    // nor capacity-scheduler.xml
+checkFileNotInOutputDir(output,
+YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
+checkFileNotInOutputDir(output,
+YarnConfiguration.CS_CONFIGURATION_FILE);
+  }
+
+  private static void checkFileNotInOutputDir(File output, String fileName) {
+File file = new File(output, fileName);
+if (file.exists()) {
+  throw new IllegalArgumentException(
+  String.format(ALREADY_CONTAINS_FILE_EXCEPTION_MSG,
+  CliOption.OUTPUT_DIR.name, output,
+  CliOption.OUTPUT_DIR.shortSwitch,
+  CliOption.OUTPUT_DIR.longSwitch,
+  fileName));
+}
+  }
+
   private void printHelp(Options opts) {
     HelpFormatter formatter = new HelpFormatter();
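The new validation itself is compact; for readers who want it in isolation, here is a self-contained sketch of the same containment check (hypothetical class and messages, not the converter's actual API):

    import java.io.File;

    final class OutputDirGuard {
      // Rejects an output directory that is the input config's directory, or
      // that already holds a file with the name the conversion will write.
      static void check(String yarnSiteXml, String outputDir, String fileName) {
        File output = new File(outputDir);
        if (output.equals(new File(yarnSiteXml).getParentFile())) {
          throw new IllegalArgumentException(
              "Output directory must differ from the yarn-site.xml directory");
        }
        if (new File(output, fileName).exists()) {
          throw new IllegalArgumentException(
              "Output directory already contains " + fileName);
        }
      }

      public static void main(String[] args) {
        // e.g. refuse to write capacity-scheduler.xml over an existing one
        check("/etc/hadoop/conf/yarn-site.xml", "/tmp/fs2cs-out",
            "capacity-scheduler.xml");
      }
    }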

[hadoop-thirdparty] branch branch-1.0 created (now 16f6d2a)

2020-02-25 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to branch branch-1.0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git.


  at 16f6d2a  Preparing for 1.0.0 Release

This branch includes the following new commits:

 new 16f6d2a  Preparing for 1.0.0 Release

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] tag release-1.0.0-RC0 created (now 16f6d2a)

2020-02-25 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to tag release-1.0.0-RC0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git.


  at 16f6d2a  (commit)

This tag includes the following new commits:

 new 16f6d2a  Preparing for 1.0.0 Release

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] 01/01: Preparing for 1.0.0 Release

2020-02-25 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-1.0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git

commit 16f6d2ac9a8c7d3ad4ef63e20ed2e3e4dacc474e
Author: Vinayakumar B 
AuthorDate: Tue Feb 25 22:25:48 2020 +0530

Preparing for 1.0.0 Release
---
 hadoop-shaded-jaeger/pom.xml       | 2 +-
 hadoop-shaded-protobuf_3_7/pom.xml | 2 +-
 pom.xml                            | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-shaded-jaeger/pom.xml b/hadoop-shaded-jaeger/pom.xml
index 90513f7..994e892 100644
--- a/hadoop-shaded-jaeger/pom.xml
+++ b/hadoop-shaded-jaeger/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hadoop-thirdparty</artifactId>
     <groupId>org.apache.hadoop.thirdparty</groupId>
-    <version>1.0.0-SNAPSHOT</version>
+    <version>1.0.0</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
 
diff --git a/hadoop-shaded-protobuf_3_7/pom.xml b/hadoop-shaded-protobuf_3_7/pom.xml
index 5a622cd..87d9ab6 100644
--- a/hadoop-shaded-protobuf_3_7/pom.xml
+++ b/hadoop-shaded-protobuf_3_7/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hadoop-thirdparty</artifactId>
     <groupId>org.apache.hadoop.thirdparty</groupId>
-    <version>1.0.0-SNAPSHOT</version>
+    <version>1.0.0</version>
     <relativePath>..</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
diff --git a/pom.xml b/pom.xml
index b663461..646577d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -23,7 +23,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop.thirdparty</groupId>
   <artifactId>hadoop-thirdparty</artifactId>
-  <version>1.0.0-SNAPSHOT</version>
+  <version>1.0.0</version>
   <parent>
     <groupId>org.apache</groupId>
     <artifactId>apache</artifactId>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch trunk updated: Preparing for 1.1.0-SNAPSHOT development

2020-02-25 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 85ae8d7  Preparing for 1.1.0-SNAPSHOT development
85ae8d7 is described below

commit 85ae8d7c582a696a6eef45d4e7a814e2e0222388
Author: Vinayakumar B 
AuthorDate: Tue Feb 25 22:40:30 2020 +0530

Preparing for 1.1.0-SNAPSHOT development
---
 hadoop-shaded-jaeger/pom.xml       | 2 +-
 hadoop-shaded-protobuf_3_7/pom.xml | 2 +-
 pom.xml                            | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-shaded-jaeger/pom.xml b/hadoop-shaded-jaeger/pom.xml
index 90513f7..0b4db23 100644
--- a/hadoop-shaded-jaeger/pom.xml
+++ b/hadoop-shaded-jaeger/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hadoop-thirdparty</artifactId>
     <groupId>org.apache.hadoop.thirdparty</groupId>
-    <version>1.0.0-SNAPSHOT</version>
+    <version>1.1.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
 
diff --git a/hadoop-shaded-protobuf_3_7/pom.xml b/hadoop-shaded-protobuf_3_7/pom.xml
index 5a622cd..90f9def 100644
--- a/hadoop-shaded-protobuf_3_7/pom.xml
+++ b/hadoop-shaded-protobuf_3_7/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hadoop-thirdparty</artifactId>
     <groupId>org.apache.hadoop.thirdparty</groupId>
-    <version>1.0.0-SNAPSHOT</version>
+    <version>1.1.0-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
diff --git a/pom.xml b/pom.xml
index b663461..a8d44a5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -23,7 +23,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop.thirdparty</groupId>
   <artifactId>hadoop-thirdparty</artifactId>
-  <version>1.0.0-SNAPSHOT</version>
+  <version>1.1.0-SNAPSHOT</version>
   <parent>
     <groupId>org.apache</groupId>
     <artifactId>apache</artifactId>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2.10 updated: HDFS-13404. Addendum: RBF: TestRouterWebHDFSContractAppend.testRenameFileBeingAppended fail. Contributed by Takanobu Asanuma.

2020-02-25 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 3b86011  HDFS-13404. Addendum: RBF: TestRouterWebHDFSContractAppend.testRenameFileBeingAppended fail. Contributed by Takanobu Asanuma.
3b86011 is described below

commit 3b86011c24c441f2b46ef612ceaaede3298ebd31
Author: Kihwal Lee 
AuthorDate: Tue Feb 25 12:28:56 2020 -0600

    HDFS-13404. Addendum: RBF: TestRouterWebHDFSContractAppend.testRenameFileBeingAppended fail. Contributed by Takanobu Asanuma.

(cherry picked from commit b52fd05d42d9a76f6936a5d86c23fcd66244fe3d)

    Conflicts:
        hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
---
 .../org/apache/hadoop/fs/contract/AbstractContractAppendTest.java | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
index d61b635..a9fb117 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
@@ -133,6 +133,12 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestBase {
 assertPathExists("original file does not exist", target);
 byte[] dataset = dataset(256, 'a', 'z');
 FSDataOutputStream outputStream = getFileSystem().append(target);
+    if (isSupported(CREATE_VISIBILITY_DELAYED)) {
+      // Some filesystems, like WebHDFS, do not assure sequential consistency.
+      // In such a case a delay is needed. We cannot check the lease here,
+      // because it is closed inside the client-side package, so simply sleep.
+      Thread.sleep(100);
+    }
 outputStream.write(dataset);
 Path renamed = new Path(testPath, "renamed");
 rename(target, renamed);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2.10 updated: Revert "HDFS-6874. Add GETFILEBLOCKLOCATIONS operation to HttpFS. Contributed by Weiwei Yang"

2020-02-25 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new e80b5ec  Revert "HDFS-6874. Add GETFILEBLOCKLOCATIONS operation to HttpFS. Contributed by Weiwei Yang"
e80b5ec is described below

commit e80b5ec58dbad292ee4c3604d07e1f6c29603d34
Author: Kihwal Lee 
AuthorDate: Tue Feb 25 11:33:20 2020 -0600

Revert "HDFS-6874. Add GETFILEBLOCKLOCATIONS operation to HttpFS.  
Contributed by Weiwei Yang"

This reverts commit b4a108fa9f38ee028978474fe6c298bbd88fda7a.

    Conflicts:
        hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
        hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
        hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
---
 .../hadoop/fs/http/client/HttpFSFileSystem.java    | 42 --
 .../apache/hadoop/fs/http/server/FSOperations.java | 38 -
 .../fs/http/server/HttpFSParametersProvider.java   |  3 +-
 .../apache/hadoop/fs/http/server/HttpFSServer.java | 21 +
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  | 89 +-
 5 files changed, 3 insertions(+), 190 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 6e39f5b..596aef7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -23,12 +23,9 @@ import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.type.MapType;
 import com.google.common.base.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -124,8 +121,6 @@ public class HttpFSFileSystem extends FileSystem
   public static final String NEW_LENGTH_PARAM = "newlength";
   public static final String START_AFTER_PARAM = "startAfter";
   public static final String POLICY_NAME_PARAM = "storagepolicy";
-  public static final String OFFSET_PARAM = "offset";
-  public static final String LENGTH_PARAM = "length";
   public static final String SNAPSHOT_NAME_PARAM = "snapshotname";
   public static final String OLD_SNAPSHOT_NAME_PARAM = "oldsnapshotname";
 
@@ -218,7 +213,6 @@ public class HttpFSFileSystem extends FileSystem
 
   public static final String STORAGE_POLICIES_JSON = "BlockStoragePolicies";
   public static final String STORAGE_POLICY_JSON = "BlockStoragePolicy";
-  public static final String BLOCK_LOCATIONS_JSON = "BlockLocations";
 
   public static final int HTTP_TEMPORARY_REDIRECT = 307;
 
@@ -1429,42 +1423,6 @@ public class HttpFSFileSystem extends FileSystem
 return createStoragePolicy((JSONObject) json.get(STORAGE_POLICY_JSON));
   }
 
-  @Override
-  public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
-      long len) throws IOException {
-    Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, Operation.GETFILEBLOCKLOCATIONS.toString());
-    params.put(OFFSET_PARAM, Long.toString(start));
-    params.put(LENGTH_PARAM, Long.toString(len));
-    HttpURLConnection conn =
-        getConnection(Operation.GETFILEBLOCKLOCATIONS.getMethod(), params,
-            file.getPath(), true);
-    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
-    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
-    return toBlockLocations(json);
-  }
-
-  private BlockLocation[] toBlockLocations(JSONObject json)
-      throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    MapType subType = mapper.getTypeFactory().constructMapType(
-        Map.class,
-        String.class,
-        BlockLocation[].class);
-    MapType rootType = mapper.getTypeFactory().constructMapType(
-        Map.class,
-        mapper.constructType(String.class),
-        mapper.constructType(subType));
-
-    Map<String, Map<String, BlockLocation[]>> jsonMap = mapper
-        .readValue(json.toJSONString(), rootType);
-    Map<String, BlockLocation[]> locationMap = jsonMap
-        .get(BLOCK_LOCATIONS_JSON);
-    BlockLocation[] locationArray = locationMap.get(
-        BlockLocation.class.getSimpleName());
-    return locationArray;
-  }
-
   private BlockStoragePolicy createStoragePolicy(JSONObject policyJson)
       throws IOException {
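The deleted toBlockLocations method above is otherwise a compact illustration of deserializing a nested JSON map with Jackson's MapType. A self-contained sketch of the same technique (toy JSON, not the HttpFS wire format):

    import java.util.Map;

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.type.MapType;

    public class NestedMapParse {
      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Inner type: Map<String, int[]>; outer: Map<String, Map<String, int[]>>
        MapType inner = mapper.getTypeFactory().constructMapType(
            Map.class, mapper.constructType(String.class),
            mapper.constructType(int[].class));
        MapType outer = mapper.getTypeFactory().constructMapType(
            Map.class, mapper.constructType(String.class), inner);

        String json = "{\"outer\": {\"inner\": [1, 2, 3]}}";
        Map<String, Map<String, int[]>> parsed = mapper.readValue(json, outer);
        System.out.println(parsed.get("outer").get("inner")[2]);  // prints 3
      }
    }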

[hadoop] branch branch-2.10 updated: YARN-10140: TestTimelineAuthFilterForV2 fails due to login failures in branch-2.10. Contributed by Ahmed Hussein (ahussein)

2020-02-25 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 254b94b  YARN-10140: TestTimelineAuthFilterForV2 fails due to login failures in branch-2.10. Contributed by Ahmed Hussein (ahussein)
254b94b is described below

commit 254b94bf5e8037216d35e8664ac1cddc6400fb8f
Author: Eric E Payne 
AuthorDate: Tue Feb 25 16:36:50 2020 +

    YARN-10140: TestTimelineAuthFilterForV2 fails due to login failures in branch-2.10. Contributed by Ahmed Hussein (ahussein)
---
 .../timelineservice/security/TestTimelineAuthFilterForV2.java | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
index e7bbd2b..fba4150 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.KerberosTestUtils;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
@@ -137,6 +138,8 @@ public class TestTimelineAuthFilterForV2 {
   testMiniKDC.start();
   testMiniKDC.createPrincipal(
   httpSpnegoKeytabFile, HTTP_USER + "/localhost");
+  // reset the realm after the testMiniKDC has been created
+  KerberosName.resetDefaultRealm();
 } catch (Exception e) {
   fail("Couldn't setup MiniKDC.");
 }
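The fix is an ordering issue: KerberosName caches the default realm the first time it is read, so it has to be reset after the test KDC (with its own realm) is running. A minimal sketch of that ordering, assuming Hadoop's MiniKdc test harness:

    import java.io.File;

    import org.apache.hadoop.minikdc.MiniKdc;
    import org.apache.hadoop.security.authentication.util.KerberosName;

    public class KdcRealmReset {
      public static void main(String[] args) throws Exception {
        File workDir = new File("target", "kdc");
        workDir.mkdirs();
        MiniKdc kdc = new MiniKdc(MiniKdc.createConf(), workDir);
        kdc.start();  // KDC comes up with MiniKdc's default realm
        try {
          // Drop any realm cached before the KDC existed so that subsequent
          // principal-name resolution picks up the MiniKdc realm.
          KerberosName.resetDefaultRealm();
        } finally {
          kdc.stop();
        }
      }
    }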


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org