[hadoop] branch trunk updated: MAPREDUCE-7241. FileInputFormat listStatus with less memory footprint. Contributed by Zhihua Deng

2020-04-01 Thread jlowe
This is an automated email from the ASF dual-hosted git repository.

jlowe pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c613296  MAPREDUCE-7241. FileInputFormat listStatus with less memory 
footprint. Contributed by Zhihua Deng
c613296 is described below

commit c613296dc85ac7b22c171c84f578106b315cc012
Author: Jason Lowe 
AuthorDate: Wed Apr 1 07:45:42 2020 -0500

MAPREDUCE-7241. FileInputFormat listStatus with less memory footprint. 
Contributed by Zhihua Deng
---
 .../org/apache/hadoop/mapred/FileInputFormat.java  |  6 +-
 .../hadoop/mapred/LocatedFileStatusFetcher.java|  3 +-
 .../mapreduce/lib/input/FileInputFormat.java   | 37 --
 .../mapreduce/lib/input/TestFileInputFormat.java   | 79 --
 4 files changed, 114 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index b3e2b4a..b738037 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -193,7 +193,8 @@ public abstract class FileInputFormat implements 
InputFormat {
 if (stat.isDirectory()) {
   addInputPathRecursively(result, fs, stat.getPath(), inputFilter);
 } else {
-  result.add(stat);
+  result.add(org.apache.hadoop.mapreduce.lib.input.
+  FileInputFormat.shrinkStatus(stat));
 }
   }
 }
@@ -290,7 +291,8 @@ public abstract class FileInputFormat implements 
InputFormat {
   addInputPathRecursively(result, fs, stat.getPath(),
   inputFilter);
 } else {
-  result.add(stat);
+  result.add(org.apache.hadoop.mapreduce.lib.input.
+  FileInputFormat.shrinkStatus(stat));
 }
   }
 }
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
index a248f14..4cb36a5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
@@ -259,7 +259,8 @@ public class LocatedFileStatusFetcher {
 if (recursive && stat.isDirectory()) {
   result.dirsNeedingRecursiveCalls.add(stat);
 } else {
-  result.locatedFileStatuses.add(stat);
+  result.locatedFileStatuses.add(org.apache.hadoop.mapreduce.lib.
+  input.FileInputFormat.shrinkStatus(stat));
 }
   }
 }
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index 22efe14..1b3365c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -325,7 +325,7 @@ public abstract class FileInputFormat extends 
InputFormat {
   addInputPathRecursively(result, fs, stat.getPath(),
   inputFilter);
 } else {
-  result.add(stat);
+  result.add(shrinkStatus(stat));
 }
   }
 }
@@ -364,13 +364,42 @@ public abstract class FileInputFormat extends 
InputFormat {
 if (stat.isDirectory()) {
   addInputPathRecursively(result, fs, stat.getPath(), inputFilter);
 } else {
-  result.add(stat);
+  result.add(shrinkStatus(stat));
 }
   }
 }
   }
-  
-  
+
+  /**
+   * The HdfsBlockLocation includes a LocatedBlock which contains messages
+   * for issuing more detailed queries to datanodes about a block, but these
+   * messages are useless during job submission c

[hadoop] branch branch-3.0 updated: Revert "HDFS-14084. Need for more stats in DFSClient. Contributed by Pranay Singh."

2019-01-09 Thread jlowe
This is an automated email from the ASF dual-hosted git repository.

jlowe pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
 new 642213a  Revert "HDFS-14084. Need for more stats in DFSClient. 
Contributed by Pranay Singh."
642213a is described below

commit 642213a74a5fca1c6234303353f84d77dc3f451c
Author: Jason Lowe 
AuthorDate: Wed Jan 9 17:56:32 2019 -0600

Revert "HDFS-14084. Need for more stats in DFSClient. Contributed by Pranay 
Singh."

This reverts commit a4f84213241c647c973acfa460ac4f4b0895aa2d.
---
 .../main/java/org/apache/hadoop/ipc/Client.java| 25 --
 .../org/apache/hadoop/ipc/ProtobufRpcEngine.java   | 16 +++---
 .../hadoop/ipc/metrics/RpcDetailedMetrics.java | 12 ---
 3 files changed, 7 insertions(+), 46 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index dcb3633..9ee0647 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
-import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
 import org.apache.hadoop.ipc.RPC.RpcKind;
 import org.apache.hadoop.ipc.Server.AuthProtocol;
 import 
org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
@@ -87,7 +86,6 @@ import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
 public class Client implements AutoCloseable {
   
   public static final Logger LOG = LoggerFactory.getLogger(Client.class);
-  private final RpcDetailedMetrics rpcDetailedMetrics;
 
   /** A counter for generating call IDs. */
   private static final AtomicInteger callIdCounter = new AtomicInteger();
@@ -210,24 +208,6 @@ public class Client implements AutoCloseable {
   };
   
   /**
-   * Update a particular metric by recording the processing
-   * time of the metric.
-   *
-   * @param name Metric name
-   * @param processingTime time spent in processing the metric.
-   */
-  public void updateMetrics(String name, long processingTime) {
-rpcDetailedMetrics.addProcessingTime(name, processingTime);
-  }
-
-  /**
-   * Get the RpcDetailedMetrics associated with the Client.
-   */
-  public RpcDetailedMetrics getRpcDetailedMetrics() {
-return rpcDetailedMetrics;
-  }
-
-  /**
* set the ping interval value in configuration
* 
* @param conf Configuration
@@ -1315,11 +1295,6 @@ public class Client implements AutoCloseable {
 this.maxAsyncCalls = conf.getInt(
 CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
 CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_DEFAULT);
-/**
- * Create with port of -1, dummy port since the function
- * takes default argument.
- */
-this.rpcDetailedMetrics = RpcDetailedMetrics.create(-1);
   }
 
   /**
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index fa3b61a..639bbad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -49,8 +49,6 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.hadoop.metrics2.MetricStringBuilder;
-import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 
 /**
  * RPC Engine for for protobuf based RPCs.
@@ -192,7 +190,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 throws ServiceException {
   long startTime = 0;
   if (LOG.isDebugEnabled()) {
-startTime = System.currentTimeMillis();
+startTime = Time.now();
   }
   
   if (args.length != 2) { // RpcController + Message
@@ -247,16 +245,8 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 
   if (LOG.isDebugEnabled()) {
-long callTime = System.currentTimeMillis() - startTime;
-if (callTime > 0) {
-  MetricStringBuilder rb =
-  new MetricStringBuilder(null, "", " = ", "\n");
-  client.updateMetrics(method.getName(), callTime);
-  MutableRatesWithAggregation rates =
-  client.getRpcDetailedMetrics().getMutableRates();
-  rates.snapshot(rb, t

[hadoop] branch branch-3.1.2 updated: Revert "HDFS-14084. Need for more stats in DFSClient. Contributed by Pranay Singh."

2019-01-09 Thread jlowe
This is an automated email from the ASF dual-hosted git repository.

jlowe pushed a commit to branch branch-3.1.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1.2 by this push:
 new 9f88af7  Revert "HDFS-14084. Need for more stats in DFSClient. 
Contributed by Pranay Singh."
9f88af7 is described below

commit 9f88af79029606b29a5d0ff9bea661a2a83a1dfb
Author: Jason Lowe 
AuthorDate: Wed Jan 9 17:52:26 2019 -0600

Revert "HDFS-14084. Need for more stats in DFSClient. Contributed by Pranay 
Singh."

This reverts commit e8e55839a0c2b5479d7a25256ed1db751e09c122.
---
 .../main/java/org/apache/hadoop/ipc/Client.java| 25 --
 .../org/apache/hadoop/ipc/ProtobufRpcEngine.java   | 16 +++---
 .../hadoop/ipc/metrics/RpcDetailedMetrics.java | 12 ---
 3 files changed, 7 insertions(+), 46 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index f01ac30..07a2f13 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
-import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
 import org.apache.hadoop.ipc.RPC.RpcKind;
 import org.apache.hadoop.ipc.Server.AuthProtocol;
 import 
org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
@@ -87,7 +86,6 @@ import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
 public class Client implements AutoCloseable {
   
   public static final Logger LOG = LoggerFactory.getLogger(Client.class);
-  private final RpcDetailedMetrics rpcDetailedMetrics;
 
   /** A counter for generating call IDs. */
   private static final AtomicInteger callIdCounter = new AtomicInteger();
@@ -211,24 +209,6 @@ public class Client implements AutoCloseable {
   };
   
   /**
-   * Update a particular metric by recording the processing
-   * time of the metric.
-   *
-   * @param name Metric name
-   * @param processingTime time spent in processing the metric.
-   */
-  public void updateMetrics(String name, long processingTime) {
-rpcDetailedMetrics.addProcessingTime(name, processingTime);
-  }
-
-  /**
-   * Get the RpcDetailedMetrics associated with the Client.
-   */
-  public RpcDetailedMetrics getRpcDetailedMetrics() {
-return rpcDetailedMetrics;
-  }
-
-  /**
* set the ping interval value in configuration
* 
* @param conf Configuration
@@ -1321,11 +1301,6 @@ public class Client implements AutoCloseable {
 this.maxAsyncCalls = conf.getInt(
 CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
 CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_DEFAULT);
-/**
- * Create with port of -1, dummy port since the function
- * takes default argument.
- */
-this.rpcDetailedMetrics = RpcDetailedMetrics.create(-1);
   }
 
   /**
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index fa3b61a..639bbad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -49,8 +49,6 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.hadoop.metrics2.MetricStringBuilder;
-import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 
 /**
  * RPC Engine for for protobuf based RPCs.
@@ -192,7 +190,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 throws ServiceException {
   long startTime = 0;
   if (LOG.isDebugEnabled()) {
-startTime = System.currentTimeMillis();
+startTime = Time.now();
   }
   
   if (args.length != 2) { // RpcController + Message
@@ -247,16 +245,8 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 
   if (LOG.isDebugEnabled()) {
-long callTime = System.currentTimeMillis() - startTime;
-if (callTime > 0) {
-  MetricStringBuilder rb =
-  new MetricStringBuilder(null, "", " = ", "\n");
-  client.updateMetrics(method.getName(), callTime);
-  MutableRatesWithAggregation rates =
-  client.getRpcDetailedMetrics().getMutableRates();
-  rates.snapshot(rb, t

[hadoop] branch branch-3.1 updated: Revert "HDFS-14084. Need for more stats in DFSClient. Contributed by Pranay Singh."

2019-01-09 Thread jlowe
This is an automated email from the ASF dual-hosted git repository.

jlowe pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new d8aac6c  Revert "HDFS-14084. Need for more stats in DFSClient. 
Contributed by Pranay Singh."
d8aac6c is described below

commit d8aac6c2156d4217c2a95cd15c25ca88cf2fcfbd
Author: Jason Lowe 
AuthorDate: Wed Jan 9 17:48:16 2019 -0600

Revert "HDFS-14084. Need for more stats in DFSClient. Contributed by Pranay 
Singh."

This reverts commit e8e55839a0c2b5479d7a25256ed1db751e09c122.
---
 .../main/java/org/apache/hadoop/ipc/Client.java| 25 --
 .../org/apache/hadoop/ipc/ProtobufRpcEngine.java   | 16 +++---
 .../hadoop/ipc/metrics/RpcDetailedMetrics.java | 12 ---
 3 files changed, 7 insertions(+), 46 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index f01ac30..07a2f13 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
-import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
 import org.apache.hadoop.ipc.RPC.RpcKind;
 import org.apache.hadoop.ipc.Server.AuthProtocol;
 import 
org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
@@ -87,7 +86,6 @@ import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
 public class Client implements AutoCloseable {
   
   public static final Logger LOG = LoggerFactory.getLogger(Client.class);
-  private final RpcDetailedMetrics rpcDetailedMetrics;
 
   /** A counter for generating call IDs. */
   private static final AtomicInteger callIdCounter = new AtomicInteger();
@@ -211,24 +209,6 @@ public class Client implements AutoCloseable {
   };
   
   /**
-   * Update a particular metric by recording the processing
-   * time of the metric.
-   *
-   * @param name Metric name
-   * @param processingTime time spent in processing the metric.
-   */
-  public void updateMetrics(String name, long processingTime) {
-rpcDetailedMetrics.addProcessingTime(name, processingTime);
-  }
-
-  /**
-   * Get the RpcDetailedMetrics associated with the Client.
-   */
-  public RpcDetailedMetrics getRpcDetailedMetrics() {
-return rpcDetailedMetrics;
-  }
-
-  /**
* set the ping interval value in configuration
* 
* @param conf Configuration
@@ -1321,11 +1301,6 @@ public class Client implements AutoCloseable {
 this.maxAsyncCalls = conf.getInt(
 CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
 CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_DEFAULT);
-/**
- * Create with port of -1, dummy port since the function
- * takes default argument.
- */
-this.rpcDetailedMetrics = RpcDetailedMetrics.create(-1);
   }
 
   /**
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index fa3b61a..639bbad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -49,8 +49,6 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.hadoop.metrics2.MetricStringBuilder;
-import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 
 /**
  * RPC Engine for for protobuf based RPCs.
@@ -192,7 +190,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 throws ServiceException {
   long startTime = 0;
   if (LOG.isDebugEnabled()) {
-startTime = System.currentTimeMillis();
+startTime = Time.now();
   }
   
   if (args.length != 2) { // RpcController + Message
@@ -247,16 +245,8 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 
   if (LOG.isDebugEnabled()) {
-long callTime = System.currentTimeMillis() - startTime;
-if (callTime > 0) {
-  MetricStringBuilder rb =
-  new MetricStringBuilder(null, "", " = ", "\n");
-  client.updateMetrics(method.getName(), callTime);
-  MutableRatesWithAggregation rates =
-  client.getRpcDetailedMetrics().getMutableRates();
-  rates.snapshot(rb, t

[hadoop] branch branch-3.2 updated: Revert "HDFS-14084. Need for more stats in DFSClient. Contributed by Pranay Singh."

2019-01-09 Thread jlowe
This is an automated email from the ASF dual-hosted git repository.

jlowe pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 6236de9  Revert "HDFS-14084. Need for more stats in DFSClient. 
Contributed by Pranay Singh."
6236de9 is described below

commit 6236de9e23d06bcf3e955a7df9e5d5fb9f78e25b
Author: Jason Lowe 
AuthorDate: Wed Jan 9 17:41:52 2019 -0600

Revert "HDFS-14084. Need for more stats in DFSClient. Contributed by Pranay 
Singh."

This reverts commit 1f39eae7e6f59206b86f96063ffb2ebe15a9cbe1.
---
 .../main/java/org/apache/hadoop/ipc/Client.java| 25 --
 .../org/apache/hadoop/ipc/ProtobufRpcEngine.java   | 16 +++---
 .../hadoop/ipc/metrics/RpcDetailedMetrics.java | 12 ---
 3 files changed, 7 insertions(+), 46 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index f01ac30..07a2f13 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
-import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
 import org.apache.hadoop.ipc.RPC.RpcKind;
 import org.apache.hadoop.ipc.Server.AuthProtocol;
 import 
org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
@@ -87,7 +86,6 @@ import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
 public class Client implements AutoCloseable {
   
   public static final Logger LOG = LoggerFactory.getLogger(Client.class);
-  private final RpcDetailedMetrics rpcDetailedMetrics;
 
   /** A counter for generating call IDs. */
   private static final AtomicInteger callIdCounter = new AtomicInteger();
@@ -211,24 +209,6 @@ public class Client implements AutoCloseable {
   };
   
   /**
-   * Update a particular metric by recording the processing
-   * time of the metric.
-   *
-   * @param name Metric name
-   * @param processingTime time spent in processing the metric.
-   */
-  public void updateMetrics(String name, long processingTime) {
-rpcDetailedMetrics.addProcessingTime(name, processingTime);
-  }
-
-  /**
-   * Get the RpcDetailedMetrics associated with the Client.
-   */
-  public RpcDetailedMetrics getRpcDetailedMetrics() {
-return rpcDetailedMetrics;
-  }
-
-  /**
* set the ping interval value in configuration
* 
* @param conf Configuration
@@ -1321,11 +1301,6 @@ public class Client implements AutoCloseable {
 this.maxAsyncCalls = conf.getInt(
 CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
 CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_DEFAULT);
-/**
- * Create with port of -1, dummy port since the function
- * takes default argument.
- */
-this.rpcDetailedMetrics = RpcDetailedMetrics.create(-1);
   }
 
   /**
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index e21c6d1..70fde60 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -49,8 +49,6 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.hadoop.metrics2.MetricStringBuilder;
-import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 
 /**
  * RPC Engine for for protobuf based RPCs.
@@ -192,7 +190,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 throws ServiceException {
   long startTime = 0;
   if (LOG.isDebugEnabled()) {
-startTime = System.currentTimeMillis();
+startTime = Time.now();
   }
   
   if (args.length != 2) { // RpcController + Message
@@ -247,16 +245,8 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 
   if (LOG.isDebugEnabled()) {
-long callTime = System.currentTimeMillis() - startTime;
-if (callTime > 0) {
-  MetricStringBuilder rb =
-  new MetricStringBuilder(null, "", " = ", "\n");
-  client.updateMetrics(method.getName(), callTime);
-  MutableRatesWithAggregation rates =
-  client.getRpcDetailedMetrics().getMutableRates();
-  rates.snapshot(rb, t

[hadoop] branch trunk updated: Revert "HDFS-14084. Need for more stats in DFSClient. Contributed by Pranay Singh."

2019-01-09 Thread jlowe
This is an automated email from the ASF dual-hosted git repository.

jlowe pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c634589  Revert "HDFS-14084. Need for more stats in DFSClient. 
Contributed by Pranay Singh."
c634589 is described below

commit c634589ab2d602bf80ba513f88d44544e9bedcb5
Author: Jason Lowe 
AuthorDate: Wed Jan 9 17:24:58 2019 -0600

Revert "HDFS-14084. Need for more stats in DFSClient. Contributed by Pranay 
Singh."

This reverts commit ecdeaa7e6ad43555031aed032e6ba7a14a17d7bc.
---
 .../main/java/org/apache/hadoop/ipc/Client.java| 25 --
 .../org/apache/hadoop/ipc/ProtobufRpcEngine.java   | 16 +++---
 .../hadoop/ipc/metrics/RpcDetailedMetrics.java | 12 ---
 3 files changed, 7 insertions(+), 46 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 124d068..ea79887 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
-import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
 import org.apache.hadoop.ipc.RPC.RpcKind;
 import org.apache.hadoop.ipc.Server.AuthProtocol;
 import 
org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
@@ -87,7 +86,6 @@ import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
 public class Client implements AutoCloseable {
   
   public static final Logger LOG = LoggerFactory.getLogger(Client.class);
-  private final RpcDetailedMetrics rpcDetailedMetrics;
 
   /** A counter for generating call IDs. */
   private static final AtomicInteger callIdCounter = new AtomicInteger();
@@ -211,24 +209,6 @@ public class Client implements AutoCloseable {
   };
   
   /**
-   * Update a particular metric by recording the processing
-   * time of the metric.
-   *
-   * @param name Metric name
-   * @param processingTime time spent in processing the metric.
-   */
-  public void updateMetrics(String name, long processingTime) {
-rpcDetailedMetrics.addProcessingTime(name, processingTime);
-  }
-
-  /**
-   * Get the RpcDetailedMetrics associated with the Client.
-   */
-  public RpcDetailedMetrics getRpcDetailedMetrics() {
-return rpcDetailedMetrics;
-  }
-
-  /**
* set the ping interval value in configuration
* 
* @param conf Configuration
@@ -1334,11 +1314,6 @@ public class Client implements AutoCloseable {
 this.maxAsyncCalls = conf.getInt(
 CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
 CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_DEFAULT);
-/**
- * Create with port of -1, dummy port since the function
- * takes default argument.
- */
-this.rpcDetailedMetrics = RpcDetailedMetrics.create(-1);
   }
 
   /**
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index e52dc66..5548566 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -49,8 +49,6 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.hadoop.metrics2.MetricStringBuilder;
-import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 
 /**
  * RPC Engine for for protobuf based RPCs.
@@ -197,7 +195,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 throws ServiceException {
   long startTime = 0;
   if (LOG.isDebugEnabled()) {
-startTime = System.currentTimeMillis();
+startTime = Time.now();
   }
   
   if (args.length != 2) { // RpcController + Message
@@ -252,16 +250,8 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 
   if (LOG.isDebugEnabled()) {
-long callTime = System.currentTimeMillis() - startTime;
-if (callTime > 0) {
-  MetricStringBuilder rb =
-  new MetricStringBuilder(null, "", " = ", "\n");
-  client.updateMetrics(method.getName(), callTime);
-  MutableRatesWithAggregation rates =
-  client.getRpcDetailedMetrics().getMutableRates();
-  rates.snapshot(rb, t

[hadoop] branch trunk updated: YARN-6523. Optimize system credentials sent in node heartbeat responses. Contributed by Manikandan R

2019-01-08 Thread jlowe
This is an automated email from the ASF dual-hosted git repository.

jlowe pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6a92346  YARN-6523. Optimize system credentials sent in node heartbeat 
responses. Contributed by Manikandan R
6a92346 is described below

commit 6a923464afa6b635f505be5d5b2414d47d570f99
Author: Jason Lowe 
AuthorDate: Tue Jan 8 16:51:06 2019 -0600

YARN-6523. Optimize system credentials sent in node heartbeat responses. 
Contributed by Manikandan R
---
 .../yarn/api/records/impl/pb/ProtoUtils.java   |  18 +++
 .../api/protocolrecords/NodeHeartbeatRequest.java  |   4 +
 .../api/protocolrecords/NodeHeartbeatResponse.java |  21 +--
 .../impl/pb/NodeHeartbeatRequestPBImpl.java|  13 ++
 .../impl/pb/NodeHeartbeatResponsePBImpl.java   |  91 
 .../yarn/server/utils/YarnServerBuilderUtils.java  |  68 +
 .../proto/yarn_server_common_service_protos.proto  |   2 +
 .../hadoop/yarn/TestYarnServerApiClasses.java  |  55 +++-
 .../api/protocolrecords/TestProtocolRecords.java   |  21 +--
 .../server/nodemanager/NodeStatusUpdaterImpl.java  |   9 +-
 .../server/nodemanager/TestNodeStatusUpdater.java  |  32 -
 .../resourcemanager/RMActiveServiceContext.java|  29 +++-
 .../yarn/server/resourcemanager/RMContext.java |  11 +-
 .../yarn/server/resourcemanager/RMContextImpl.java |  15 +-
 .../resourcemanager/ResourceTrackerService.java|  33 -
 .../security/DelegationTokenRenewer.java   |  17 ++-
 .../hadoop/yarn/server/resourcemanager/MockNM.java |   3 +
 .../yarn/server/resourcemanager/MockNodes.java |  47 +-
 .../TestResourceTrackerService.java| 157 +
 .../security/TestDelegationTokenRenewer.java   | 110 +--
 20 files changed, 632 insertions(+), 124 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
index 4008a97..f175cf3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
@@ -30,6 +30,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.AMCommand;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -62,6 +63,7 @@ import 
org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementCon
 import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnProtos.AMCommandProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAccessTypeProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
 import 
org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationTimeoutTypeProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
@@ -92,12 +94,16 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerUpdateTypeProto;
 import org.apache.hadoop.yarn.server.api.ContainerType;
 
+import com.google.common.collect.Interner;
+import com.google.common.collect.Interners;
 import com.google.protobuf.ByteString;
 
 @Private
 @Unstable
 public class ProtoUtils {
 
+  public static final Interner BYTE_STRING_INTERNER =
+  Interners.newWeakInterner();
 
   /*
* ContainerState
@@ -578,6 +584,18 @@ public class ProtoUtils {
   TimedPlacementConstraintProto.DelayUnit u) {
 return TimedPlacementConstraint.DelayUnit.valueOf(u.name());
   }
+
+  /*
+   * ApplicationId
+   */
+  public static ApplicationIdPBImpl convertFromProtoFormat(
+  ApplicationIdProto p) {
+return new ApplicationIdPBImpl(p);
+  }
+
+  public static ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+return ((ApplicationIdPBImpl) t).getProto();
+  }
 }
 
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
index 4f99225

hadoop git commit: HADOOP-15973. Configuration: Included properties are not cached if resource is a stream. Contributed by Eric Payne

2018-12-20 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 90dc66553 -> 91a4fb54f


HADOOP-15973. Configuration: Included properties are not cached if resource is 
a stream. Contributed by Eric Payne

(cherry picked from commit d62bfaf1a409c7cd034f9e76e97a61a77d517a25)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91a4fb54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91a4fb54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91a4fb54

Branch: refs/heads/branch-2.9
Commit: 91a4fb54f13a29dc2af76fc7582ee79a063127ba
Parents: 90dc665
Author: Jason Lowe 
Authored: Thu Dec 20 10:02:18 2018 -0600
Committer: Jason Lowe 
Committed: Thu Dec 20 10:03:01 2018 -0600

--
 .../org/apache/hadoop/conf/Configuration.java   |  6 +-
 .../apache/hadoop/conf/TestConfiguration.java   | 99 
 2 files changed, 103 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91a4fb54/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 235cc50..9ee1baf 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -2848,10 +2848,11 @@ public class Configuration implements 
Iterable>,
 // otherwise fallback to a file resource
 // xi:include are treated as inline and retain current source
 URL include = getResource(confInclude);
+Properties tmpProps = new Properties();
 if (include != null) {
   Resource classpathResource = new Resource(include, name,
   wrapper.isParserRestricted());
-  loadResource(properties, classpathResource, quiet);
+  loadResource(tmpProps, classpathResource, quiet);
 } else {
   URL url;
   try {
@@ -2873,8 +2874,9 @@ public class Configuration implements 
Iterable>,
   }
   Resource uriResource = new Resource(url, name,
   wrapper.isParserRestricted());
-  loadResource(properties, uriResource, quiet);
+  loadResource(tmpProps, uriResource, quiet);
 }
+toAddTo.putAll(tmpProps);
 break;
   case "fallback":
 fallbackEntered = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91a4fb54/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 214be63..6fcff58 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -17,10 +17,12 @@
  */
 package org.apache.hadoop.conf;
 
+import java.io.BufferedInputStream;
 import java.io.BufferedWriter;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.FileWriter;
 import java.io.IOException;
@@ -773,6 +775,103 @@ public class TestConfiguration extends TestCase {
 tearDown();
   }
 
+  // When a resource is parsed as an input stream the first time, included
+  // properties are saved within the config. However, the included properties
+  // are not cached in the resource object. So, if an additional resource is
+  // added after the config is parsed the first time, the config loses the
+// properties that were included from the first resource.
+  public void testIncludesFromInputStreamWhenResourceAdded() throws Exception {
+tearDown();
+
+// CONFIG includes CONFIG2. CONFIG2 includes CONFIG_FOR_ENUM
+out=new BufferedWriter(new FileWriter(CONFIG_FOR_ENUM));
+startConfig();
+appendProperty("e", "SecondLevelInclude");
+appendProperty("f", "SecondLevelInclude");
+endConfig();
+
+out=new BufferedWriter(new FileWriter(CONFIG2));
+startConfig();
+startInclude(CONFIG_FOR_ENUM);
+endInclude();
+appendProperty("c","FirstLevelInclude");
+appendProperty("d","FirstLevelInclude");
+endConfig();
+
+out=new BufferedWriter(new 

hadoop git commit: HADOOP-15973. Configuration: Included properties are not cached if resource is a stream. Contributed by Eric Payne

2018-12-20 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 500b2a0ca -> d62bfaf1a


HADOOP-15973. Configuration: Included properties are not cached if resource is 
a stream. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d62bfaf1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d62bfaf1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d62bfaf1

Branch: refs/heads/branch-2
Commit: d62bfaf1a409c7cd034f9e76e97a61a77d517a25
Parents: 500b2a0
Author: Jason Lowe 
Authored: Thu Dec 20 10:02:18 2018 -0600
Committer: Jason Lowe 
Committed: Thu Dec 20 10:02:18 2018 -0600

--
 .../org/apache/hadoop/conf/Configuration.java   |  6 +-
 .../apache/hadoop/conf/TestConfiguration.java   | 99 
 2 files changed, 103 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62bfaf1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 235cc50..9ee1baf 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -2848,10 +2848,11 @@ public class Configuration implements 
Iterable>,
 // otherwise fallback to a file resource
 // xi:include are treated as inline and retain current source
 URL include = getResource(confInclude);
+Properties tmpProps = new Properties();
 if (include != null) {
   Resource classpathResource = new Resource(include, name,
   wrapper.isParserRestricted());
-  loadResource(properties, classpathResource, quiet);
+  loadResource(tmpProps, classpathResource, quiet);
 } else {
   URL url;
   try {
@@ -2873,8 +2874,9 @@ public class Configuration implements 
Iterable>,
   }
   Resource uriResource = new Resource(url, name,
   wrapper.isParserRestricted());
-  loadResource(properties, uriResource, quiet);
+  loadResource(tmpProps, uriResource, quiet);
 }
+toAddTo.putAll(tmpProps);
 break;
   case "fallback":
 fallbackEntered = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62bfaf1/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 214be63..6fcff58 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -17,10 +17,12 @@
  */
 package org.apache.hadoop.conf;
 
+import java.io.BufferedInputStream;
 import java.io.BufferedWriter;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.FileWriter;
 import java.io.IOException;
@@ -773,6 +775,103 @@ public class TestConfiguration extends TestCase {
 tearDown();
   }
 
+  // When a resource is parsed as an input stream the first time, included
+  // properties are saved within the config. However, the included properties
+  // are not cached in the resource object. So, if an additional resource is
+  // added after the config is parsed the first time, the config loses the
+// properties that were included from the first resource.
+  public void testIncludesFromInputStreamWhenResourceAdded() throws Exception {
+tearDown();
+
+// CONFIG includes CONFIG2. CONFIG2 includes CONFIG_FOR_ENUM
+out=new BufferedWriter(new FileWriter(CONFIG_FOR_ENUM));
+startConfig();
+appendProperty("e", "SecondLevelInclude");
+appendProperty("f", "SecondLevelInclude");
+endConfig();
+
+out=new BufferedWriter(new FileWriter(CONFIG2));
+startConfig();
+startInclude(CONFIG_FOR_ENUM);
+endInclude();
+appendProperty("c","FirstLevelInclude");
+appendProperty("d","FirstLevelInclude");
+endConfig();
+
+out=new BufferedWriter(new FileWriter(CONFIG));
+startConfig();
+startInclude(CONFIG2);
+

hadoop git commit: HADOOP-15973. Configuration: Included properties are not cached if resource is a stream. Contributed by Eric Payne

2018-12-20 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 e38e184a1 -> c9fdf7503


HADOOP-15973. Configuration: Included properties are not cached if resource is 
a stream. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9fdf750
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9fdf750
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9fdf750

Branch: refs/heads/branch-3.0
Commit: c9fdf7503e0a098432a8708043841c17c07812b6
Parents: e38e184
Author: Jason Lowe 
Authored: Thu Dec 20 10:00:17 2018 -0600
Committer: Jason Lowe 
Committed: Thu Dec 20 10:00:17 2018 -0600

--
 .../org/apache/hadoop/conf/Configuration.java   |  78 --
 .../apache/hadoop/conf/TestConfiguration.java   | 101 +++
 2 files changed, 150 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9fdf750/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 0699f83..de6d89a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -41,7 +41,6 @@ import java.io.Writer;
 import java.lang.ref.WeakReference;
 import java.net.InetSocketAddress;
 import java.net.JarURLConnection;
-import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.ArrayList;
@@ -2904,36 +2903,15 @@ public class Configuration implements 
Iterable>,
 try {
   Object resource = wrapper.getResource();
   name = wrapper.getName();
-  XMLStreamReader2 reader = null;
   boolean returnCachedProperties = false;
-  boolean isRestricted = wrapper.isParserRestricted();
-
-  if (resource instanceof URL) {  // an URL resource
-reader = (XMLStreamReader2)parse((URL)resource, isRestricted);
-  } else if (resource instanceof String) {// a CLASSPATH resource
-URL url = getResource((String)resource);
-reader = (XMLStreamReader2)parse(url, isRestricted);
-  } else if (resource instanceof Path) {  // a file resource
-// Can't use FileSystem API or we get an infinite loop
-// since FileSystem uses Configuration API.  Use java.io.File instead.
-File file = new File(((Path)resource).toUri().getPath())
-  .getAbsoluteFile();
-if (file.exists()) {
-  if (!quiet) {
-LOG.debug("parsing File " + file);
-  }
-  reader = (XMLStreamReader2)parse(new BufferedInputStream(
-  new FileInputStream(file)), ((Path)resource).toString(),
-  isRestricted);
-}
-  } else if (resource instanceof InputStream) {
-reader = (XMLStreamReader2)parse((InputStream)resource, null,
-isRestricted);
+
+  if (resource instanceof InputStream) {
 returnCachedProperties = true;
   } else if (resource instanceof Properties) {
 overlay(properties, (Properties)resource);
   }
 
+  XMLStreamReader2 reader = getStreamReader(wrapper, quiet);
   if (reader == null) {
 if (quiet) {
   return null;
@@ -2966,6 +2944,36 @@ public class Configuration implements 
Iterable>,
 }
   }
 
+  private XMLStreamReader2 getStreamReader(Resource wrapper, boolean quiet)
+  throws XMLStreamException, IOException {
+Object resource = wrapper.getResource();
+boolean isRestricted = wrapper.isParserRestricted();
+XMLStreamReader2 reader = null;
+if (resource instanceof URL) {  // an URL resource
+  reader  = (XMLStreamReader2)parse((URL)resource, isRestricted);
+} else if (resource instanceof String) {// a CLASSPATH resource
+  URL url = getResource((String)resource);
+  reader = (XMLStreamReader2)parse(url, isRestricted);
+} else if (resource instanceof Path) {  // a file resource
+  // Can't use FileSystem API or we get an infinite loop
+  // since FileSystem uses Configuration API.  Use java.io.File instead.
+  File file = new File(((Path)resource).toUri().getPath())
+.getAbsoluteFile();
+  if (file.exists()) {
+if (!quiet) {
+  LOG.debug("parsing File " + file);
+}
+reader = (XMLStreamReader2)parse(new BufferedInputStream(
+new FileInputStream(file)), ((Path)resource).toString(),
+isRestricted);
+  }
+} else 

hadoop git commit: HADOOP-15973. Configuration: Included properties are not cached if resource is a stream. Contributed by Eric Payne

2018-12-20 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 9f30916a1 -> 5c5c4438a


HADOOP-15973. Configuration: Included properties are not cached if resource is 
a stream. Contributed by Eric Payne

(cherry picked from commit 3961690037fa4274a3640104b3c344e8626caa10)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c5c4438
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c5c4438
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c5c4438

Branch: refs/heads/branch-3.1
Commit: 5c5c4438adc7dd5d05c99a06ca11b8bb741608d6
Parents: 9f30916
Author: Jason Lowe 
Authored: Thu Dec 20 09:29:48 2018 -0600
Committer: Jason Lowe 
Committed: Thu Dec 20 09:56:12 2018 -0600

--
 .../org/apache/hadoop/conf/Configuration.java   |  78 +--
 .../apache/hadoop/conf/TestConfiguration.java   | 100 +++
 2 files changed, 149 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5c4438/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index bde2bab..af55b7a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -41,7 +41,6 @@ import java.io.Writer;
 import java.lang.ref.WeakReference;
 import java.net.InetSocketAddress;
 import java.net.JarURLConnection;
-import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.ArrayList;
@@ -2942,36 +2941,15 @@ public class Configuration implements 
Iterable>,
 try {
   Object resource = wrapper.getResource();
   name = wrapper.getName();
-  XMLStreamReader2 reader = null;
   boolean returnCachedProperties = false;
-  boolean isRestricted = wrapper.isParserRestricted();
-
-  if (resource instanceof URL) {  // an URL resource
-reader = (XMLStreamReader2)parse((URL)resource, isRestricted);
-  } else if (resource instanceof String) {// a CLASSPATH resource
-URL url = getResource((String)resource);
-reader = (XMLStreamReader2)parse(url, isRestricted);
-  } else if (resource instanceof Path) {  // a file resource
-// Can't use FileSystem API or we get an infinite loop
-// since FileSystem uses Configuration API.  Use java.io.File instead.
-File file = new File(((Path)resource).toUri().getPath())
-  .getAbsoluteFile();
-if (file.exists()) {
-  if (!quiet) {
-LOG.debug("parsing File " + file);
-  }
-  reader = (XMLStreamReader2)parse(new BufferedInputStream(
-  new FileInputStream(file)), ((Path)resource).toString(),
-  isRestricted);
-}
-  } else if (resource instanceof InputStream) {
-reader = (XMLStreamReader2)parse((InputStream)resource, null,
-isRestricted);
+
+  if (resource instanceof InputStream) {
 returnCachedProperties = true;
   } else if (resource instanceof Properties) {
 overlay(properties, (Properties)resource);
   }
 
+  XMLStreamReader2 reader = getStreamReader(wrapper, quiet);
   if (reader == null) {
 if (quiet) {
   return null;
@@ -3004,6 +2982,36 @@ public class Configuration implements 
Iterable>,
 }
   }
 
+  private XMLStreamReader2 getStreamReader(Resource wrapper, boolean quiet)
+  throws XMLStreamException, IOException {
+Object resource = wrapper.getResource();
+boolean isRestricted = wrapper.isParserRestricted();
+XMLStreamReader2 reader = null;
+if (resource instanceof URL) {  // an URL resource
+  reader  = (XMLStreamReader2)parse((URL)resource, isRestricted);
+} else if (resource instanceof String) {// a CLASSPATH resource
+  URL url = getResource((String)resource);
+  reader = (XMLStreamReader2)parse(url, isRestricted);
+} else if (resource instanceof Path) {  // a file resource
+  // Can't use FileSystem API or we get an infinite loop
+  // since FileSystem uses Configuration API.  Use java.io.File instead.
+  File file = new File(((Path)resource).toUri().getPath())
+.getAbsoluteFile();
+  if (file.exists()) {
+if (!quiet) {
+  LOG.debug("parsing File " + file);
+}
+reader = (XMLStreamReader2)parse(new BufferedInputStream(
+new FileInputStream(file)), 

hadoop git commit: HADOOP-15973. Configuration: Included properties are not cached if resource is a stream. Contributed by Eric Payne

2018-12-20 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2.0 829b530a0 -> fcbb22c69


HADOOP-15973. Configuration: Included properties are not cached if resource is 
a stream. Contributed by Eric Payne

(cherry picked from commit 3961690037fa4274a3640104b3c344e8626caa10)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fcbb22c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fcbb22c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fcbb22c6

Branch: refs/heads/branch-3.2.0
Commit: fcbb22c690d69bca26f94c8ff138e271e08d8a86
Parents: 829b530
Author: Jason Lowe 
Authored: Thu Dec 20 09:29:48 2018 -0600
Committer: Jason Lowe 
Committed: Thu Dec 20 09:46:41 2018 -0600

--
 .../org/apache/hadoop/conf/Configuration.java   |  78 +--
 .../apache/hadoop/conf/TestConfiguration.java   | 100 +++
 2 files changed, 149 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcbb22c6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index a78e311..50810ab 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -41,7 +41,6 @@ import java.io.Writer;
 import java.lang.ref.WeakReference;
 import java.net.InetSocketAddress;
 import java.net.JarURLConnection;
-import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.ArrayList;
@@ -2942,36 +2941,15 @@ public class Configuration implements 
Iterable>,
 try {
   Object resource = wrapper.getResource();
   name = wrapper.getName();
-  XMLStreamReader2 reader = null;
   boolean returnCachedProperties = false;
-  boolean isRestricted = wrapper.isParserRestricted();
-
-  if (resource instanceof URL) {  // an URL resource
-reader = (XMLStreamReader2)parse((URL)resource, isRestricted);
-  } else if (resource instanceof String) {// a CLASSPATH resource
-URL url = getResource((String)resource);
-reader = (XMLStreamReader2)parse(url, isRestricted);
-  } else if (resource instanceof Path) {  // a file resource
-// Can't use FileSystem API or we get an infinite loop
-// since FileSystem uses Configuration API.  Use java.io.File instead.
-File file = new File(((Path)resource).toUri().getPath())
-  .getAbsoluteFile();
-if (file.exists()) {
-  if (!quiet) {
-LOG.debug("parsing File " + file);
-  }
-  reader = (XMLStreamReader2)parse(new BufferedInputStream(
-  new FileInputStream(file)), ((Path)resource).toString(),
-  isRestricted);
-}
-  } else if (resource instanceof InputStream) {
-reader = (XMLStreamReader2)parse((InputStream)resource, null,
-isRestricted);
+
+  if (resource instanceof InputStream) {
 returnCachedProperties = true;
   } else if (resource instanceof Properties) {
 overlay(properties, (Properties)resource);
   }
 
+  XMLStreamReader2 reader = getStreamReader(wrapper, quiet);
   if (reader == null) {
 if (quiet) {
   return null;
@@ -3004,6 +2982,36 @@ public class Configuration implements 
Iterable>,
 }
   }
 
+  private XMLStreamReader2 getStreamReader(Resource wrapper, boolean quiet)
+  throws XMLStreamException, IOException {
+Object resource = wrapper.getResource();
+boolean isRestricted = wrapper.isParserRestricted();
+XMLStreamReader2 reader = null;
+if (resource instanceof URL) {  // an URL resource
+  reader  = (XMLStreamReader2)parse((URL)resource, isRestricted);
+} else if (resource instanceof String) {// a CLASSPATH resource
+  URL url = getResource((String)resource);
+  reader = (XMLStreamReader2)parse(url, isRestricted);
+} else if (resource instanceof Path) {  // a file resource
+  // Can't use FileSystem API or we get an infinite loop
+  // since FileSystem uses Configuration API.  Use java.io.File instead.
+  File file = new File(((Path)resource).toUri().getPath())
+.getAbsoluteFile();
+  if (file.exists()) {
+if (!quiet) {
+  LOG.debug("parsing File " + file);
+}
+reader = (XMLStreamReader2)parse(new BufferedInputStream(
+new FileInputStream(file)), 

hadoop git commit: HADOOP-15973. Configuration: Included properties are not cached if resource is a stream. Contributed by Eric Payne

2018-12-20 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 1b941a680 -> 9c89e2ea7


HADOOP-15973. Configuration: Included properties are not cached if resource is 
a stream. Contributed by Eric Payne

(cherry picked from commit 3961690037fa4274a3640104b3c344e8626caa10)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c89e2ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c89e2ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c89e2ea

Branch: refs/heads/branch-3.2
Commit: 9c89e2ea76a8d20f7a65cb1e54a0ad2da368649b
Parents: 1b941a6
Author: Jason Lowe 
Authored: Thu Dec 20 09:29:48 2018 -0600
Committer: Jason Lowe 
Committed: Thu Dec 20 09:33:22 2018 -0600

--
 .../org/apache/hadoop/conf/Configuration.java   |  78 +--
 .../apache/hadoop/conf/TestConfiguration.java   | 100 +++
 2 files changed, 149 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c89e2ea/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index a78e311..50810ab 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -41,7 +41,6 @@ import java.io.Writer;
 import java.lang.ref.WeakReference;
 import java.net.InetSocketAddress;
 import java.net.JarURLConnection;
-import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.ArrayList;
@@ -2942,36 +2941,15 @@ public class Configuration implements 
Iterable>,
 try {
   Object resource = wrapper.getResource();
   name = wrapper.getName();
-  XMLStreamReader2 reader = null;
   boolean returnCachedProperties = false;
-  boolean isRestricted = wrapper.isParserRestricted();
-
-  if (resource instanceof URL) {  // an URL resource
-reader = (XMLStreamReader2)parse((URL)resource, isRestricted);
-  } else if (resource instanceof String) {// a CLASSPATH resource
-URL url = getResource((String)resource);
-reader = (XMLStreamReader2)parse(url, isRestricted);
-  } else if (resource instanceof Path) {  // a file resource
-// Can't use FileSystem API or we get an infinite loop
-// since FileSystem uses Configuration API.  Use java.io.File instead.
-File file = new File(((Path)resource).toUri().getPath())
-  .getAbsoluteFile();
-if (file.exists()) {
-  if (!quiet) {
-LOG.debug("parsing File " + file);
-  }
-  reader = (XMLStreamReader2)parse(new BufferedInputStream(
-  new FileInputStream(file)), ((Path)resource).toString(),
-  isRestricted);
-}
-  } else if (resource instanceof InputStream) {
-reader = (XMLStreamReader2)parse((InputStream)resource, null,
-isRestricted);
+
+  if (resource instanceof InputStream) {
 returnCachedProperties = true;
   } else if (resource instanceof Properties) {
 overlay(properties, (Properties)resource);
   }
 
+  XMLStreamReader2 reader = getStreamReader(wrapper, quiet);
   if (reader == null) {
 if (quiet) {
   return null;
@@ -3004,6 +2982,36 @@ public class Configuration implements 
Iterable>,
 }
   }
 
+  private XMLStreamReader2 getStreamReader(Resource wrapper, boolean quiet)
+  throws XMLStreamException, IOException {
+Object resource = wrapper.getResource();
+boolean isRestricted = wrapper.isParserRestricted();
+XMLStreamReader2 reader = null;
+if (resource instanceof URL) {  // an URL resource
+  reader  = (XMLStreamReader2)parse((URL)resource, isRestricted);
+} else if (resource instanceof String) {// a CLASSPATH resource
+  URL url = getResource((String)resource);
+  reader = (XMLStreamReader2)parse(url, isRestricted);
+} else if (resource instanceof Path) {  // a file resource
+  // Can't use FileSystem API or we get an infinite loop
+  // since FileSystem uses Configuration API.  Use java.io.File instead.
+  File file = new File(((Path)resource).toUri().getPath())
+.getAbsoluteFile();
+  if (file.exists()) {
+if (!quiet) {
+  LOG.debug("parsing File " + file);
+}
+reader = (XMLStreamReader2)parse(new BufferedInputStream(
+new FileInputStream(file)), 

hadoop git commit: HADOOP-15973. Configuration: Included properties are not cached if resource is a stream. Contributed by Eric Payne

2018-12-20 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk ea621fa08 -> 396169003


HADOOP-15973. Configuration: Included properties are not cached if resource is 
a stream. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39616900
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39616900
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39616900

Branch: refs/heads/trunk
Commit: 3961690037fa4274a3640104b3c344e8626caa10
Parents: ea621fa
Author: Jason Lowe 
Authored: Thu Dec 20 09:29:48 2018 -0600
Committer: Jason Lowe 
Committed: Thu Dec 20 09:29:48 2018 -0600

--
 .../org/apache/hadoop/conf/Configuration.java   |  78 +--
 .../apache/hadoop/conf/TestConfiguration.java   | 100 +++
 2 files changed, 149 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39616900/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 478e56e..c1720e6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -41,7 +41,6 @@ import java.io.Writer;
 import java.lang.ref.WeakReference;
 import java.net.InetSocketAddress;
 import java.net.JarURLConnection;
-import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.ArrayList;
@@ -2972,36 +2971,15 @@ public class Configuration implements 
Iterable>,
 try {
   Object resource = wrapper.getResource();
   name = wrapper.getName();
-  XMLStreamReader2 reader = null;
   boolean returnCachedProperties = false;
-  boolean isRestricted = wrapper.isParserRestricted();
-
-  if (resource instanceof URL) {  // an URL resource
-reader = (XMLStreamReader2)parse((URL)resource, isRestricted);
-  } else if (resource instanceof String) {// a CLASSPATH resource
-URL url = getResource((String)resource);
-reader = (XMLStreamReader2)parse(url, isRestricted);
-  } else if (resource instanceof Path) {  // a file resource
-// Can't use FileSystem API or we get an infinite loop
-// since FileSystem uses Configuration API.  Use java.io.File instead.
-File file = new File(((Path)resource).toUri().getPath())
-  .getAbsoluteFile();
-if (file.exists()) {
-  if (!quiet) {
-LOG.debug("parsing File " + file);
-  }
-  reader = (XMLStreamReader2)parse(new BufferedInputStream(
-  new FileInputStream(file)), ((Path)resource).toString(),
-  isRestricted);
-}
-  } else if (resource instanceof InputStream) {
-reader = (XMLStreamReader2)parse((InputStream)resource, null,
-isRestricted);
+
+  if (resource instanceof InputStream) {
 returnCachedProperties = true;
   } else if (resource instanceof Properties) {
 overlay(properties, (Properties)resource);
   }
 
+  XMLStreamReader2 reader = getStreamReader(wrapper, quiet);
   if (reader == null) {
 if (quiet) {
   return null;
@@ -3034,6 +3012,36 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
 }
   }
 
+  private XMLStreamReader2 getStreamReader(Resource wrapper, boolean quiet)
+  throws XMLStreamException, IOException {
+Object resource = wrapper.getResource();
+boolean isRestricted = wrapper.isParserRestricted();
+XMLStreamReader2 reader = null;
+if (resource instanceof URL) {  // an URL resource
+  reader  = (XMLStreamReader2)parse((URL)resource, isRestricted);
+} else if (resource instanceof String) {// a CLASSPATH resource
+  URL url = getResource((String)resource);
+  reader = (XMLStreamReader2)parse(url, isRestricted);
+} else if (resource instanceof Path) {  // a file resource
+  // Can't use FileSystem API or we get an infinite loop
+  // since FileSystem uses Configuration API.  Use java.io.File instead.
+  File file = new File(((Path)resource).toUri().getPath())
+.getAbsoluteFile();
+  if (file.exists()) {
+if (!quiet) {
+  LOG.debug("parsing File " + file);
+}
+reader = (XMLStreamReader2)parse(new BufferedInputStream(
+new FileInputStream(file)), ((Path)resource).toString(),
+isRestricted);
+  }
+} else if 

hadoop git commit: MAPREDUCE-7159. FrameworkUploader: ensure proper permissions of generated framework tar.gz if restrictive umask is used. Contributed by Peter Bacsko

2018-12-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 7ef4ff190 -> 6e6f43afd


MAPREDUCE-7159. FrameworkUploader: ensure proper permissions of generated 
framework tar.gz if restrictive umask is used. Contributed by Peter Bacsko

(cherry picked from commit 9886c27c71147a2bcef87b42ff21c2bbe98c8ab4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e6f43af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e6f43af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e6f43af

Branch: refs/heads/branch-3.1
Commit: 6e6f43afd8ecf9b597ce2c8a9f098486d2e0742a
Parents: 7ef4ff1
Author: Jason Lowe 
Authored: Thu Dec 6 14:48:17 2018 -0600
Committer: Jason Lowe 
Committed: Thu Dec 6 15:02:00 2018 -0600

--
 .../hadoop-mapreduce-client-uploader/pom.xml|  6 ++
 .../mapred/uploader/FrameworkUploader.java  | 52 ++
 .../mapred/uploader/TestFrameworkUploader.java  | 58 
 3 files changed, 116 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e6f43af/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
index 489845a..685fce5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
@@ -40,6 +40,12 @@
 
 
 org.apache.hadoop
+hadoop-hdfs
+test-jar
+test
+
+
+org.apache.hadoop
 hadoop-hdfs-client
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e6f43af/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
index 5316f38..c50ae61 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
@@ -26,12 +26,16 @@ import org.apache.commons.compress.archivers.ArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
@@ -71,6 +75,10 @@ public class FrameworkUploader implements Runnable {
   LoggerFactory.getLogger(FrameworkUploader.class);
   private Configuration conf = new Configuration();
 
+  // Minimal required permissions for the uploaded framework
+  private static final FsPermission FRAMEWORK_PERMISSION =
+  new FsPermission(0644);
+
   @VisibleForTesting
   String input = null;
   @VisibleForTesting
@@ -99,6 +107,7 @@ public class FrameworkUploader implements Runnable {
   List blacklistedFiles = new LinkedList<>();
 
   private OutputStream targetStream = null;
+  private FSDataOutputStream fsDataStream = null;
   private String alias = null;
 
   @VisibleForTesting
@@ -203,11 +212,50 @@ public class FrameworkUploader implements Runnable {
 targetStream = fileSystem.create(targetPath, true);
   }
 
+  if (!FRAMEWORK_PERMISSION.equals(
+  FRAMEWORK_PERMISSION.applyUMask(FsPermission.getUMask(conf)))) {
+LOG.info("Modifying permissions to " + FRAMEWORK_PERMISSION);
+fileSystem.setPermission(targetPath, FRAMEWORK_PERMISSION);
+  }
+
+  fsDataStream = 

hadoop git commit: MAPREDUCE-7159. FrameworkUploader: ensure proper permissions of generated framework tar.gz if restrictive umask is used. Contributed by Peter Bacsko

2018-12-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 8c70728f7 -> 298a1f29e


MAPREDUCE-7159. FrameworkUploader: ensure proper permissions of generated 
framework tar.gz if restrictive umask is used. Contributed by Peter Bacsko

(cherry picked from commit 9886c27c71147a2bcef87b42ff21c2bbe98c8ab4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/298a1f29
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/298a1f29
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/298a1f29

Branch: refs/heads/branch-3.2
Commit: 298a1f29e8cd145e41206364fb77230dd187ddfe
Parents: 8c70728
Author: Jason Lowe 
Authored: Thu Dec 6 14:48:17 2018 -0600
Committer: Jason Lowe 
Committed: Thu Dec 6 14:57:45 2018 -0600

--
 .../hadoop-mapreduce-client-uploader/pom.xml|  6 ++
 .../mapred/uploader/FrameworkUploader.java  | 52 ++
 .../mapred/uploader/TestFrameworkUploader.java  | 58 
 3 files changed, 116 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/298a1f29/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
index 79ae97f..82784f0 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
@@ -40,6 +40,12 @@
 
 
 org.apache.hadoop
+hadoop-hdfs
+test-jar
+test
+
+
+org.apache.hadoop
 hadoop-hdfs-client
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/298a1f29/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
index d2116c0..6f914f7 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
@@ -26,12 +26,16 @@ import org.apache.commons.compress.archivers.ArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
@@ -71,6 +75,10 @@ public class FrameworkUploader implements Runnable {
   LoggerFactory.getLogger(FrameworkUploader.class);
   private Configuration conf = new Configuration();
 
+  // Minimal required permissions for the uploaded framework
+  private static final FsPermission FRAMEWORK_PERMISSION =
+  new FsPermission(0644);
+
   @VisibleForTesting
   String input = null;
   @VisibleForTesting
@@ -99,6 +107,7 @@ public class FrameworkUploader implements Runnable {
   List blacklistedFiles = new LinkedList<>();
 
   private OutputStream targetStream = null;
+  private FSDataOutputStream fsDataStream = null;
   private String alias = null;
 
   @VisibleForTesting
@@ -203,11 +212,50 @@ public class FrameworkUploader implements Runnable {
 targetStream = fileSystem.create(targetPath, true);
   }
 
+  if (!FRAMEWORK_PERMISSION.equals(
+  FRAMEWORK_PERMISSION.applyUMask(FsPermission.getUMask(conf)))) {
+LOG.info("Modifying permissions to " + FRAMEWORK_PERMISSION);
+fileSystem.setPermission(targetPath, FRAMEWORK_PERMISSION);
+  }
+
+  fsDataStream = 

hadoop git commit: MAPREDUCE-7159. FrameworkUploader: ensure proper permissions of generated framework tar.gz if restrictive umask is used. Contributed by Peter Bacsko

2018-12-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 343aaea2d -> 9886c27c7


MAPREDUCE-7159. FrameworkUploader: ensure proper permissions of generated 
framework tar.gz if restrictive umask is used. Contributed by Peter Bacsko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9886c27c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9886c27c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9886c27c

Branch: refs/heads/trunk
Commit: 9886c27c71147a2bcef87b42ff21c2bbe98c8ab4
Parents: 343aaea
Author: Jason Lowe 
Authored: Thu Dec 6 14:48:17 2018 -0600
Committer: Jason Lowe 
Committed: Thu Dec 6 14:48:17 2018 -0600

--
 .../hadoop-mapreduce-client-uploader/pom.xml|  6 ++
 .../mapred/uploader/FrameworkUploader.java  | 52 ++
 .../mapred/uploader/TestFrameworkUploader.java  | 58 
 3 files changed, 116 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9886c27c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
index 5d53592..5c9a24f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
@@ -40,6 +40,12 @@
 
 
 org.apache.hadoop
+hadoop-hdfs
+test-jar
+test
+
+
+org.apache.hadoop
 hadoop-hdfs-client
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9886c27c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
index d2116c0..6f914f7 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
@@ -26,12 +26,16 @@ import org.apache.commons.compress.archivers.ArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
@@ -71,6 +75,10 @@ public class FrameworkUploader implements Runnable {
   LoggerFactory.getLogger(FrameworkUploader.class);
   private Configuration conf = new Configuration();
 
+  // Minimal required permissions for the uploaded framework
+  private static final FsPermission FRAMEWORK_PERMISSION =
+  new FsPermission(0644);
+
   @VisibleForTesting
   String input = null;
   @VisibleForTesting
@@ -99,6 +107,7 @@ public class FrameworkUploader implements Runnable {
   List blacklistedFiles = new LinkedList<>();
 
   private OutputStream targetStream = null;
+  private FSDataOutputStream fsDataStream = null;
   private String alias = null;
 
   @VisibleForTesting
@@ -203,11 +212,50 @@ public class FrameworkUploader implements Runnable {
 targetStream = fileSystem.create(targetPath, true);
   }
 
+  if (!FRAMEWORK_PERMISSION.equals(
+  FRAMEWORK_PERMISSION.applyUMask(FsPermission.getUMask(conf)))) {
+LOG.info("Modifying permissions to " + FRAMEWORK_PERMISSION);
+fileSystem.setPermission(targetPath, FRAMEWORK_PERMISSION);
+  }
+
+  fsDataStream = (FSDataOutputStream) targetStream;
   if 

hadoop git commit: HADOOP-15974. Upgrade Curator version to 2.13.0 to fix ZK tests. Contributed by Akira Ajisaka

2018-12-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 38335e0b7 -> a7b402b06


HADOOP-15974. Upgrade Curator version to 2.13.0 to fix ZK tests. Contributed by 
Akira Ajisaka

(cherry picked from commit 9287ab364292ce917fc120532681131821ac53ef)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7b402b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7b402b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7b402b0

Branch: refs/heads/branch-3.0
Commit: a7b402b06afca32b83e2dd532ced5bb9700fe273
Parents: 38335e0
Author: Jason Lowe 
Authored: Tue Dec 4 15:44:03 2018 -0600
Committer: Jason Lowe 
Committed: Tue Dec 4 16:52:33 2018 -0600

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7b402b0/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 3957478..df9749a 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -86,7 +86,7 @@
 ${env.HADOOP_PROTOC_PATH}
 
 3.4.13
-2.12.0
+2.13.0
 3.0.0
 3.1.0-RC1
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-15974. Upgrade Curator version to 2.13.0 to fix ZK tests. Contributed by Akira Ajisaka

2018-12-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 844f51217 -> 293c992e8


HADOOP-15974. Upgrade Curator version to 2.13.0 to fix ZK tests. Contributed by 
Akira Ajisaka

(cherry picked from commit 9287ab364292ce917fc120532681131821ac53ef)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/293c992e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/293c992e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/293c992e

Branch: refs/heads/branch-3.1
Commit: 293c992e8167e0338a2ac5254cf615cd19ab5741
Parents: 844f512
Author: Jason Lowe 
Authored: Tue Dec 4 15:44:03 2018 -0600
Committer: Jason Lowe 
Committed: Tue Dec 4 16:48:26 2018 -0600

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/293c992e/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index db2b5b4..dfa0e9f 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -83,7 +83,7 @@
 ${env.HADOOP_PROTOC_PATH}
 
 3.4.13
-2.12.0
+2.13.0
 3.0.0
 3.1.0-RC1
 2.1.7


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-15974. Upgrade Curator version to 2.13.0 to fix ZK tests. Contributed by Akira Ajisaka

2018-12-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 559cb99bf -> 78b90725a


HADOOP-15974. Upgrade Curator version to 2.13.0 to fix ZK tests. Contributed by 
Akira Ajisaka

(cherry picked from commit 9287ab364292ce917fc120532681131821ac53ef)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78b90725
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78b90725
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78b90725

Branch: refs/heads/branch-3.2
Commit: 78b90725a60c516a6024ab4892b216d89338d042
Parents: 559cb99
Author: Jason Lowe 
Authored: Tue Dec 4 15:44:03 2018 -0600
Committer: Jason Lowe 
Committed: Tue Dec 4 16:44:21 2018 -0600

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78b90725/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index ea0ac04..ead53aa 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -87,7 +87,7 @@
 ${env.HADOOP_PROTOC_PATH}
 
 3.4.13
-2.12.0
+2.13.0
 3.0.0
 3.1.0-RC1
 2.1.7


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-15974. Upgrade Curator version to 2.13.0 to fix ZK tests. Contributed by Akira Ajisaka

2018-12-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk aa89492f2 -> 9287ab364


HADOOP-15974. Upgrade Curator version to 2.13.0 to fix ZK tests. Contributed by 
Akira Ajisaka


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9287ab36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9287ab36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9287ab36

Branch: refs/heads/trunk
Commit: 9287ab364292ce917fc120532681131821ac53ef
Parents: aa89492
Author: Jason Lowe 
Authored: Tue Dec 4 15:44:03 2018 -0600
Committer: Jason Lowe 
Committed: Tue Dec 4 15:44:03 2018 -0600

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9287ab36/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 5fd3a56..2b1fc09 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -87,7 +87,7 @@
 ${env.HADOOP_PROTOC_PATH}
 
 3.4.13
-2.12.0
+2.13.0
 3.0.0
 3.1.0-RC1
 2.1.7


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: MAPREDUCE-7164. FileOutputCommitter does not report progress while merging paths. Contributed by Kuhu Shukla

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 e4dcc3e60 -> 4c238b50d


MAPREDUCE-7164. FileOutputCommitter does not report progress while merging 
paths. Contributed by Kuhu Shukla

(cherry picked from commit 4d8de7ab690ef919b392b12d856482a6a1f2bb3d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c238b50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c238b50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c238b50

Branch: refs/heads/branch-3.0
Commit: 4c238b50dfd83a10923bfd6eb28d7a6ad864a40f
Parents: e4dcc3e
Author: Jason Lowe 
Authored: Wed Nov 28 14:54:59 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 16:10:02 2018 -0600

--
 .../lib/output/FileOutputCommitter.java | 28 +++--
 .../lib/output/TestFileOutputCommitter.java | 33 
 2 files changed, 51 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c238b50/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index 86af2cf..0ed3259 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Progressable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -389,7 +390,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
 
   if (algorithmVersion == 1) {
 for (FileStatus stat: getAllCommittedTaskPaths(context)) {
-  mergePaths(fs, stat, finalOutput);
+  mergePaths(fs, stat, finalOutput, context);
 }
   }
 
@@ -440,10 +441,11 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
* @throws IOException on any error
*/
   private void mergePaths(FileSystem fs, final FileStatus from,
-  final Path to) throws IOException {
+  final Path to, JobContext context) throws IOException {
 if (LOG.isDebugEnabled()) {
   LOG.debug("Merging data from " + from + " to " + to);
 }
+reportProgress(context);
 FileStatus toStat;
 try {
   toStat = fs.getFileStatus(to);
@@ -467,22 +469,28 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   if (!fs.delete(to, true)) {
 throw new IOException("Failed to delete " + to);
   }
-  renameOrMerge(fs, from, to);
+  renameOrMerge(fs, from, to, context);
 } else {
   //It is a directory so merge everything in the directories
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   } else {
-renameOrMerge(fs, from, to);
+renameOrMerge(fs, from, to, context);
   }
 }
   }
 
-  private void renameOrMerge(FileSystem fs, FileStatus from, Path to)
-  throws IOException {
+  private void reportProgress(JobContext context) {
+if (context instanceof Progressable) {
+  ((Progressable) context).progress();
+}
+  }
+
+  private void renameOrMerge(FileSystem fs, FileStatus from, Path to,
+  JobContext context) throws IOException {
 if (algorithmVersion == 1) {
   if (!fs.rename(from.getPath(), to)) {
 throw new IOException("Failed to rename " + from + " to " + to);
@@ -491,7 +499,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   fs.mkdirs(to);
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   }
@@ -583,7 +591,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   committedTaskPath);
 } else {
   // directly merge everything from 

hadoop git commit: MAPREDUCE-7164. FileOutputCommitter does not report progress while merging paths. Contributed by Kuhu Shukla

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 d9457df98 -> e7fa638fe


MAPREDUCE-7164. FileOutputCommitter does not report progress while merging 
paths. Contributed by Kuhu Shukla

(cherry picked from commit 4d8de7ab690ef919b392b12d856482a6a1f2bb3d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7fa638f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7fa638f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7fa638f

Branch: refs/heads/branch-3.1
Commit: e7fa638fe8588174d5d3db287779531de09a3e1b
Parents: d9457df
Author: Jason Lowe 
Authored: Wed Nov 28 14:54:59 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 16:01:05 2018 -0600

--
 .../lib/output/FileOutputCommitter.java | 28 +++--
 .../lib/output/TestFileOutputCommitter.java | 33 
 2 files changed, 51 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7fa638f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index cbae575..94af338 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Progressable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -400,7 +401,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
 
   if (algorithmVersion == 1) {
 for (FileStatus stat: getAllCommittedTaskPaths(context)) {
-  mergePaths(fs, stat, finalOutput);
+  mergePaths(fs, stat, finalOutput, context);
 }
   }
 
@@ -451,10 +452,11 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
* @throws IOException on any error
*/
   private void mergePaths(FileSystem fs, final FileStatus from,
-  final Path to) throws IOException {
+  final Path to, JobContext context) throws IOException {
 if (LOG.isDebugEnabled()) {
   LOG.debug("Merging data from " + from + " to " + to);
 }
+reportProgress(context);
 FileStatus toStat;
 try {
   toStat = fs.getFileStatus(to);
@@ -478,22 +480,28 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   if (!fs.delete(to, true)) {
 throw new IOException("Failed to delete " + to);
   }
-  renameOrMerge(fs, from, to);
+  renameOrMerge(fs, from, to, context);
 } else {
   //It is a directory so merge everything in the directories
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   } else {
-renameOrMerge(fs, from, to);
+renameOrMerge(fs, from, to, context);
   }
 }
   }
 
-  private void renameOrMerge(FileSystem fs, FileStatus from, Path to)
-  throws IOException {
+  private void reportProgress(JobContext context) {
+if (context instanceof Progressable) {
+  ((Progressable) context).progress();
+}
+  }
+
+  private void renameOrMerge(FileSystem fs, FileStatus from, Path to,
+  JobContext context) throws IOException {
 if (algorithmVersion == 1) {
   if (!fs.rename(from.getPath(), to)) {
 throw new IOException("Failed to rename " + from + " to " + to);
@@ -502,7 +510,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   fs.mkdirs(to);
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   }
@@ -594,7 +602,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   committedTaskPath);
 } else {
   // directly merge everything from 

hadoop git commit: MAPREDUCE-7164. FileOutputCommitter does not report progress while merging paths. Contributed by Kuhu Shukla

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 df0e7766e -> 7a78bdf7b


MAPREDUCE-7164. FileOutputCommitter does not report progress while merging 
paths. Contributed by Kuhu Shukla

(cherry picked from commit 4d8de7ab690ef919b392b12d856482a6a1f2bb3d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a78bdf7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a78bdf7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a78bdf7

Branch: refs/heads/branch-3.2
Commit: 7a78bdf7bbf278678dc10de3133930723972b60d
Parents: df0e776
Author: Jason Lowe 
Authored: Wed Nov 28 14:54:59 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 15:54:59 2018 -0600

--
 .../lib/output/FileOutputCommitter.java | 28 +++--
 .../lib/output/TestFileOutputCommitter.java | 33 
 2 files changed, 51 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a78bdf7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index cbae575..94af338 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Progressable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -400,7 +401,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
 
   if (algorithmVersion == 1) {
 for (FileStatus stat: getAllCommittedTaskPaths(context)) {
-  mergePaths(fs, stat, finalOutput);
+  mergePaths(fs, stat, finalOutput, context);
 }
   }
 
@@ -451,10 +452,11 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
* @throws IOException on any error
*/
   private void mergePaths(FileSystem fs, final FileStatus from,
-  final Path to) throws IOException {
+  final Path to, JobContext context) throws IOException {
 if (LOG.isDebugEnabled()) {
   LOG.debug("Merging data from " + from + " to " + to);
 }
+reportProgress(context);
 FileStatus toStat;
 try {
   toStat = fs.getFileStatus(to);
@@ -478,22 +480,28 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   if (!fs.delete(to, true)) {
 throw new IOException("Failed to delete " + to);
   }
-  renameOrMerge(fs, from, to);
+  renameOrMerge(fs, from, to, context);
 } else {
   //It is a directory so merge everything in the directories
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   } else {
-renameOrMerge(fs, from, to);
+renameOrMerge(fs, from, to, context);
   }
 }
   }
 
-  private void renameOrMerge(FileSystem fs, FileStatus from, Path to)
-  throws IOException {
+  private void reportProgress(JobContext context) {
+if (context instanceof Progressable) {
+  ((Progressable) context).progress();
+}
+  }
+
+  private void renameOrMerge(FileSystem fs, FileStatus from, Path to,
+  JobContext context) throws IOException {
 if (algorithmVersion == 1) {
   if (!fs.rename(from.getPath(), to)) {
 throw new IOException("Failed to rename " + from + " to " + to);
@@ -502,7 +510,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   fs.mkdirs(to);
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   }
@@ -594,7 +602,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   committedTaskPath);
 } else {
   // directly merge everything from 

hadoop git commit: MAPREDUCE-7164. FileOutputCommitter does not report progress while merging paths. Contributed by Kuhu Shukla

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 300f560fc -> 4d8de7ab6


MAPREDUCE-7164. FileOutputCommitter does not report progress while merging 
paths. Contributed by Kuhu Shukla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d8de7ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d8de7ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d8de7ab

Branch: refs/heads/trunk
Commit: 4d8de7ab690ef919b392b12d856482a6a1f2bb3d
Parents: 300f560
Author: Jason Lowe 
Authored: Wed Nov 28 14:54:59 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 14:54:59 2018 -0600

--
 .../lib/output/FileOutputCommitter.java | 28 +++--
 .../lib/output/TestFileOutputCommitter.java | 33 
 2 files changed, 51 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d8de7ab/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index cbae575..94af338 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Progressable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -400,7 +401,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
 
   if (algorithmVersion == 1) {
 for (FileStatus stat: getAllCommittedTaskPaths(context)) {
-  mergePaths(fs, stat, finalOutput);
+  mergePaths(fs, stat, finalOutput, context);
 }
   }
 
@@ -451,10 +452,11 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
* @throws IOException on any error
*/
   private void mergePaths(FileSystem fs, final FileStatus from,
-  final Path to) throws IOException {
+  final Path to, JobContext context) throws IOException {
 if (LOG.isDebugEnabled()) {
   LOG.debug("Merging data from " + from + " to " + to);
 }
+reportProgress(context);
 FileStatus toStat;
 try {
   toStat = fs.getFileStatus(to);
@@ -478,22 +480,28 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   if (!fs.delete(to, true)) {
 throw new IOException("Failed to delete " + to);
   }
-  renameOrMerge(fs, from, to);
+  renameOrMerge(fs, from, to, context);
 } else {
   //It is a directory so merge everything in the directories
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   } else {
-renameOrMerge(fs, from, to);
+renameOrMerge(fs, from, to, context);
   }
 }
   }
 
-  private void renameOrMerge(FileSystem fs, FileStatus from, Path to)
-  throws IOException {
+  private void reportProgress(JobContext context) {
+if (context instanceof Progressable) {
+  ((Progressable) context).progress();
+}
+  }
+
+  private void renameOrMerge(FileSystem fs, FileStatus from, Path to,
+  JobContext context) throws IOException {
 if (algorithmVersion == 1) {
   if (!fs.rename(from.getPath(), to)) {
 throw new IOException("Failed to rename " + from + " to " + to);
@@ -502,7 +510,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   fs.mkdirs(to);
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   }
@@ -594,7 +602,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   committedTaskPath);
 } else {
   // directly merge everything from taskAttemptPath to output directory
-  mergePaths(fs, taskAttemptDirStatus, 

hadoop git commit: YARN-8812. Containers fail during creating a symlink which started with hyphen for a resource file. Contributed by Oleksandr Shevchenko

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 7dac29411 -> e4dcc3e60


YARN-8812. Containers fail during creating a symlink which started with hyphen 
for a resource file. Contributed by Oleksandr Shevchenko

(cherry picked from commit 3ce99e32f7d7887412cae8337cd4ebeb3b2ee308)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4dcc3e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4dcc3e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4dcc3e6

Branch: refs/heads/branch-3.0
Commit: e4dcc3e606902ce15c1b160bbedc3ebe8ccb3a0b
Parents: 7dac294
Author: Jason Lowe 
Authored: Wed Nov 28 08:46:11 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 08:56:04 2018 -0600

--
 .../nodemanager/containermanager/launcher/ContainerLaunch.java | 2 +-
 .../nodemanager/containermanager/launcher/TestContainerLaunch.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4dcc3e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 114c681..b6d91d4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -1199,7 +1199,7 @@ public class ContainerLaunch implements Callable 
{
 
 @Override
 protected void link(Path src, Path dst) throws IOException {
-  line("ln -sf \"", src.toUri().getPath(), "\" \"", dst.toString(), "\"");
+  line("ln -sf -- \"", src.toUri().getPath(), "\" \"", dst.toString(), 
"\"");
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4dcc3e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 2a0a763..685baaf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -152,7 +152,7 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 File shellFile = null;
 File tempFile = null;
 String badSymlink = Shell.WINDOWS ? "foo@zz_#!-+bar.cmd" :
-  "foo@zz%_#*&!-+= bar()";
+  "-foo@zz%_#*&!-+= bar()";
 File symLinkFile = null;
 
 try {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: YARN-8812. Containers fail during creating a symlink which started with hyphen for a resource file. Contributed by Oleksandr Shevchenko

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 fc74a3f80 -> d9457df98


YARN-8812. Containers fail during creating a symlink which started with hyphen 
for a resource file. Contributed by Oleksandr Shevchenko

(cherry picked from commit 3ce99e32f7d7887412cae8337cd4ebeb3b2ee308)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9457df9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9457df9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9457df9

Branch: refs/heads/branch-3.1
Commit: d9457df989ba8482d7a65dd2d781d2f28ccfd8f1
Parents: fc74a3f
Author: Jason Lowe 
Authored: Wed Nov 28 08:46:11 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 08:54:04 2018 -0600

--
 .../nodemanager/containermanager/launcher/ContainerLaunch.java | 2 +-
 .../nodemanager/containermanager/launcher/TestContainerLaunch.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9457df9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 999b66f..120ca2d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -1404,7 +1404,7 @@ public class ContainerLaunch implements Callable 
{
 
 @Override
 protected void link(Path src, Path dst) throws IOException {
-  line("ln -sf \"", src.toUri().getPath(), "\" \"", dst.toString(), "\"");
+  line("ln -sf -- \"", src.toUri().getPath(), "\" \"", dst.toString(), 
"\"");
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9457df9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 6a30f9e..ab5d47e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -159,7 +159,7 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 File shellFile = null;
 File tempFile = null;
 String badSymlink = Shell.WINDOWS ? "foo@zz_#!-+bar.cmd" :
-  "foo@zz%_#*&!-+= bar()";
+  "-foo@zz%_#*&!-+= bar()";
 File symLinkFile = null;
 
 try {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: YARN-8812. Containers fail during creating a symlink which started with hyphen for a resource file. Contributed by Oleksandr Shevchenko

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 db8b2a130 -> df0e7766e


YARN-8812. Containers fail during creating a symlink which started with hyphen 
for a resource file. Contributed by Oleksandr Shevchenko

(cherry picked from commit 3ce99e32f7d7887412cae8337cd4ebeb3b2ee308)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df0e7766
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df0e7766
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df0e7766

Branch: refs/heads/branch-3.2
Commit: df0e7766e45e558a68f55ec5eb447cfb83885836
Parents: db8b2a1
Author: Jason Lowe 
Authored: Wed Nov 28 08:46:11 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 08:50:18 2018 -0600

--
 .../nodemanager/containermanager/launcher/ContainerLaunch.java | 2 +-
 .../nodemanager/containermanager/launcher/TestContainerLaunch.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df0e7766/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index f198e83..f27af55 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -1246,7 +1246,7 @@ public class ContainerLaunch implements Callable 
{
 
 @Override
 protected void link(Path src, Path dst) throws IOException {
-  line("ln -sf \"", src.toUri().getPath(), "\" \"", dst.toString(), "\"");
+  line("ln -sf -- \"", src.toUri().getPath(), "\" \"", dst.toString(), 
"\"");
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df0e7766/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 5714a1c..1f7df56 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -159,7 +159,7 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 File shellFile = null;
 File tempFile = null;
 String badSymlink = Shell.WINDOWS ? "foo@zz_#!-+bar.cmd" :
-  "foo@zz%_#*&!-+= bar()";
+  "-foo@zz%_#*&!-+= bar()";
 File symLinkFile = null;
 
 try {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: YARN-8812. Containers fail during creating a symlink which started with hyphen for a resource file. Contributed by Oleksandr Shevchenko

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 13a21f660 -> 3ce99e32f


YARN-8812. Containers fail during creating a symlink which started with hyphen 
for a resource file. Contributed by Oleksandr Shevchenko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ce99e32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ce99e32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ce99e32

Branch: refs/heads/trunk
Commit: 3ce99e32f7d7887412cae8337cd4ebeb3b2ee308
Parents: 13a21f6
Author: Jason Lowe 
Authored: Wed Nov 28 08:46:11 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 08:46:11 2018 -0600

--
 .../nodemanager/containermanager/launcher/ContainerLaunch.java | 2 +-
 .../nodemanager/containermanager/launcher/TestContainerLaunch.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ce99e32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 45f6006..60b6e31 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -1296,7 +1296,7 @@ public class ContainerLaunch implements Callable 
{
 
 @Override
 protected void link(Path src, Path dst) throws IOException {
-  line("ln -sf \"", src.toUri().getPath(), "\" \"", dst.toString(), "\"");
+  line("ln -sf -- \"", src.toUri().getPath(), "\" \"", dst.toString(), 
"\"");
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ce99e32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 8c01175..93accf2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -167,7 +167,7 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 File shellFile = null;
 File tempFile = null;
 String badSymlink = Shell.WINDOWS ? "foo@zz_#!-+bar.cmd" :
-  "foo@zz%_#*&!-+= bar()";
+  "-foo@zz%_#*&!-+= bar()";
 File symLinkFile = null;
 
 try {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: MAPREDUCE-7148. Fast fail jobs when exceeds dfs quota limitation. Contributed by Wang Yan

2018-11-07 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8dc1f6dbf -> 0b6625a97


MAPREDUCE-7148. Fast fail jobs when exceeds dfs quota limitation. Contributed 
by Wang Yan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b6625a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b6625a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b6625a9

Branch: refs/heads/trunk
Commit: 0b6625a9735f76ab473b41d8ab9b7f3c7678cfff
Parents: 8dc1f6d
Author: Jason Lowe 
Authored: Wed Nov 7 08:20:49 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 7 08:20:49 2018 -0600

--
 ...ClusterStorageCapacityExceededException.java |  51 
 .../hdfs/protocol/QuotaExceededException.java   |   5 +-
 .../org/apache/hadoop/mapred/YarnChild.java |  28 -
 .../org/apache/hadoop/mapred/TestYarnChild.java | 118 +++
 .../apache/hadoop/mapreduce/MRJobConfig.java|   3 +
 .../src/main/resources/mapred-default.xml   |   9 ++
 6 files changed, 209 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b6625a9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
new file mode 100644
index 000..bbbf073
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Exception raised by HDFS indicating that storage capacity in the
+ * cluster filesystem is exceeded. See also
+ * https://issues.apache.org/jira/browse/MAPREDUCE-7148.
+ */
+@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
+@InterfaceStability.Evolving
+public class ClusterStorageCapacityExceededException extends IOException {
+  private static final long serialVersionUID = 1L;
+
+  public ClusterStorageCapacityExceededException() {
+super();
+  }
+
+  public ClusterStorageCapacityExceededException(String message) {
+super(message);
+  }
+
+  public ClusterStorageCapacityExceededException(String message,
+  Throwable cause) {
+super(message, cause);
+  }
+
+  public ClusterStorageCapacityExceededException(Throwable cause) {
+super(cause);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b6625a9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
index f4e7f34..7033f3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
@@ -18,10 +18,9 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ClusterStorageCapacityExceededException;
 
 /**
  * This exception is thrown when modification to HDFS results in violation
@@ -37,7 +36,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @InterfaceAudience.Private
 

hadoop git commit: MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. Contributed by Peter Bacsko

2018-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8b2363afe -> 31aafb886


MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. 
Contributed by Peter Bacsko

(cherry picked from commit ba1f9d66d94ed0b85084d7c40c09a87478b3a05a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31aafb88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31aafb88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31aafb88

Branch: refs/heads/branch-2.8
Commit: 31aafb88644636fc46603c2b089a635ca92e35c9
Parents: 8b2363a
Author: Jason Lowe 
Authored: Tue Nov 6 17:55:51 2018 -0600
Committer: Jason Lowe 
Committed: Tue Nov 6 18:17:46 2018 -0600

--
 .../src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31aafb88/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 235313c..a6e988e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -851,6 +851,8 @@ public class ShuffleHandler extends AuxiliaryService {
 @Override
 public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt) 
 throws Exception {
+  super.channelOpen(ctx, evt);
+
   if ((maxShuffleConnections > 0) && (accepted.size() >= 
maxShuffleConnections)) {
 LOG.info(String.format("Current number of shuffle connections (%d) is 
" + 
 "greater than or equal to the max allowed shuffle connections 
(%d)", 
@@ -859,8 +861,6 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
   accepted.add(evt.getChannel());
-  super.channelOpen(ctx, evt);
- 
 }
 
 @Override


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. Contributed by Peter Bacsko

2018-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 fa47ae2f7 -> b6e1aed1a


MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. 
Contributed by Peter Bacsko

(cherry picked from commit ba1f9d66d94ed0b85084d7c40c09a87478b3a05a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6e1aed1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6e1aed1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6e1aed1

Branch: refs/heads/branch-2.9
Commit: b6e1aed1ad3463936f5ed49a44affa23546ca31d
Parents: fa47ae2
Author: Jason Lowe 
Authored: Tue Nov 6 17:55:51 2018 -0600
Committer: Jason Lowe 
Committed: Tue Nov 6 18:16:24 2018 -0600

--
 .../src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6e1aed1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 0bab750..51cfa54 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -911,6 +911,8 @@ public class ShuffleHandler extends AuxiliaryService {
 @Override
 public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt) 
 throws Exception {
+  super.channelOpen(ctx, evt);
+
   if ((maxShuffleConnections > 0) && (accepted.size() >= 
maxShuffleConnections)) {
 LOG.info(String.format("Current number of shuffle connections (%d) is 
" + 
 "greater than or equal to the max allowed shuffle connections 
(%d)", 
@@ -926,8 +928,6 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
   accepted.add(evt.getChannel());
-  super.channelOpen(ctx, evt);
- 
 }
 
 @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. Contributed by Peter Bacsko

2018-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d9b3b5838 -> 5e433e5bd


MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. 
Contributed by Peter Bacsko

(cherry picked from commit ba1f9d66d94ed0b85084d7c40c09a87478b3a05a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e433e5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e433e5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e433e5b

Branch: refs/heads/branch-2
Commit: 5e433e5bd019f42f4de3063cf5a03dd41c3ff942
Parents: d9b3b58
Author: Jason Lowe 
Authored: Tue Nov 6 17:55:51 2018 -0600
Committer: Jason Lowe 
Committed: Tue Nov 6 18:14:12 2018 -0600

--
 .../src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e433e5b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 0bab750..51cfa54 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -911,6 +911,8 @@ public class ShuffleHandler extends AuxiliaryService {
 @Override
 public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt) 
 throws Exception {
+  super.channelOpen(ctx, evt);
+
   if ((maxShuffleConnections > 0) && (accepted.size() >= 
maxShuffleConnections)) {
 LOG.info(String.format("Current number of shuffle connections (%d) is 
" + 
 "greater than or equal to the max allowed shuffle connections 
(%d)", 
@@ -926,8 +928,6 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
   accepted.add(evt.getChannel());
-  super.channelOpen(ctx, evt);
- 
 }
 
 @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. Contributed by Peter Bacsko

2018-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c4d390a77 -> e830d73ad


MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. 
Contributed by Peter Bacsko

(cherry picked from commit ba1f9d66d94ed0b85084d7c40c09a87478b3a05a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e830d73a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e830d73a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e830d73a

Branch: refs/heads/branch-3.0
Commit: e830d73adb3cf93d2d126f37c91e7cc6f6aa2813
Parents: c4d390a
Author: Jason Lowe 
Authored: Tue Nov 6 17:55:51 2018 -0600
Committer: Jason Lowe 
Committed: Tue Nov 6 18:11:39 2018 -0600

--
 .../src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e830d73a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index aeda9cc..c222685 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -910,6 +910,8 @@ public class ShuffleHandler extends AuxiliaryService {
 @Override
 public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt) 
 throws Exception {
+  super.channelOpen(ctx, evt);
+
   if ((maxShuffleConnections > 0) && (accepted.size() >= 
maxShuffleConnections)) {
 LOG.info(String.format("Current number of shuffle connections (%d) is 
" + 
 "greater than or equal to the max allowed shuffle connections 
(%d)", 
@@ -925,8 +927,6 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
   accepted.add(evt.getChannel());
-  super.channelOpen(ctx, evt);
- 
 }
 
 @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. Contributed by Peter Bacsko

2018-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 f65324aa4 -> afbcb51ac


MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. 
Contributed by Peter Bacsko

(cherry picked from commit ba1f9d66d94ed0b85084d7c40c09a87478b3a05a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afbcb51a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afbcb51a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afbcb51a

Branch: refs/heads/branch-3.1
Commit: afbcb51ac12b0277c1c44ed4b3d9b2e022a101ab
Parents: f65324a
Author: Jason Lowe 
Authored: Tue Nov 6 17:55:51 2018 -0600
Committer: Jason Lowe 
Committed: Tue Nov 6 18:07:57 2018 -0600

--
 .../src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afbcb51a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index aeda9cc..c222685 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -910,6 +910,8 @@ public class ShuffleHandler extends AuxiliaryService {
 @Override
 public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt) 
 throws Exception {
+  super.channelOpen(ctx, evt);
+
   if ((maxShuffleConnections > 0) && (accepted.size() >= 
maxShuffleConnections)) {
 LOG.info(String.format("Current number of shuffle connections (%d) is 
" + 
 "greater than or equal to the max allowed shuffle connections 
(%d)", 
@@ -925,8 +927,6 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
   accepted.add(evt.getChannel());
-  super.channelOpen(ctx, evt);
- 
 }
 
 @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. Contributed by Peter Bacsko

2018-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 e7fba62e6 -> ff5a2cb5c


MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. 
Contributed by Peter Bacsko

(cherry picked from commit ba1f9d66d94ed0b85084d7c40c09a87478b3a05a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff5a2cb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff5a2cb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff5a2cb5

Branch: refs/heads/branch-3.2
Commit: ff5a2cb5c9bcbc50478c5eca16d0d7aec9701ec2
Parents: e7fba62
Author: Jason Lowe 
Authored: Tue Nov 6 17:55:51 2018 -0600
Committer: Jason Lowe 
Committed: Tue Nov 6 17:57:09 2018 -0600

--
 .../src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff5a2cb5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index aeda9cc..c222685 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -910,6 +910,8 @@ public class ShuffleHandler extends AuxiliaryService {
 @Override
 public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt) 
 throws Exception {
+  super.channelOpen(ctx, evt);
+
   if ((maxShuffleConnections > 0) && (accepted.size() >= 
maxShuffleConnections)) {
 LOG.info(String.format("Current number of shuffle connections (%d) is 
" + 
 "greater than or equal to the max allowed shuffle connections 
(%d)", 
@@ -925,8 +927,6 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
   accepted.add(evt.getChannel());
-  super.channelOpen(ctx, evt);
- 
 }
 
 @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. Contributed by Peter Bacsko

2018-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 08d69d91f -> ba1f9d66d


MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. 
Contributed by Peter Bacsko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba1f9d66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba1f9d66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba1f9d66

Branch: refs/heads/trunk
Commit: ba1f9d66d94ed0b85084d7c40c09a87478b3a05a
Parents: 08d69d9
Author: Jason Lowe 
Authored: Tue Nov 6 17:55:51 2018 -0600
Committer: Jason Lowe 
Committed: Tue Nov 6 17:55:51 2018 -0600

--
 .../src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba1f9d66/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index aeda9cc..c222685 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -910,6 +910,8 @@ public class ShuffleHandler extends AuxiliaryService {
 @Override
 public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt) 
 throws Exception {
+  super.channelOpen(ctx, evt);
+
   if ((maxShuffleConnections > 0) && (accepted.size() >= 
maxShuffleConnections)) {
 LOG.info(String.format("Current number of shuffle connections (%d) is 
" + 
 "greater than or equal to the max allowed shuffle connections 
(%d)", 
@@ -925,8 +927,6 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
   accepted.add(evt.getChannel());
-  super.channelOpen(ctx, evt);
- 
 }
 
 @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8865. RMStateStore contains large number of expired RMDelegationToken. Contributed by Wilfred Spiegelenburg

2018-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 ddb349ceb -> f3e6682ba


YARN-8865. RMStateStore contains large number of expired RMDelegationToken. 
Contributed by Wilfred Spiegelenburg

(cherry picked from commit ab6aa4c7265db5bcbb446c2f779289023d454b81)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3e6682b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3e6682b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3e6682b

Branch: refs/heads/branch-3.0
Commit: f3e6682ba73b76f7bb0541e96181f106b450ce84
Parents: ddb349c
Author: Jason Lowe 
Authored: Tue Nov 6 08:40:59 2018 -0600
Committer: Jason Lowe 
Committed: Tue Nov 6 08:59:47 2018 -0600

--
 .../AbstractDelegationTokenSecretManager.java   | 12 ++-
 .../hs/TestJHSDelegationTokenSecretManager.java | 78 +---
 .../security/TestRMDelegationTokens.java| 63 
 3 files changed, 140 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3e6682b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index 7593b4e..9259d87 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -300,7 +300,8 @@ extends AbstractDelegationTokenIdentifier>
 
   /**
* This method is intended to be used for recovering persisted delegation
-   * tokens
+   * tokens. Tokens that have an unknown DelegationKey are
+   * marked as expired and automatically cleaned up.
* This method must be called before this secret manager is activated (before
* startThreads() is called)
* @param identifier identifier read from persistent storage
@@ -316,12 +317,15 @@ extends AbstractDelegationTokenIdentifier>
 }
 int keyId = identifier.getMasterKeyId();
 DelegationKey dKey = allKeys.get(keyId);
+byte[] password = null;
 if (dKey == null) {
-  LOG.warn("No KEY found for persisted identifier "
+  LOG.warn("No KEY found for persisted identifier, expiring stored token "
   + formatTokenId(identifier));
-  return;
+  // make sure the token is expired
+  renewDate = 0L;
+} else {
+  password = createPassword(identifier.getBytes(), dKey.getKey());
 }
-byte[] password = createPassword(identifier.getBytes(), dKey.getKey());
 if (identifier.getSequenceNumber() > getDelegationTokenSeqNum()) {
   setDelegationTokenSeqNum(identifier.getSequenceNumber());
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3e6682b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
index f41bb3a..64715fb 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
@@ -21,12 +21,12 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
@@ -34,17 +34,21 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.token.Token;
 import 

hadoop git commit: YARN-8865. RMStateStore contains large number of expired RMDelegationToken. Contributed by Wilfred Spiegelenburg

2018-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 631b31110 -> 7335d940d


YARN-8865. RMStateStore contains large number of expired RMDelegationToken. 
Contributed by Wilfred Spiegelenburg

(cherry picked from commit ab6aa4c7265db5bcbb446c2f779289023d454b81)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7335d940
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7335d940
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7335d940

Branch: refs/heads/branch-3.1
Commit: 7335d940de23e2af946445d4c0c2a59207e3b013
Parents: 631b311
Author: Jason Lowe 
Authored: Tue Nov 6 08:40:59 2018 -0600
Committer: Jason Lowe 
Committed: Tue Nov 6 08:52:29 2018 -0600

--
 .../AbstractDelegationTokenSecretManager.java   | 12 ++-
 .../hs/TestJHSDelegationTokenSecretManager.java | 78 +---
 .../security/TestRMDelegationTokens.java| 63 
 3 files changed, 140 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7335d940/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index 7593b4e..9259d87 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -300,7 +300,8 @@ extends AbstractDelegationTokenIdentifier>
 
   /**
* This method is intended to be used for recovering persisted delegation
-   * tokens
+   * tokens. Tokens that have an unknown DelegationKey are
+   * marked as expired and automatically cleaned up.
* This method must be called before this secret manager is activated (before
* startThreads() is called)
* @param identifier identifier read from persistent storage
@@ -316,12 +317,15 @@ extends AbstractDelegationTokenIdentifier>
 }
 int keyId = identifier.getMasterKeyId();
 DelegationKey dKey = allKeys.get(keyId);
+byte[] password = null;
 if (dKey == null) {
-  LOG.warn("No KEY found for persisted identifier "
+  LOG.warn("No KEY found for persisted identifier, expiring stored token "
   + formatTokenId(identifier));
-  return;
+  // make sure the token is expired
+  renewDate = 0L;
+} else {
+  password = createPassword(identifier.getBytes(), dKey.getKey());
 }
-byte[] password = createPassword(identifier.getBytes(), dKey.getKey());
 if (identifier.getSequenceNumber() > getDelegationTokenSeqNum()) {
   setDelegationTokenSeqNum(identifier.getSequenceNumber());
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7335d940/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
index f41bb3a..64715fb 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
@@ -21,12 +21,12 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
@@ -34,17 +34,21 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.token.Token;
 import 

hadoop git commit: YARN-8865. RMStateStore contains large number of expired RMDelegationToken. Contributed by Wilfred Spiegelenburg

2018-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 f00125e2d -> 926593420


YARN-8865. RMStateStore contains large number of expired RMDelegationToken. 
Contributed by Wilfred Spiegelenburg

(cherry picked from commit ab6aa4c7265db5bcbb446c2f779289023d454b81)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92659342
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92659342
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92659342

Branch: refs/heads/branch-3.2
Commit: 9265934201cb086265345eafa5553ecdd7500e19
Parents: f00125e
Author: Jason Lowe 
Authored: Tue Nov 6 08:40:59 2018 -0600
Committer: Jason Lowe 
Committed: Tue Nov 6 08:47:30 2018 -0600

--
 .../AbstractDelegationTokenSecretManager.java   | 12 ++-
 .../hs/TestJHSDelegationTokenSecretManager.java | 78 +---
 .../security/TestRMDelegationTokens.java| 63 
 3 files changed, 140 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92659342/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index 7593b4e..9259d87 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -300,7 +300,8 @@ extends AbstractDelegationTokenIdentifier>
 
   /**
* This method is intended to be used for recovering persisted delegation
-   * tokens
+   * tokens. Tokens that have an unknown DelegationKey are
+   * marked as expired and automatically cleaned up.
* This method must be called before this secret manager is activated (before
* startThreads() is called)
* @param identifier identifier read from persistent storage
@@ -316,12 +317,15 @@ extends AbstractDelegationTokenIdentifier>
 }
 int keyId = identifier.getMasterKeyId();
 DelegationKey dKey = allKeys.get(keyId);
+byte[] password = null;
 if (dKey == null) {
-  LOG.warn("No KEY found for persisted identifier "
+  LOG.warn("No KEY found for persisted identifier, expiring stored token "
   + formatTokenId(identifier));
-  return;
+  // make sure the token is expired
+  renewDate = 0L;
+} else {
+  password = createPassword(identifier.getBytes(), dKey.getKey());
 }
-byte[] password = createPassword(identifier.getBytes(), dKey.getKey());
 if (identifier.getSequenceNumber() > getDelegationTokenSeqNum()) {
   setDelegationTokenSeqNum(identifier.getSequenceNumber());
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92659342/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
index f41bb3a..64715fb 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
@@ -21,12 +21,12 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
@@ -34,17 +34,21 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.token.Token;
 import 

hadoop git commit: YARN-8865. RMStateStore contains large number of expired RMDelegationToken. Contributed by Wilfred Spiegelenburg

2018-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 49412a128 -> ab6aa4c72


YARN-8865. RMStateStore contains large number of expired RMDelegationToken. 
Contributed by Wilfred Spiegelenburg


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab6aa4c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab6aa4c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab6aa4c7

Branch: refs/heads/trunk
Commit: ab6aa4c7265db5bcbb446c2f779289023d454b81
Parents: 49412a1
Author: Jason Lowe 
Authored: Tue Nov 6 08:40:59 2018 -0600
Committer: Jason Lowe 
Committed: Tue Nov 6 08:40:59 2018 -0600

--
 .../AbstractDelegationTokenSecretManager.java   | 12 ++-
 .../hs/TestJHSDelegationTokenSecretManager.java | 78 +---
 .../security/TestRMDelegationTokens.java| 63 
 3 files changed, 140 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab6aa4c7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index 7593b4e..9259d87 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -300,7 +300,8 @@ extends AbstractDelegationTokenIdentifier>
 
   /**
* This method is intended to be used for recovering persisted delegation
-   * tokens
+   * tokens. Tokens that have an unknown DelegationKey are
+   * marked as expired and automatically cleaned up.
* This method must be called before this secret manager is activated (before
* startThreads() is called)
* @param identifier identifier read from persistent storage
@@ -316,12 +317,15 @@ extends AbstractDelegationTokenIdentifier>
 }
 int keyId = identifier.getMasterKeyId();
 DelegationKey dKey = allKeys.get(keyId);
+byte[] password = null;
 if (dKey == null) {
-  LOG.warn("No KEY found for persisted identifier "
+  LOG.warn("No KEY found for persisted identifier, expiring stored token "
   + formatTokenId(identifier));
-  return;
+  // make sure the token is expired
+  renewDate = 0L;
+} else {
+  password = createPassword(identifier.getBytes(), dKey.getKey());
 }
-byte[] password = createPassword(identifier.getBytes(), dKey.getKey());
 if (identifier.getSequenceNumber() > getDelegationTokenSeqNum()) {
   setDelegationTokenSeqNum(identifier.getSequenceNumber());
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab6aa4c7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
index f41bb3a..64715fb 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
@@ -21,12 +21,12 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
@@ -34,17 +34,21 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
+import 

hadoop git commit: YARN-8904. TestRMDelegationTokens can fail in testRMDTMasterKeyStateOnRollingMasterKey. Contributed by Wilfred Spiegelenburg

2018-10-23 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 e4464f959 -> a9bc3e370


YARN-8904. TestRMDelegationTokens can fail in 
testRMDTMasterKeyStateOnRollingMasterKey. Contributed by Wilfred Spiegelenburg

(cherry picked from commit 93fb3b4b9cbc63e65d590676a5a318d555a25904)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9bc3e37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9bc3e37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9bc3e37

Branch: refs/heads/branch-3.0
Commit: a9bc3e37080c328ebfe090cf69149678ad11fc05
Parents: e4464f9
Author: Jason Lowe 
Authored: Tue Oct 23 12:49:15 2018 -0500
Committer: Jason Lowe 
Committed: Tue Oct 23 12:57:49 2018 -0500

--
 .../security/TestRMDelegationTokens.java| 12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9bc3e37/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
index 2c52377..aae86b0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
@@ -97,7 +97,17 @@ public class TestRMDelegationTokens {
 RMDelegationTokenSecretManager dtSecretManager =
 rm1.getRMContext().getRMDelegationTokenSecretManager();
 // assert all master keys are saved
-Assert.assertEquals(dtSecretManager.getAllMasterKeys(), 
rmDTMasterKeyState);
+dtSecretManager.getAllMasterKeys().forEach(managerKey -> {
+  int keyId = managerKey.getKeyId();
+  boolean found = false;
+  for (DelegationKey stateKey: rmDTMasterKeyState) {
+if (stateKey.getKeyId() == keyId) {
+  found = true;
+  break;
+}
+  }
+  Assert.assertTrue("Master key not found: " + keyId, found);
+});
 
 // request to generate a RMDelegationToken
 GetDelegationTokenRequest request = mock(GetDelegationTokenRequest.class);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8904. TestRMDelegationTokens can fail in testRMDTMasterKeyStateOnRollingMasterKey. Contributed by Wilfred Spiegelenburg

2018-10-23 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 3e3b08885 -> 3be72b7aa


YARN-8904. TestRMDelegationTokens can fail in 
testRMDTMasterKeyStateOnRollingMasterKey. Contributed by Wilfred Spiegelenburg

(cherry picked from commit 93fb3b4b9cbc63e65d590676a5a318d555a25904)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3be72b7a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3be72b7a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3be72b7a

Branch: refs/heads/branch-3.1
Commit: 3be72b7aa240dc57d441ee7466e13b1da53c717d
Parents: 3e3b088
Author: Jason Lowe 
Authored: Tue Oct 23 12:49:15 2018 -0500
Committer: Jason Lowe 
Committed: Tue Oct 23 12:55:48 2018 -0500

--
 .../security/TestRMDelegationTokens.java| 12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3be72b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
index 2c52377..aae86b0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
@@ -97,7 +97,17 @@ public class TestRMDelegationTokens {
 RMDelegationTokenSecretManager dtSecretManager =
 rm1.getRMContext().getRMDelegationTokenSecretManager();
 // assert all master keys are saved
-Assert.assertEquals(dtSecretManager.getAllMasterKeys(), 
rmDTMasterKeyState);
+dtSecretManager.getAllMasterKeys().forEach(managerKey -> {
+  int keyId = managerKey.getKeyId();
+  boolean found = false;
+  for (DelegationKey stateKey: rmDTMasterKeyState) {
+if (stateKey.getKeyId() == keyId) {
+  found = true;
+  break;
+}
+  }
+  Assert.assertTrue("Master key not found: " + keyId, found);
+});
 
 // request to generate a RMDelegationToken
 GetDelegationTokenRequest request = mock(GetDelegationTokenRequest.class);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8904. TestRMDelegationTokens can fail in testRMDTMasterKeyStateOnRollingMasterKey. Contributed by Wilfred Spiegelenburg

2018-10-23 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 660fff313 -> 709775592


YARN-8904. TestRMDelegationTokens can fail in 
testRMDTMasterKeyStateOnRollingMasterKey. Contributed by Wilfred Spiegelenburg

(cherry picked from commit 93fb3b4b9cbc63e65d590676a5a318d555a25904)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/70977559
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/70977559
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/70977559

Branch: refs/heads/branch-3.2
Commit: 709775592579543aa1e20a0e5d176894da93cc85
Parents: 660fff3
Author: Jason Lowe 
Authored: Tue Oct 23 12:49:15 2018 -0500
Committer: Jason Lowe 
Committed: Tue Oct 23 12:54:04 2018 -0500

--
 .../security/TestRMDelegationTokens.java| 12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/70977559/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
index 2c52377..aae86b0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
@@ -97,7 +97,17 @@ public class TestRMDelegationTokens {
 RMDelegationTokenSecretManager dtSecretManager =
 rm1.getRMContext().getRMDelegationTokenSecretManager();
 // assert all master keys are saved
-Assert.assertEquals(dtSecretManager.getAllMasterKeys(), 
rmDTMasterKeyState);
+dtSecretManager.getAllMasterKeys().forEach(managerKey -> {
+  int keyId = managerKey.getKeyId();
+  boolean found = false;
+  for (DelegationKey stateKey: rmDTMasterKeyState) {
+if (stateKey.getKeyId() == keyId) {
+  found = true;
+  break;
+}
+  }
+  Assert.assertTrue("Master key not found: " + keyId, found);
+});
 
 // request to generate a RMDelegationToken
 GetDelegationTokenRequest request = mock(GetDelegationTokenRequest.class);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8904. TestRMDelegationTokens can fail in testRMDTMasterKeyStateOnRollingMasterKey. Contributed by Wilfred Spiegelenburg

2018-10-23 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk dd268a64d -> 93fb3b4b9


YARN-8904. TestRMDelegationTokens can fail in 
testRMDTMasterKeyStateOnRollingMasterKey. Contributed by Wilfred Spiegelenburg


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93fb3b4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93fb3b4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93fb3b4b

Branch: refs/heads/trunk
Commit: 93fb3b4b9cbc63e65d590676a5a318d555a25904
Parents: dd268a6
Author: Jason Lowe 
Authored: Tue Oct 23 12:49:15 2018 -0500
Committer: Jason Lowe 
Committed: Tue Oct 23 12:49:15 2018 -0500

--
 .../security/TestRMDelegationTokens.java| 12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93fb3b4b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
index 2c52377..aae86b0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
@@ -97,7 +97,17 @@ public class TestRMDelegationTokens {
 RMDelegationTokenSecretManager dtSecretManager =
 rm1.getRMContext().getRMDelegationTokenSecretManager();
 // assert all master keys are saved
-Assert.assertEquals(dtSecretManager.getAllMasterKeys(), 
rmDTMasterKeyState);
+dtSecretManager.getAllMasterKeys().forEach(managerKey -> {
+  int keyId = managerKey.getKeyId();
+  boolean found = false;
+  for (DelegationKey stateKey: rmDTMasterKeyState) {
+if (stateKey.getKeyId() == keyId) {
+  found = true;
+  break;
+}
+  }
+  Assert.assertTrue("Master key not found: " + keyId, found);
+});
 
 // request to generate a RMDelegationToken
 GetDelegationTokenRequest request = mock(GetDelegationTokenRequest.class);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. Contributed by Jason Lowe

2018-10-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 a1d75 -> 622919d8d


HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. 
Contributed by Jason Lowe

(cherry picked from commit 9abda83947a5babfe5a650b3409ad952f6782105)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/622919d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/622919d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/622919d8

Branch: refs/heads/branch-2.9
Commit: 622919d8dbde1eb878f2f82252e9b26c9ebcabbb
Parents: a1d
Author: Jason Lowe 
Authored: Wed Oct 17 14:38:42 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 17 14:46:58 2018 -0500

--
 .../hadoop/io/compress/zstd/ZStandardCompressor.java |  2 +-
 .../apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 10 +-
 .../hadoop/io/compress/zstd/ZStandardDecompressor.c  | 11 +--
 3 files changed, 11 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/622919d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
index eb2121a..7445502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
@@ -298,7 +298,7 @@ public class ZStandardCompressor implements Compressor {
   private native static void init(int level, long stream);
   private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
   int srcLen, ByteBuffer dst, int dstLen);
-  private static native int getStreamSize();
+  private native static int getStreamSize();
   private native static void end(long strm);
   private native static void initIDs();
   public native static String getLibraryName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/622919d8/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
index 04f2a3e..055683a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
@@ -139,7 +139,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // Create the compression stream
-JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jobject this) {
+JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jclass clazz) {
 ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
 if (stream == NULL) {
 THROW(env, "java/lang/InternalError", "Error creating the stream");
@@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompres
 }
 
 // Initialize the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jobject this, jint level, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jclass clazz, jint level, jlong stream) {
 size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // free the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jobject this, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jclass clazz, jlong stream) {
 size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -227,7 +227,7 @@ JNIEXPORT jint 

hadoop git commit: HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. Contributed by Jason Lowe

2018-10-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e212d7d81 -> e412d8f6c


HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. 
Contributed by Jason Lowe

(cherry picked from commit 9abda83947a5babfe5a650b3409ad952f6782105)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e412d8f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e412d8f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e412d8f6

Branch: refs/heads/branch-2
Commit: e412d8f6cd8ac4677fd8d5d6ebbb38f628a3854f
Parents: e212d7d8
Author: Jason Lowe 
Authored: Wed Oct 17 14:38:42 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 17 14:45:56 2018 -0500

--
 .../hadoop/io/compress/zstd/ZStandardCompressor.java |  2 +-
 .../apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 10 +-
 .../hadoop/io/compress/zstd/ZStandardDecompressor.c  | 11 +--
 3 files changed, 11 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e412d8f6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
index eb2121a..7445502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
@@ -298,7 +298,7 @@ public class ZStandardCompressor implements Compressor {
   private native static void init(int level, long stream);
   private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
   int srcLen, ByteBuffer dst, int dstLen);
-  private static native int getStreamSize();
+  private native static int getStreamSize();
   private native static void end(long strm);
   private native static void initIDs();
   public native static String getLibraryName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e412d8f6/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
index 04f2a3e..055683a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
@@ -139,7 +139,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // Create the compression stream
-JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jobject this) {
+JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jclass clazz) {
 ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
 if (stream == NULL) {
 THROW(env, "java/lang/InternalError", "Error creating the stream");
@@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompres
 }
 
 // Initialize the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jobject this, jint level, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jclass clazz, jint level, jlong stream) {
 size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // free the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jobject this, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jclass clazz, jlong stream) {
 size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -227,7 +227,7 @@ JNIEXPORT jint 

hadoop git commit: HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. Contributed by Jason Lowe

2018-10-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 ae42d59eb -> 0aee3a0c3


HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. 
Contributed by Jason Lowe

(cherry picked from commit 9abda83947a5babfe5a650b3409ad952f6782105)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0aee3a0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0aee3a0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0aee3a0c

Branch: refs/heads/branch-3.0
Commit: 0aee3a0c3f69dc55a9066b3c31081650e425889c
Parents: ae42d59
Author: Jason Lowe 
Authored: Wed Oct 17 14:38:42 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 17 14:44:39 2018 -0500

--
 .../hadoop/io/compress/zstd/ZStandardCompressor.java |  2 +-
 .../apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 10 +-
 .../hadoop/io/compress/zstd/ZStandardDecompressor.c  | 11 +--
 3 files changed, 11 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0aee3a0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
index eb2121a..7445502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
@@ -298,7 +298,7 @@ public class ZStandardCompressor implements Compressor {
   private native static void init(int level, long stream);
   private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
   int srcLen, ByteBuffer dst, int dstLen);
-  private static native int getStreamSize();
+  private native static int getStreamSize();
   private native static void end(long strm);
   private native static void initIDs();
   public native static String getLibraryName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0aee3a0c/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
index 04f2a3e..055683a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
@@ -139,7 +139,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // Create the compression stream
-JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jobject this) {
+JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jclass clazz) {
 ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
 if (stream == NULL) {
 THROW(env, "java/lang/InternalError", "Error creating the stream");
@@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompres
 }
 
 // Initialize the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jobject this, jint level, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jclass clazz, jint level, jlong stream) {
 size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // free the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jobject this, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jclass clazz, jlong stream) {
 size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -227,7 +227,7 @@ JNIEXPORT jint 

hadoop git commit: HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. Contributed by Jason Lowe

2018-10-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 9c350785d -> 65b27f8ed


HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. 
Contributed by Jason Lowe

(cherry picked from commit 9abda83947a5babfe5a650b3409ad952f6782105)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65b27f8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65b27f8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65b27f8e

Branch: refs/heads/branch-3.1
Commit: 65b27f8ed2824b40bc7730eaa960da1b759fda43
Parents: 9c35078
Author: Jason Lowe 
Authored: Wed Oct 17 14:38:42 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 17 14:43:19 2018 -0500

--
 .../hadoop/io/compress/zstd/ZStandardCompressor.java |  2 +-
 .../apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 10 +-
 .../hadoop/io/compress/zstd/ZStandardDecompressor.c  | 11 +--
 3 files changed, 11 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65b27f8e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
index eb2121a..7445502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
@@ -298,7 +298,7 @@ public class ZStandardCompressor implements Compressor {
   private native static void init(int level, long stream);
   private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
   int srcLen, ByteBuffer dst, int dstLen);
-  private static native int getStreamSize();
+  private native static int getStreamSize();
   private native static void end(long strm);
   private native static void initIDs();
   public native static String getLibraryName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65b27f8e/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
index 04f2a3e..055683a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
@@ -139,7 +139,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // Create the compression stream
-JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jobject this) {
+JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jclass clazz) {
 ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
 if (stream == NULL) {
 THROW(env, "java/lang/InternalError", "Error creating the stream");
@@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompres
 }
 
 // Initialize the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jobject this, jint level, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jclass clazz, jint level, jlong stream) {
 size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // free the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jobject this, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jclass clazz, jlong stream) {
 size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -227,7 +227,7 @@ JNIEXPORT jint 

hadoop git commit: HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. Contributed by Jason Lowe

2018-10-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 6380ee551 -> 30fc5966a


HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. 
Contributed by Jason Lowe

(cherry picked from commit 9abda83947a5babfe5a650b3409ad952f6782105)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30fc5966
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30fc5966
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30fc5966

Branch: refs/heads/branch-3.2
Commit: 30fc5966a2445a3ec559f840626770f39efedbbf
Parents: 6380ee5
Author: Jason Lowe 
Authored: Wed Oct 17 14:38:42 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 17 14:40:57 2018 -0500

--
 .../hadoop/io/compress/zstd/ZStandardCompressor.java |  2 +-
 .../apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 10 +-
 .../hadoop/io/compress/zstd/ZStandardDecompressor.c  | 11 +--
 3 files changed, 11 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5966/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
index eb2121a..7445502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
@@ -298,7 +298,7 @@ public class ZStandardCompressor implements Compressor {
   private native static void init(int level, long stream);
   private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
   int srcLen, ByteBuffer dst, int dstLen);
-  private static native int getStreamSize();
+  private native static int getStreamSize();
   private native static void end(long strm);
   private native static void initIDs();
   public native static String getLibraryName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5966/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
index 04f2a3e..055683a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
@@ -139,7 +139,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // Create the compression stream
-JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jobject this) {
+JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jclass clazz) {
 ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
 if (stream == NULL) {
 THROW(env, "java/lang/InternalError", "Error creating the stream");
@@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompres
 }
 
 // Initialize the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jobject this, jint level, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jclass clazz, jint level, jlong stream) {
 size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // free the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jobject this, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jclass clazz, jlong stream) {
 size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -227,7 +227,7 @@ JNIEXPORT jint 

hadoop git commit: HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. Contributed by Jason Lowe

2018-10-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 24dc068a3 -> 9abda8394


HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. 
Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9abda839
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9abda839
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9abda839

Branch: refs/heads/trunk
Commit: 9abda83947a5babfe5a650b3409ad952f6782105
Parents: 24dc068
Author: Jason Lowe 
Authored: Wed Oct 17 14:38:42 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 17 14:38:42 2018 -0500

--
 .../hadoop/io/compress/zstd/ZStandardCompressor.java |  2 +-
 .../apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 10 +-
 .../hadoop/io/compress/zstd/ZStandardDecompressor.c  | 11 +--
 3 files changed, 11 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9abda839/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
index eb2121a..7445502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
@@ -298,7 +298,7 @@ public class ZStandardCompressor implements Compressor {
   private native static void init(int level, long stream);
   private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
   int srcLen, ByteBuffer dst, int dstLen);
-  private static native int getStreamSize();
+  private native static int getStreamSize();
   private native static void end(long strm);
   private native static void initIDs();
   public native static String getLibraryName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9abda839/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
index 04f2a3e..055683a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
@@ -139,7 +139,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // Create the compression stream
-JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jobject this) {
+JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv 
*env, jclass clazz) {
 ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
 if (stream == NULL) {
 THROW(env, "java/lang/InternalError", "Error creating the stream");
@@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompres
 }
 
 // Initialize the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jobject this, jint level, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, 
jclass clazz, jint level, jlong stream) {
 size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // free the compression stream
-JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jobject this, jlong stream) {
+JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, 
jclass clazz, jlong stream) {
 size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
 if (dlsym_ZSTD_isError(result)) {
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -227,7 +227,7 @@ JNIEXPORT jint 
Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_defla
 }
 
 JNIEXPORT jstring JNICALL 

hadoop git commit: YARN-8861. executorLock is misleading in ContainerLaunch. Contributed by Chandni Singh

2018-10-11 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 70d39c74d -> cdbca8b13


YARN-8861. executorLock is misleading in ContainerLaunch. Contributed by 
Chandni Singh

(cherry picked from commit e787d65a08f5d5245d2313fc34f2dde518bfaa5b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cdbca8b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cdbca8b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cdbca8b1

Branch: refs/heads/branch-3.2
Commit: cdbca8b133f0d12d448470cde7af58262dbbd824
Parents: 70d39c7
Author: Jason Lowe 
Authored: Thu Oct 11 10:54:57 2018 -0500
Committer: Jason Lowe 
Committed: Thu Oct 11 10:58:48 2018 -0500

--
 .../containermanager/launcher/ContainerLaunch.java| 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdbca8b1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 6776836..f198e83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -135,7 +135,7 @@ public class ContainerLaunch implements Callable {
 
   protected final LocalDirsHandlerService dirsHandler;
 
-  private final Lock containerExecLock = new ReentrantLock();
+  private final Lock launchLock = new ReentrantLock();
 
   public ContainerLaunch(Context context, Configuration configuration,
   Dispatcher dispatcher, ContainerExecutor exec, Application app,
@@ -485,11 +485,11 @@ public class ContainerLaunch implements Callable 
{
   throws IOException, ConfigurationException {
 int launchPrep = prepareForLaunch(ctx);
 if (launchPrep == 0) {
-  containerExecLock.lock();
+  launchLock.lock();
   try {
 return exec.launchContainer(ctx);
   } finally {
-containerExecLock.unlock();
+launchLock.unlock();
   }
 }
 return launchPrep;
@@ -499,18 +499,18 @@ public class ContainerLaunch implements Callable 
{
   throws IOException, ConfigurationException {
 int launchPrep = prepareForLaunch(ctx);
 if (launchPrep == 0) {
-  containerExecLock.lock();
+  launchLock.lock();
   try {
 return exec.relaunchContainer(ctx);
   } finally {
-containerExecLock.unlock();
+launchLock.unlock();
   }
 }
 return launchPrep;
   }
 
   void reapContainer() throws IOException {
-containerExecLock.lock();
+launchLock.lock();
 try {
   // Reap the container
   boolean result = exec.reapContainer(
@@ -524,7 +524,7 @@ public class ContainerLaunch implements Callable {
   }
   cleanupContainerFiles(getContainerWorkDir());
 } finally {
-  containerExecLock.unlock();
+  launchLock.unlock();
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8861. executorLock is misleading in ContainerLaunch. Contributed by Chandni Singh

2018-10-11 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk ee816f1fd -> e787d65a0


YARN-8861. executorLock is misleading in ContainerLaunch. Contributed by 
Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e787d65a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e787d65a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e787d65a

Branch: refs/heads/trunk
Commit: e787d65a08f5d5245d2313fc34f2dde518bfaa5b
Parents: ee816f1
Author: Jason Lowe 
Authored: Thu Oct 11 10:54:57 2018 -0500
Committer: Jason Lowe 
Committed: Thu Oct 11 10:54:57 2018 -0500

--
 .../containermanager/launcher/ContainerLaunch.java| 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e787d65a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 6776836..f198e83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -135,7 +135,7 @@ public class ContainerLaunch implements Callable {
 
   protected final LocalDirsHandlerService dirsHandler;
 
-  private final Lock containerExecLock = new ReentrantLock();
+  private final Lock launchLock = new ReentrantLock();
 
   public ContainerLaunch(Context context, Configuration configuration,
   Dispatcher dispatcher, ContainerExecutor exec, Application app,
@@ -485,11 +485,11 @@ public class ContainerLaunch implements Callable 
{
   throws IOException, ConfigurationException {
 int launchPrep = prepareForLaunch(ctx);
 if (launchPrep == 0) {
-  containerExecLock.lock();
+  launchLock.lock();
   try {
 return exec.launchContainer(ctx);
   } finally {
-containerExecLock.unlock();
+launchLock.unlock();
   }
 }
 return launchPrep;
@@ -499,18 +499,18 @@ public class ContainerLaunch implements Callable 
{
   throws IOException, ConfigurationException {
 int launchPrep = prepareForLaunch(ctx);
 if (launchPrep == 0) {
-  containerExecLock.lock();
+  launchLock.lock();
   try {
 return exec.relaunchContainer(ctx);
   } finally {
-containerExecLock.unlock();
+launchLock.unlock();
   }
 }
 return launchPrep;
   }
 
   void reapContainer() throws IOException {
-containerExecLock.lock();
+launchLock.lock();
 try {
   // Reap the container
   boolean result = exec.reapContainer(
@@ -524,7 +524,7 @@ public class ContainerLaunch implements Callable {
   }
   cleanupContainerFiles(getContainerWorkDir());
 } finally {
-  containerExecLock.unlock();
+  launchLock.unlock();
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7644. NM gets backed up deleting docker containers. Contributed by Chandni Singh

2018-10-10 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 5f97c0cd7 -> 145c7aa66


YARN-7644. NM gets backed up deleting docker containers. Contributed by Chandni 
Singh

(cherry picked from commit 5ce70e1211e624d58e8bb1181aec00729ebdc085)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/145c7aa6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/145c7aa6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/145c7aa6

Branch: refs/heads/branch-3.2
Commit: 145c7aa663bcc55e7f354fe4ae12110650eb4c42
Parents: 5f97c0c
Author: Jason Lowe 
Authored: Wed Oct 10 09:52:19 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 10 10:01:52 2018 -0500

--
 .../launcher/ContainerCleanup.java  | 229 +++
 .../launcher/ContainerLaunch.java   | 226 +-
 .../launcher/ContainersLauncher.java|  14 +-
 .../launcher/TestContainerCleanup.java  | 108 +
 .../launcher/TestContainersLauncher.java|  12 +-
 5 files changed, 401 insertions(+), 188 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/145c7aa6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
new file mode 100644
index 000..963d28b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReapContext;
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.EXIT_CODE_FILE_SUFFIX;
+
+/**
+ * Cleanup the container.
+ * Cancels the launch if launch has not started yet or signals
+ * the executor to not execute the process if not already done so.
+ * Also, sends a SIGTERM followed by a SIGKILL to the process if
+ * the process id is available.
+ */
+public class ContainerCleanup implements Runnable {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ContainerCleanup.class);
+
+  private 

hadoop git commit: YARN-7644. NM gets backed up deleting docker containers. Contributed by Chandni Singh

2018-10-10 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk cd280514b -> 5ce70e121


YARN-7644. NM gets backed up deleting docker containers. Contributed by Chandni 
Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ce70e12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ce70e12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ce70e12

Branch: refs/heads/trunk
Commit: 5ce70e1211e624d58e8bb1181aec00729ebdc085
Parents: cd28051
Author: Jason Lowe 
Authored: Wed Oct 10 09:52:19 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 10 09:52:19 2018 -0500

--
 .../launcher/ContainerCleanup.java  | 229 +++
 .../launcher/ContainerLaunch.java   | 226 +-
 .../launcher/ContainersLauncher.java|  14 +-
 .../launcher/TestContainerCleanup.java  | 108 +
 .../launcher/TestContainersLauncher.java|  12 +-
 5 files changed, 401 insertions(+), 188 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ce70e12/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
new file mode 100644
index 000..963d28b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReapContext;
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.EXIT_CODE_FILE_SUFFIX;
+
+/**
+ * Cleanup the container.
+ * Cancels the launch if launch has not started yet or signals
+ * the executor to not execute the process if not already done so.
+ * Also, sends a SIGTERM followed by a SIGKILL to the process if
+ * the process id is available.
+ */
+public class ContainerCleanup implements Runnable {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ContainerCleanup.class);
+
+  private final Context context;
+  private final Configuration conf;
+  private final 

hadoop git commit: MAPREDUCE-7130. Rumen crashes trying to handle MRAppMaster recovery events. Contributed by Peter Bacsko

2018-10-09 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 03d66b1f5 -> 4b5b1ac3d


MAPREDUCE-7130. Rumen crashes trying to handle MRAppMaster recovery events. 
Contributed by Peter Bacsko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b5b1ac3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b5b1ac3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b5b1ac3

Branch: refs/heads/trunk
Commit: 4b5b1ac3d10f1a190450ad59b8be5c9568921852
Parents: 03d66b1
Author: Jason Lowe 
Authored: Tue Oct 9 13:27:03 2018 -0500
Committer: Jason Lowe 
Committed: Tue Oct 9 13:27:03 2018 -0500

--
 .../hadoop/tools/rumen/Pre21JobHistoryConstants.java | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b5b1ac3/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
--
diff --git 
a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
 
b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
index 239d666..8adff46 100644
--- 
a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
+++ 
b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
@@ -44,12 +44,17 @@ public class Pre21JobHistoryConstants {
   /**
* This enum contains some of the values commonly used by history log 
events. 
* since values in history can only be strings - Values.name() is used in 
-   * most places in history file. 
+   * most places in history file.
+   *
+   * Note: "SUCCEEDED" is actually not a pre-0.21 value, but it might appear
+   * in jhist logs when the event is an unsuccessful job completion, yet, the
+   * overall job status is "SUCCEEDED".
*/
   public static enum Values {
-SUCCESS, FAILED, KILLED, MAP, REDUCE, CLEANUP, RUNNING, PREP, SETUP
+SUCCESS, SUCCEEDED, FAILED, KILLED, MAP, REDUCE, CLEANUP, RUNNING, PREP,
+SETUP
   }
-  
+
   /**
* Regex for Pre21 V1(old) jobhistory filename
*   i.e jt-identifier_job-id_user-name_job-name


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-4254. ApplicationAttempt stuck for ever due to UnknownHostException. Contributed by Bibin A Chundatt

2018-10-05 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk c96836565 -> 9bb2801e8


YARN-4254. ApplicationAttempt stuck for ever due to UnknownHostException. 
Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9bb2801e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9bb2801e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9bb2801e

Branch: refs/heads/trunk
Commit: 9bb2801e8ce1e6298241944a65f593f555ae10e2
Parents: c968365
Author: Jason Lowe 
Authored: Fri Oct 5 15:52:46 2018 -0500
Committer: Jason Lowe 
Committed: Fri Oct 5 15:52:46 2018 -0500

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  9 +++-
 .../src/main/resources/yarn-default.xml |  5 +++
 .../resourcemanager/ResourceTrackerService.java | 23 ++
 .../TestResourceTrackerService.java | 45 
 4 files changed, 81 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9bb2801e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 95861d7..6488ebf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -541,7 +541,14 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT =
 RM_PREFIX + "resource-tracker.client.thread-count";
   public static final int DEFAULT_RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT = 50;
-  
+
+  /** Check IP and hostname resolution during nodemanager registration.*/
+  public static final String RM_NM_REGISTRATION_IP_HOSTNAME_CHECK_KEY =
+  RM_PREFIX + "resource-tracker.nm.ip-hostname-check";
+
+  public static final boolean DEFAULT_RM_NM_REGISTRATION_IP_HOSTNAME_CHECK_KEY 
=
+  false;
+
   /** The class to use as the resource scheduler.*/
   public static final String RM_SCHEDULER = 
 RM_PREFIX + "scheduler.class";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9bb2801e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index e6f7b37..8e9f15b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -265,6 +265,11 @@
   
 
   
+   yarn.resourcemanager.resource-tracker.nm.ip-hostname-check
+   false
+  
+
+  
 Are acls enabled.
 yarn.acl.enable
 false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9bb2801e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index b67172e..3d6eda2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager;
 
 import java.io.IOException;
 import java.io.InputStream;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
@@ -39,6 +40,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.NetUtils;
 import 

hadoop git commit: HADOOP-15820. ZStandardDecompressor native code sets an integer field as a long. Contributed by Jason Lowe

2018-10-05 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 a3c564b01 -> 034e8fc7c


HADOOP-15820. ZStandardDecompressor native code sets an integer field as a 
long. Contributed by Jason Lowe

(cherry picked from commit f13e231025333ebf80b30bbdce1296cef554943b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/034e8fc7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/034e8fc7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/034e8fc7

Branch: refs/heads/branch-2.9
Commit: 034e8fc7ca03d38b713c329280d8b9da73423c26
Parents: a3c564b
Author: Jason Lowe 
Authored: Fri Oct 5 09:06:02 2018 -0500
Committer: Jason Lowe 
Committed: Fri Oct 5 09:32:57 2018 -0500

--
 .../src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/034e8fc7/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
index 1236756..e75a6ef 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
@@ -145,7 +145,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardDecompre
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
 return;
 }
-(*env)->SetLongField(env, this, ZStandardDecompressor_remaining, 0);
+(*env)->SetIntField(env, this, ZStandardDecompressor_remaining, 0);
 }
 
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-15820. ZStandardDecompressor native code sets an integer field as a long. Contributed by Jason Lowe

2018-10-05 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7b88a57c3 -> d7e9ea37b


HADOOP-15820. ZStandardDecompressor native code sets an integer field as a 
long. Contributed by Jason Lowe

(cherry picked from commit f13e231025333ebf80b30bbdce1296cef554943b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7e9ea37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7e9ea37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7e9ea37

Branch: refs/heads/branch-2
Commit: d7e9ea37bf33fd3df86cd889b0f48d6c603eec67
Parents: 7b88a57
Author: Jason Lowe 
Authored: Fri Oct 5 09:06:02 2018 -0500
Committer: Jason Lowe 
Committed: Fri Oct 5 09:30:50 2018 -0500

--
 .../src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7e9ea37/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
index 1236756..e75a6ef 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
@@ -145,7 +145,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardDecompre
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
 return;
 }
-(*env)->SetLongField(env, this, ZStandardDecompressor_remaining, 0);
+(*env)->SetIntField(env, this, ZStandardDecompressor_remaining, 0);
 }
 
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-15820. ZStandardDecompressor native code sets an integer field as a long. Contributed by Jason Lowe

2018-10-05 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 4bad89802 -> bbceae10b


HADOOP-15820. ZStandardDecompressor native code sets an integer field as a 
long. Contributed by Jason Lowe

(cherry picked from commit f13e231025333ebf80b30bbdce1296cef554943b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbceae10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbceae10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbceae10

Branch: refs/heads/branch-3.0
Commit: bbceae10b9b6430b93cc6a76588ff3e45a453433
Parents: 4bad898
Author: Jason Lowe 
Authored: Fri Oct 5 09:06:02 2018 -0500
Committer: Jason Lowe 
Committed: Fri Oct 5 09:29:41 2018 -0500

--
 .../src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbceae10/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
index 1236756..e75a6ef 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
@@ -145,7 +145,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardDecompre
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
 return;
 }
-(*env)->SetLongField(env, this, ZStandardDecompressor_remaining, 0);
+(*env)->SetIntField(env, this, ZStandardDecompressor_remaining, 0);
 }
 
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-15820. ZStandardDecompressor native code sets an integer field as a long. Contributed by Jason Lowe

2018-10-05 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 f3f0a9a2c -> df7ec58ce


HADOOP-15820. ZStandardDecompressor native code sets an integer field as a 
long. Contributed by Jason Lowe

(cherry picked from commit f13e231025333ebf80b30bbdce1296cef554943b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df7ec58c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df7ec58c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df7ec58c

Branch: refs/heads/branch-3.1
Commit: df7ec58ced29fc98db04e2d3f6d2c915055382eb
Parents: f3f0a9a
Author: Jason Lowe 
Authored: Fri Oct 5 09:06:02 2018 -0500
Committer: Jason Lowe 
Committed: Fri Oct 5 09:27:23 2018 -0500

--
 .../src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df7ec58c/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
index 1236756..e75a6ef 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
@@ -145,7 +145,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardDecompre
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
 return;
 }
-(*env)->SetLongField(env, this, ZStandardDecompressor_remaining, 0);
+(*env)->SetIntField(env, this, ZStandardDecompressor_remaining, 0);
 }
 
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-15820. ZStandardDecompressor native code sets an integer field as a long. Contributed by Jason Lowe

2018-10-05 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 25b1e8a48 -> f13e23102


HADOOP-15820. ZStandardDecompressor native code sets an integer field as a 
long. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f13e2310
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f13e2310
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f13e2310

Branch: refs/heads/trunk
Commit: f13e231025333ebf80b30bbdce1296cef554943b
Parents: 25b1e8a
Author: Jason Lowe 
Authored: Fri Oct 5 09:06:02 2018 -0500
Committer: Jason Lowe 
Committed: Fri Oct 5 09:06:02 2018 -0500

--
 .../src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f13e2310/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
index 1236756..e75a6ef 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c
@@ -145,7 +145,7 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_io_compress_zstd_ZStandardDecompre
 THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
 return;
 }
-(*env)->SetLongField(env, this, ZStandardDecompressor_remaining, 0);
+(*env)->SetIntField(env, this, ZStandardDecompressor_remaining, 0);
 }
 
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: YARN-8804. resourceLimits may be wrongly calculated when leaf-queue is blocked in cluster with 3+ level queues. Contributed by Tao Yang

2018-09-26 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 552248139 -> 2a9d8da6d


YARN-8804. resourceLimits may be wrongly calculated when leaf-queue is blocked 
in cluster with 3+ level queues. Contributed by Tao Yang

(cherry picked from commit 6b988d821e62d29c118e10a7213583b92c302baf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a9d8da6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a9d8da6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a9d8da6

Branch: refs/heads/branch-2.8
Commit: 2a9d8da6d4b76c141fd8460cc6bd007bec1b27fd
Parents: 5522481
Author: Jason Lowe 
Authored: Wed Sep 26 14:43:00 2018 -0700
Committer: Jason Lowe 
Committed: Wed Sep 26 17:25:18 2018 -0700

--
 .../scheduler/ResourceLimits.java   | 24 ++
 .../scheduler/capacity/ParentQueue.java | 17 ++--
 .../capacity/TestContainerAllocation.java   | 81 
 3 files changed, 116 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9d8da6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
index 721eb36..820d2fa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
@@ -38,6 +38,9 @@ public class ResourceLimits {
   // containers.
   private volatile Resource headroom;
 
+  // How much resource should be reserved for high-priority blocked queues
+  private Resource blockedHeadroom;
+
   private boolean allowPreempt = false;
 
   public ResourceLimits(Resource limit) {
@@ -81,4 +84,25 @@ public class ResourceLimits {
   public void setIsAllowPreemption(boolean allowPreempt) {
this.allowPreempt = allowPreempt;
   }
+
+  public void addBlockedHeadroom(Resource resource) {
+if (blockedHeadroom == null) {
+  blockedHeadroom = Resource.newInstance(0, 0);
+}
+Resources.addTo(blockedHeadroom, resource);
+  }
+
+  public Resource getBlockedHeadroom() {
+if (blockedHeadroom == null) {
+  return Resources.none();
+}
+return blockedHeadroom;
+  }
+
+  public Resource getNetLimit() {
+if (blockedHeadroom != null) {
+  return Resources.subtract(limit, blockedHeadroom);
+}
+return limit;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9d8da6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index dfb372f..c7c4473 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -582,7 +582,6 @@ public class ParentQueue extends AbstractCSQueue {
   SchedulingMode schedulingMode) {
 CSAssignment assignment = CSAssignment.NULL_ASSIGNMENT;
 
-Resource parentLimits = limits.getLimit();
 printChildQueues();
 
 // Try to assign to most 'under-served' sub-queue
@@ -596,7 +595,8 @@ public class ParentQueue extends AbstractCSQueue {
 
   // Get ResourceLimits of child queue before assign containers
   ResourceLimits childLimits =
-  getResourceLimitsOfChild(childQueue, cluster, parentLimits, 
node.getPartition());
+  getResourceLimitsOfChild(childQueue, cluster, limits.getNetLimit(),
+ 

hadoop git commit: YARN-8804. resourceLimits may be wrongly calculated when leaf-queue is blocked in cluster with 3+ level queues. Contributed by Tao Yang

2018-09-26 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 7a0840e25 -> 463d1c312


YARN-8804. resourceLimits may be wrongly calculated when leaf-queue is blocked 
in cluster with 3+ level queues. Contributed by Tao Yang

(cherry picked from commit 6b988d821e62d29c118e10a7213583b92c302baf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/463d1c31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/463d1c31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/463d1c31

Branch: refs/heads/branch-2.9
Commit: 463d1c312b5209bb4c417afdf7c3afe0879ffa75
Parents: 7a0840e
Author: Jason Lowe 
Authored: Wed Sep 26 14:43:00 2018 -0700
Committer: Jason Lowe 
Committed: Wed Sep 26 17:06:47 2018 -0700

--
 .../scheduler/ResourceLimits.java   | 24 ++
 .../scheduler/capacity/ParentQueue.java | 18 +++--
 .../capacity/TestContainerAllocation.java   | 80 
 3 files changed, 115 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/463d1c31/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
index 721eb36..820d2fa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
@@ -38,6 +38,9 @@ public class ResourceLimits {
   // containers.
   private volatile Resource headroom;
 
+  // How much resource should be reserved for high-priority blocked queues
+  private Resource blockedHeadroom;
+
   private boolean allowPreempt = false;
 
   public ResourceLimits(Resource limit) {
@@ -81,4 +84,25 @@ public class ResourceLimits {
   public void setIsAllowPreemption(boolean allowPreempt) {
this.allowPreempt = allowPreempt;
   }
+
+  public void addBlockedHeadroom(Resource resource) {
+if (blockedHeadroom == null) {
+  blockedHeadroom = Resource.newInstance(0, 0);
+}
+Resources.addTo(blockedHeadroom, resource);
+  }
+
+  public Resource getBlockedHeadroom() {
+if (blockedHeadroom == null) {
+  return Resources.none();
+}
+return blockedHeadroom;
+  }
+
+  public Resource getNetLimit() {
+if (blockedHeadroom != null) {
+  return Resources.subtract(limit, blockedHeadroom);
+}
+return limit;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/463d1c31/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 6800b74..213bf07 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -714,7 +714,6 @@ public class ParentQueue extends AbstractCSQueue {
   SchedulingMode schedulingMode) {
 CSAssignment assignment = CSAssignment.NULL_ASSIGNMENT;
 
-Resource parentLimits = limits.getLimit();
 printChildQueues();
 
 // Try to assign to most 'under-served' sub-queue
@@ -728,9 +727,9 @@ public class ParentQueue extends AbstractCSQueue {
 
   // Get ResourceLimits of child queue before assign containers
   ResourceLimits childLimits =
-  getResourceLimitsOfChild(childQueue, cluster, parentLimits,
+  getResourceLimitsOfChild(childQueue, cluster, limits.getNetLimit(),
   

hadoop git commit: YARN-8804. resourceLimits may be wrongly calculated when leaf-queue is blocked in cluster with 3+ level queues. Contributed by Tao Yang

2018-09-26 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3d67080a4 -> 1b0a11050


YARN-8804. resourceLimits may be wrongly calculated when leaf-queue is blocked 
in cluster with 3+ level queues. Contributed by Tao Yang

(cherry picked from commit 6b988d821e62d29c118e10a7213583b92c302baf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b0a1105
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b0a1105
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b0a1105

Branch: refs/heads/branch-2
Commit: 1b0a1105014ccfe90f70bb701e7c2d49c4346428
Parents: 3d67080
Author: Jason Lowe 
Authored: Wed Sep 26 14:43:00 2018 -0700
Committer: Jason Lowe 
Committed: Wed Sep 26 17:06:07 2018 -0700

--
 .../scheduler/ResourceLimits.java   | 24 ++
 .../scheduler/capacity/ParentQueue.java | 18 +++--
 .../capacity/TestContainerAllocation.java   | 80 
 3 files changed, 115 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b0a1105/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
index 721eb36..820d2fa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
@@ -38,6 +38,9 @@ public class ResourceLimits {
   // containers.
   private volatile Resource headroom;
 
+  // How much resource should be reserved for high-priority blocked queues
+  private Resource blockedHeadroom;
+
   private boolean allowPreempt = false;
 
   public ResourceLimits(Resource limit) {
@@ -81,4 +84,25 @@ public class ResourceLimits {
   public void setIsAllowPreemption(boolean allowPreempt) {
this.allowPreempt = allowPreempt;
   }
+
+  public void addBlockedHeadroom(Resource resource) {
+if (blockedHeadroom == null) {
+  blockedHeadroom = Resource.newInstance(0, 0);
+}
+Resources.addTo(blockedHeadroom, resource);
+  }
+
+  public Resource getBlockedHeadroom() {
+if (blockedHeadroom == null) {
+  return Resources.none();
+}
+return blockedHeadroom;
+  }
+
+  public Resource getNetLimit() {
+if (blockedHeadroom != null) {
+  return Resources.subtract(limit, blockedHeadroom);
+}
+return limit;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b0a1105/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 6800b74..213bf07 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -714,7 +714,6 @@ public class ParentQueue extends AbstractCSQueue {
   SchedulingMode schedulingMode) {
 CSAssignment assignment = CSAssignment.NULL_ASSIGNMENT;
 
-Resource parentLimits = limits.getLimit();
 printChildQueues();
 
 // Try to assign to most 'under-served' sub-queue
@@ -728,9 +727,9 @@ public class ParentQueue extends AbstractCSQueue {
 
   // Get ResourceLimits of child queue before assign containers
   ResourceLimits childLimits =
-  getResourceLimitsOfChild(childQueue, cluster, parentLimits,
+  getResourceLimitsOfChild(childQueue, cluster, limits.getNetLimit(),
   ps.getPartition());

hadoop git commit: YARN-8804. resourceLimits may be wrongly calculated when leaf-queue is blocked in cluster with 3+ level queues. Contributed by Tao Yang

2018-09-26 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 b28dacf60 -> 17583e690


YARN-8804. resourceLimits may be wrongly calculated when leaf-queue is blocked 
in cluster with 3+ level queues. Contributed by Tao Yang

(cherry picked from commit 6b988d821e62d29c118e10a7213583b92c302baf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17583e69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17583e69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17583e69

Branch: refs/heads/branch-3.0
Commit: 17583e690a21bdf60c73a1f5ed5fb683d56ce39e
Parents: b28dacf
Author: Jason Lowe 
Authored: Wed Sep 26 14:43:00 2018 -0700
Committer: Jason Lowe 
Committed: Wed Sep 26 16:54:29 2018 -0700

--
 .../scheduler/ResourceLimits.java   | 24 ++
 .../scheduler/capacity/ParentQueue.java | 16 ++--
 .../capacity/TestContainerAllocation.java   | 80 
 3 files changed, 114 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17583e69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
index 721eb36..820d2fa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
@@ -38,6 +38,9 @@ public class ResourceLimits {
   // containers.
   private volatile Resource headroom;
 
+  // How much resource should be reserved for high-priority blocked queues
+  private Resource blockedHeadroom;
+
   private boolean allowPreempt = false;
 
   public ResourceLimits(Resource limit) {
@@ -81,4 +84,25 @@ public class ResourceLimits {
   public void setIsAllowPreemption(boolean allowPreempt) {
this.allowPreempt = allowPreempt;
   }
+
+  public void addBlockedHeadroom(Resource resource) {
+if (blockedHeadroom == null) {
+  blockedHeadroom = Resource.newInstance(0, 0);
+}
+Resources.addTo(blockedHeadroom, resource);
+  }
+
+  public Resource getBlockedHeadroom() {
+if (blockedHeadroom == null) {
+  return Resources.none();
+}
+return blockedHeadroom;
+  }
+
+  public Resource getNetLimit() {
+if (blockedHeadroom != null) {
+  return Resources.subtract(limit, blockedHeadroom);
+}
+return limit;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17583e69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 959ca51..7e5f7c2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -715,7 +715,6 @@ public class ParentQueue extends AbstractCSQueue {
   SchedulingMode schedulingMode) {
 CSAssignment assignment = CSAssignment.NULL_ASSIGNMENT;
 
-Resource parentLimits = limits.getLimit();
 printChildQueues();
 
 // Try to assign to most 'under-served' sub-queue
@@ -729,7 +728,7 @@ public class ParentQueue extends AbstractCSQueue {
 
   // Get ResourceLimits of child queue before assign containers
   ResourceLimits childLimits =
-  getResourceLimitsOfChild(childQueue, cluster, parentLimits,
+  getResourceLimitsOfChild(childQueue, cluster, limits.getNetLimit(),
   

hadoop git commit: YARN-8804. resourceLimits may be wrongly calculated when leaf-queue is blocked in cluster with 3+ level queues. Contributed by Tao Yang

2018-09-26 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 02f8b5da4 -> a56a345e0


YARN-8804. resourceLimits may be wrongly calculated when leaf-queue is blocked 
in cluster with 3+ level queues. Contributed by Tao Yang

(cherry picked from commit 6b988d821e62d29c118e10a7213583b92c302baf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a56a345e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a56a345e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a56a345e

Branch: refs/heads/branch-3.1
Commit: a56a345e079ba90760a3b1fcf52a303a330ef8c7
Parents: 02f8b5d
Author: Jason Lowe 
Authored: Wed Sep 26 14:43:00 2018 -0700
Committer: Jason Lowe 
Committed: Wed Sep 26 16:15:48 2018 -0700

--
 .../scheduler/ResourceLimits.java   | 24 ++
 .../scheduler/capacity/ParentQueue.java | 16 ++--
 .../capacity/TestContainerAllocation.java   | 78 
 3 files changed, 112 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a56a345e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
index 721eb36..820d2fa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
@@ -38,6 +38,9 @@ public class ResourceLimits {
   // containers.
   private volatile Resource headroom;
 
+  // How much resource should be reserved for high-priority blocked queues
+  private Resource blockedHeadroom;
+
   private boolean allowPreempt = false;
 
   public ResourceLimits(Resource limit) {
@@ -81,4 +84,25 @@ public class ResourceLimits {
   public void setIsAllowPreemption(boolean allowPreempt) {
this.allowPreempt = allowPreempt;
   }
+
+  public void addBlockedHeadroom(Resource resource) {
+if (blockedHeadroom == null) {
+  blockedHeadroom = Resource.newInstance(0, 0);
+}
+Resources.addTo(blockedHeadroom, resource);
+  }
+
+  public Resource getBlockedHeadroom() {
+if (blockedHeadroom == null) {
+  return Resources.none();
+}
+return blockedHeadroom;
+  }
+
+  public Resource getNetLimit() {
+if (blockedHeadroom != null) {
+  return Resources.subtract(limit, blockedHeadroom);
+}
+return limit;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a56a345e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index e65f063..7eb1c29 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -776,7 +776,6 @@ public class ParentQueue extends AbstractCSQueue {
   SchedulingMode schedulingMode) {
 CSAssignment assignment = CSAssignment.NULL_ASSIGNMENT;
 
-Resource parentLimits = limits.getLimit();
 printChildQueues();
 
 // Try to assign to most 'under-served' sub-queue
@@ -790,7 +789,7 @@ public class ParentQueue extends AbstractCSQueue {
 
   // Get ResourceLimits of child queue before assign containers
   ResourceLimits childLimits =
-  getResourceLimitsOfChild(childQueue, cluster, parentLimits,
+  getResourceLimitsOfChild(childQueue, cluster, limits.getNetLimit(),
   

hadoop git commit: YARN-8804. resourceLimits may be wrongly calculated when leaf-queue is blocked in cluster with 3+ level queues. Contributed by Tao Yang

2018-09-26 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 913f87dad -> 6b988d821


YARN-8804. resourceLimits may be wrongly calculated when leaf-queue is blocked 
in cluster with 3+ level queues. Contributed by Tao Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b988d82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b988d82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b988d82

Branch: refs/heads/trunk
Commit: 6b988d821e62d29c118e10a7213583b92c302baf
Parents: 913f87d
Author: Jason Lowe 
Authored: Wed Sep 26 14:43:00 2018 -0700
Committer: Jason Lowe 
Committed: Wed Sep 26 14:43:00 2018 -0700

--
 .../scheduler/ResourceLimits.java   | 24 ++
 .../scheduler/capacity/ParentQueue.java | 16 ++--
 .../capacity/TestContainerAllocation.java   | 78 
 3 files changed, 112 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b988d82/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
index 721eb36..820d2fa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
@@ -38,6 +38,9 @@ public class ResourceLimits {
   // containers.
   private volatile Resource headroom;
 
+  // How much resource should be reserved for high-priority blocked queues
+  private Resource blockedHeadroom;
+
   private boolean allowPreempt = false;
 
   public ResourceLimits(Resource limit) {
@@ -81,4 +84,25 @@ public class ResourceLimits {
   public void setIsAllowPreemption(boolean allowPreempt) {
this.allowPreempt = allowPreempt;
   }
+
+  public void addBlockedHeadroom(Resource resource) {
+if (blockedHeadroom == null) {
+  blockedHeadroom = Resource.newInstance(0, 0);
+}
+Resources.addTo(blockedHeadroom, resource);
+  }
+
+  public Resource getBlockedHeadroom() {
+if (blockedHeadroom == null) {
+  return Resources.none();
+}
+return blockedHeadroom;
+  }
+
+  public Resource getNetLimit() {
+if (blockedHeadroom != null) {
+  return Resources.subtract(limit, blockedHeadroom);
+}
+return limit;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b988d82/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 80549ca..e32130f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -776,7 +776,6 @@ public class ParentQueue extends AbstractCSQueue {
   SchedulingMode schedulingMode) {
 CSAssignment assignment = CSAssignment.NULL_ASSIGNMENT;
 
-Resource parentLimits = limits.getLimit();
 printChildQueues();
 
 // Try to assign to most 'under-served' sub-queue
@@ -790,7 +789,7 @@ public class ParentQueue extends AbstractCSQueue {
 
   // Get ResourceLimits of child queue before assign containers
   ResourceLimits childLimits =
-  getResourceLimitsOfChild(childQueue, cluster, parentLimits,
+  getResourceLimitsOfChild(childQueue, cluster, limits.getNetLimit(),
   candidates.getPartition());
 
   CSAssignment childAssignment = 

hadoop git commit: YARN-6510. Fix profs stat file warning caused by process names that includes parenthesis. (Wilfred Spiegelenburg via Haibo Chen)

2018-09-21 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7a268528c -> 552248139


YARN-6510. Fix profs stat file warning caused by process names that includes 
parenthesis. (Wilfred Spiegelenburg via Haibo Chen)

(cherry picked from commit 4f3ca0396a810f54f7fd0489a224c1bb13143aa4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55224813
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55224813
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55224813

Branch: refs/heads/branch-2.8
Commit: 552248139b34f5b3f157ac76936fc27ecaf0c9ec
Parents: 7a26852
Author: Haibo Chen 
Authored: Wed Apr 26 11:43:27 2017 -0700
Committer: Jason Lowe 
Committed: Fri Sep 21 15:06:10 2018 -0500

--
 .../org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java  | 2 +-
 .../apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java  | 8 
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55224813/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 77c5655..52f1d0e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -58,7 +58,7 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   private static final String PROCFS = "/proc/";
 
   private static final Pattern PROCFS_STAT_FILE_FORMAT = Pattern.compile(
-  "^([\\d-]+)\\s\\(([^)]+)\\)\\s[^\\s]\\s([\\d-]+)\\s([\\d-]+)\\s" +
+  "^([\\d-]+)\\s\\((.*)\\)\\s[^\\s]\\s([\\d-]+)\\s([\\d-]+)\\s" +
   "([\\d-]+)\\s([\\d-]+\\s){7}(\\d+)\\s(\\d+)\\s([\\d-]+\\s){7}(\\d+)\\s" +
   "(\\d+)(\\s[\\d-]+){15}");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55224813/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index 96ec659..4ffc67f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -421,7 +421,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200", "2000", "400"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "200", "100",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
   "100", "30", "300", "3000", "600"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "1", "400", "400",
@@ -565,7 +565,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "1", "300", "300",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "1", "300", "300",
   "30", "300"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "100", "100",
@@ -817,7 +817,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200", "2000", "400"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "200", "100",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
   "100", "30", "300", "3000", "600"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "200", "100",
@@ -840,7 +840,7 @@ public class TestProcfsBasedProcessTree {
   String[] cmdLines = new String[numProcesses];
   cmdLines[0] = "proc1 arg1 arg2";
   cmdLines[1] = "process two arg3 arg4";
-  cmdLines[2] = "proc3 arg5 arg6";

hadoop git commit: YARN-8784. DockerLinuxContainerRuntime prevents access to distributed cache entries on a full disk. Contributed by Eric Badger

2018-09-19 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 b4069343b -> 3fb678729


YARN-8784. DockerLinuxContainerRuntime prevents access to distributed cache 
entries on a full disk. Contributed by Eric Badger

(cherry picked from commit 6b5838ed3220f992092c7348f92f1d9d0d4a3061)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fb67872
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fb67872
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fb67872

Branch: refs/heads/branch-3.1
Commit: 3fb67872959ea1f6c784c5b38e37ea694950ef37
Parents: b406934
Author: Jason Lowe 
Authored: Wed Sep 19 16:44:51 2018 -0500
Committer: Jason Lowe 
Committed: Wed Sep 19 16:49:21 2018 -0500

--
 .../launcher/ContainerLaunch.java   |  5 +-
 .../launcher/TestContainerLaunch.java   | 81 
 2 files changed, 84 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fb67872/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 9379cfb..d5bc5ab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -249,12 +249,13 @@ public class ContainerLaunch implements Callable<Integer>
{
   // accessible by users
   pidFilePath = dirsHandler.getLocalPathForWrite(pidFileSubpath);
   List<String> localDirs = dirsHandler.getLocalDirs();
+  List<String> localDirsForRead = dirsHandler.getLocalDirsForRead();
   List<String> logDirs = dirsHandler.getLogDirs();
-  List<String> filecacheDirs = getNMFilecacheDirs(localDirs);
+  List<String> filecacheDirs = getNMFilecacheDirs(localDirsForRead);
   List<String> userLocalDirs = getUserLocalDirs(localDirs);
   List<String> containerLocalDirs = getContainerLocalDirs(localDirs);
   List<String> containerLogDirs = getContainerLogDirs(logDirs);
-  List<String> userFilecacheDirs = getUserFilecacheDirs(localDirs);
+  List<String> userFilecacheDirs = getUserFilecacheDirs(localDirsForRead);
   List<String> applicationLocalDirs = getApplicationLocalDirs(localDirs,
   appIdStr);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fb67872/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 5d77b60..6a30f9e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -96,6 +96,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
 import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
 import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
@@ -107,6 +108,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.Conta
 import 

hadoop git commit: YARN-8784. DockerLinuxContainerRuntime prevents access to distributed cache entries on a full disk. Contributed by Eric Badger

2018-09-19 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk f6bb1ca3c -> 6b5838ed3


YARN-8784. DockerLinuxContainerRuntime prevents access to distributed cache 
entries on a full disk. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b5838ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b5838ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b5838ed

Branch: refs/heads/trunk
Commit: 6b5838ed3220f992092c7348f92f1d9d0d4a3061
Parents: f6bb1ca
Author: Jason Lowe 
Authored: Wed Sep 19 16:44:51 2018 -0500
Committer: Jason Lowe 
Committed: Wed Sep 19 16:44:51 2018 -0500

--
 .../launcher/ContainerLaunch.java   |  5 +-
 .../launcher/TestContainerLaunch.java   | 81 
 2 files changed, 84 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b5838ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 2aca5f8..6347d4e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -250,12 +250,13 @@ public class ContainerLaunch implements Callable<Integer>
{
   // accessible by users
   pidFilePath = dirsHandler.getLocalPathForWrite(pidFileSubpath);
   List<String> localDirs = dirsHandler.getLocalDirs();
+  List<String> localDirsForRead = dirsHandler.getLocalDirsForRead();
   List<String> logDirs = dirsHandler.getLogDirs();
-  List<String> filecacheDirs = getNMFilecacheDirs(localDirs);
+  List<String> filecacheDirs = getNMFilecacheDirs(localDirsForRead);
   List<String> userLocalDirs = getUserLocalDirs(localDirs);
   List<String> containerLocalDirs = getContainerLocalDirs(localDirs);
   List<String> containerLogDirs = getContainerLogDirs(logDirs);
-  List<String> userFilecacheDirs = getUserFilecacheDirs(localDirs);
+  List<String> userFilecacheDirs = getUserFilecacheDirs(localDirsForRead);
   List<String> applicationLocalDirs = getApplicationLocalDirs(localDirs,
   appIdStr);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b5838ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index ddf46a6..5714a1c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -96,6 +96,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
 import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
 import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
@@ -107,6 +108,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.Conta
 import 

hadoop git commit: MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement RMHeartbeatHandler. Contributed by Oleksandr Shevchenko

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8499e200f -> 7a268528c


MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement 
RMHeartbeatHandler. Contributed by Oleksandr Shevchenko

(cherry picked from commit 8382b860d4ef4f20d000537ded42a88e98bd2190)

Conflicts:

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a268528
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a268528
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a268528

Branch: refs/heads/branch-2.8
Commit: 7a268528c3bd7a86762e73ecadcb2141f1c6ca72
Parents: 8499e20
Author: Jason Lowe 
Authored: Tue Sep 18 17:06:32 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 17:27:36 2018 -0500

--
 .../hadoop/mapreduce/v2/app/MRAppBenchmark.java  | 15 +--
 1 file changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a268528/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 5569722..6bd808a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -33,7 +33,9 @@ import 
org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssigned
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
@@ -114,7 +116,7 @@ public class MRAppBenchmark {
 }
 
 class ThrottledContainerAllocator extends AbstractService 
-implements ContainerAllocator {
+implements ContainerAllocator, RMHeartbeatHandler {
   private int containerCount;
   private Thread thread;
   private BlockingQueue<ContainerAllocatorEvent> eventQueue =
@@ -179,6 +181,15 @@ public class MRAppBenchmark {
 }
 super.serviceStop();
   }
+
+  @Override
+  public long getLastHeartbeatTime() {
+return Time.now();
+  }
+
+  @Override
+  public void runOnNextHeartbeat(Runnable callback) {
+  }
 }
   }
 
@@ -259,7 +270,7 @@ public class MRAppBenchmark {
 });
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark2() throws Exception {
 int maps = 100; // Adjust for benchmarking, start with a couple of 
thousands
 int reduces = 50;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. Contributed by Oleksandr Shevchenko

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 4d9b1ec66 -> 8499e200f


MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. 
Contributed by Oleksandr Shevchenko

(cherry picked from commit 34b2237e420cfbe3a97ddd44968de8bbe1ed30ab)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8499e200
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8499e200
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8499e200

Branch: refs/heads/branch-2.8
Commit: 8499e200f24c9516c4fea9da136b6dbde519c902
Parents: 4d9b1ec
Author: Jason Lowe 
Authored: Tue Sep 18 16:56:31 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 17:27:03 2018 -0500

--
 .../java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8499e200/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 744ca10..5569722 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -44,6 +44,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -181,7 +182,7 @@ public class MRAppBenchmark {
 }
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark1() throws Exception {
 int maps = 100; // Adjust for benchmarking. Start with thousands.
 int reduces = 0;
@@ -206,6 +207,7 @@ public class MRAppBenchmark {
 Records.newRecord(RegisterApplicationMasterResponse.class);
 response.setMaximumResourceCapability(Resource.newInstance(
   10240, 1));
+response.setQueue("queue1");
 return response;
   }
 
@@ -247,6 +249,7 @@ public class MRAppBenchmark {
 response.setAllocatedContainers(containers);
 response.setResponseId(request.getResponseId() + 1);
 response.setNumClusterNodes(350);
+response.setApplicationPriority(Priority.newInstance(100));
 return response;
   }
 };


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement RMHeartbeatHandler. Contributed by Oleksandr Shevchenko

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 dbde2a0e8 -> d6bbb7385


MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement 
RMHeartbeatHandler. Contributed by Oleksandr Shevchenko

(cherry picked from commit 8382b860d4ef4f20d000537ded42a88e98bd2190)

Conflicts:

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6bbb738
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6bbb738
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6bbb738

Branch: refs/heads/branch-2.9
Commit: d6bbb7385baaac15b2a767f04cfc27275067c7ec
Parents: dbde2a0
Author: Jason Lowe 
Authored: Tue Sep 18 17:06:32 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 17:26:35 2018 -0500

--
 .../hadoop/mapreduce/v2/app/MRAppBenchmark.java  | 15 +--
 1 file changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6bbb738/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 5569722..6bd808a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -33,7 +33,9 @@ import 
org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssigned
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
@@ -114,7 +116,7 @@ public class MRAppBenchmark {
 }
 
 class ThrottledContainerAllocator extends AbstractService 
-implements ContainerAllocator {
+implements ContainerAllocator, RMHeartbeatHandler {
   private int containerCount;
   private Thread thread;
  private BlockingQueue<ContainerAllocatorEvent> eventQueue =
@@ -179,6 +181,15 @@ public class MRAppBenchmark {
 }
 super.serviceStop();
   }
+
+  @Override
+  public long getLastHeartbeatTime() {
+return Time.now();
+  }
+
+  @Override
+  public void runOnNextHeartbeat(Runnable callback) {
+  }
 }
   }
 
@@ -259,7 +270,7 @@ public class MRAppBenchmark {
 });
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark2() throws Exception {
 int maps = 100; // Adjust for benchmarking, start with a couple of 
thousands
 int reduces = 50;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. Contributed by Oleksandr Shevchenko

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 f116868a5 -> dbde2a0e8


MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. 
Contributed by Oleksandr Shevchenko

(cherry picked from commit 34b2237e420cfbe3a97ddd44968de8bbe1ed30ab)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dbde2a0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dbde2a0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dbde2a0e

Branch: refs/heads/branch-2.9
Commit: dbde2a0e8499ae543f01ebc2840d8bc4973e0d18
Parents: f116868
Author: Jason Lowe 
Authored: Tue Sep 18 16:56:31 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 17:23:33 2018 -0500

--
 .../java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbde2a0e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 744ca10..5569722 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -44,6 +44,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -181,7 +182,7 @@ public class MRAppBenchmark {
 }
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark1() throws Exception {
 int maps = 100; // Adjust for benchmarking. Start with thousands.
 int reduces = 0;
@@ -206,6 +207,7 @@ public class MRAppBenchmark {
 Records.newRecord(RegisterApplicationMasterResponse.class);
 response.setMaximumResourceCapability(Resource.newInstance(
   10240, 1));
+response.setQueue("queue1");
 return response;
   }
 
@@ -247,6 +249,7 @@ public class MRAppBenchmark {
 response.setAllocatedContainers(containers);
 response.setResponseId(request.getResponseId() + 1);
 response.setNumClusterNodes(350);
+response.setApplicationPriority(Priority.newInstance(100));
 return response;
   }
 };


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement RMHeartbeatHandler. Contributed by Oleksandr Shevchenko

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f5cc9f71e -> 726aee360


MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement 
RMHeartbeatHandler. Contributed by Oleksandr Shevchenko

(cherry picked from commit 8382b860d4ef4f20d000537ded42a88e98bd2190)

Conflicts:

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/726aee36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/726aee36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/726aee36

Branch: refs/heads/branch-2
Commit: 726aee360a8c96ad6726ac69817959ed66992695
Parents: f5cc9f7
Author: Jason Lowe 
Authored: Tue Sep 18 17:06:32 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 17:19:21 2018 -0500

--
 .../hadoop/mapreduce/v2/app/MRAppBenchmark.java  | 15 +--
 1 file changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/726aee36/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 5569722..6bd808a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -33,7 +33,9 @@ import 
org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssigned
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
@@ -114,7 +116,7 @@ public class MRAppBenchmark {
 }
 
 class ThrottledContainerAllocator extends AbstractService 
-implements ContainerAllocator {
+implements ContainerAllocator, RMHeartbeatHandler {
   private int containerCount;
   private Thread thread;
  private BlockingQueue<ContainerAllocatorEvent> eventQueue =
@@ -179,6 +181,15 @@ public class MRAppBenchmark {
 }
 super.serviceStop();
   }
+
+  @Override
+  public long getLastHeartbeatTime() {
+return Time.now();
+  }
+
+  @Override
+  public void runOnNextHeartbeat(Runnable callback) {
+  }
 }
   }
 
@@ -259,7 +270,7 @@ public class MRAppBenchmark {
 });
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark2() throws Exception {
 int maps = 100; // Adjust for benchmarking, start with a couple of 
thousands
 int reduces = 50;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. Contributed by Oleksandr Shevchenko

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 88e2ca405 -> f5cc9f71e


MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. 
Contributed by Oleksandr Shevchenko

(cherry picked from commit 34b2237e420cfbe3a97ddd44968de8bbe1ed30ab)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5cc9f71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5cc9f71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5cc9f71

Branch: refs/heads/branch-2
Commit: f5cc9f71e6ba733ee812bbb0d907206bfc345443
Parents: 88e2ca4
Author: Jason Lowe 
Authored: Tue Sep 18 16:56:31 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 17:15:58 2018 -0500

--
 .../java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5cc9f71/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 744ca10..5569722 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -44,6 +44,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -181,7 +182,7 @@ public class MRAppBenchmark {
 }
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark1() throws Exception {
 int maps = 100; // Adjust for benchmarking. Start with thousands.
 int reduces = 0;
@@ -206,6 +207,7 @@ public class MRAppBenchmark {
 Records.newRecord(RegisterApplicationMasterResponse.class);
 response.setMaximumResourceCapability(Resource.newInstance(
   10240, 1));
+response.setQueue("queue1");
 return response;
   }
 
@@ -247,6 +249,7 @@ public class MRAppBenchmark {
 response.setAllocatedContainers(containers);
 response.setResponseId(request.getResponseId() + 1);
 response.setNumClusterNodes(350);
+response.setApplicationPriority(Priority.newInstance(100));
 return response;
   }
 };


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement RMHeartbeatHandler. Contributed by Oleksandr Shevchenko

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 4a2a7d37d -> f5ebb2ccf


MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement 
RMHeartbeatHandler. Contributed by Oleksandr Shevchenko

(cherry picked from commit 8382b860d4ef4f20d000537ded42a88e98bd2190)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5ebb2cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5ebb2cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5ebb2cc

Branch: refs/heads/branch-3.0
Commit: f5ebb2ccf5255cbfa3cafe21fa058be526e01ed2
Parents: 4a2a7d3
Author: Jason Lowe 
Authored: Tue Sep 18 17:06:32 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 17:15:17 2018 -0500

--
 .../hadoop/mapreduce/v2/app/MRAppBenchmark.java  | 15 +--
 1 file changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5ebb2cc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 5e6697b..efe150f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -33,10 +33,12 @@ import 
org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssigned
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
 import org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy;
 import org.apache.hadoop.mapreduce.v2.app.rm.preemption.NoopAMPreemptionPolicy;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
@@ -116,7 +118,7 @@ public class MRAppBenchmark {
 }
 
 class ThrottledContainerAllocator extends AbstractService 
-implements ContainerAllocator {
+implements ContainerAllocator, RMHeartbeatHandler {
   private int containerCount;
   private Thread thread;
  private BlockingQueue<ContainerAllocatorEvent> eventQueue =
@@ -182,6 +184,15 @@ public class MRAppBenchmark {
 }
 super.serviceStop();
   }
+
+  @Override
+  public long getLastHeartbeatTime() {
+return Time.now();
+  }
+
+  @Override
+  public void runOnNextHeartbeat(Runnable callback) {
+  }
 }
   }
 
@@ -264,7 +275,7 @@ public class MRAppBenchmark {
 });
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark2() throws Exception {
 int maps = 100; // Adjust for benchmarking, start with a couple of 
thousands
 int reduces = 50;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. Contributed by Oleksandr Shevchenko

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 f2eeba5c5 -> 4a2a7d37d


MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. 
Contributed by Oleksandr Shevchenko

(cherry picked from commit 34b2237e420cfbe3a97ddd44968de8bbe1ed30ab)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a2a7d37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a2a7d37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a2a7d37

Branch: refs/heads/branch-3.0
Commit: 4a2a7d37ddcd8c31c8b0c9c46754f74affc88c67
Parents: f2eeba5
Author: Jason Lowe 
Authored: Tue Sep 18 16:56:31 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 17:13:00 2018 -0500

--
 .../java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2a7d37/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 025a8fa..5e6697b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -47,6 +47,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -184,7 +185,7 @@ public class MRAppBenchmark {
 }
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark1() throws Exception {
 int maps = 100; // Adjust for benchmarking. Start with thousands.
 int reduces = 0;
@@ -211,6 +212,7 @@ public class MRAppBenchmark {
 Records.newRecord(RegisterApplicationMasterResponse.class);
 response.setMaximumResourceCapability(Resource.newInstance(
   10240, 1));
+response.setQueue("queue1");
 return response;
   }
 
@@ -252,6 +254,7 @@ public class MRAppBenchmark {
 response.setAllocatedContainers(containers);
 response.setResponseId(request.getResponseId() + 1);
 response.setNumClusterNodes(350);
+response.setApplicationPriority(Priority.newInstance(100));
 return response;
   }
 };


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement RMHeartbeatHandler. Contributed by Oleksandr Shevchenko

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 85ae097f3 -> 3a13fa1b7


MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement 
RMHeartbeatHandler. Contributed by Oleksandr Shevchenko

(cherry picked from commit 8382b860d4ef4f20d000537ded42a88e98bd2190)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a13fa1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a13fa1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a13fa1b

Branch: refs/heads/branch-3.1
Commit: 3a13fa1b7b034ced8c9c57b07c78e9923edd9551
Parents: 85ae097
Author: Jason Lowe 
Authored: Tue Sep 18 17:06:32 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 17:12:11 2018 -0500

--
 .../hadoop/mapreduce/v2/app/MRAppBenchmark.java  | 15 +--
 1 file changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a13fa1b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 5e6697b..efe150f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -33,10 +33,12 @@ import 
org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssigned
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
 import org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy;
 import org.apache.hadoop.mapreduce.v2.app.rm.preemption.NoopAMPreemptionPolicy;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
@@ -116,7 +118,7 @@ public class MRAppBenchmark {
 }
 
 class ThrottledContainerAllocator extends AbstractService 
-implements ContainerAllocator {
+implements ContainerAllocator, RMHeartbeatHandler {
   private int containerCount;
   private Thread thread;
  private BlockingQueue<ContainerAllocatorEvent> eventQueue =
@@ -182,6 +184,15 @@ public class MRAppBenchmark {
 }
 super.serviceStop();
   }
+
+  @Override
+  public long getLastHeartbeatTime() {
+return Time.now();
+  }
+
+  @Override
+  public void runOnNextHeartbeat(Runnable callback) {
+  }
 }
   }
 
@@ -264,7 +275,7 @@ public class MRAppBenchmark {
 });
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark2() throws Exception {
 int maps = 100; // Adjust for benchmarking, start with a couple of 
thousands
 int reduces = 50;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. Contributed by Oleksandr Shevchenko

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 610b00bdf -> 85ae097f3


MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. 
Contributed by Oleksandr Shevchenko

(cherry picked from commit 34b2237e420cfbe3a97ddd44968de8bbe1ed30ab)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85ae097f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85ae097f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85ae097f

Branch: refs/heads/branch-3.1
Commit: 85ae097f3c728dd49b0deaf5fbace2186e72d678
Parents: 610b00b
Author: Jason Lowe 
Authored: Tue Sep 18 16:56:31 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 17:08:35 2018 -0500

--
 .../java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85ae097f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 025a8fa..5e6697b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -47,6 +47,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -184,7 +185,7 @@ public class MRAppBenchmark {
 }
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark1() throws Exception {
 int maps = 100; // Adjust for benchmarking. Start with thousands.
 int reduces = 0;
@@ -211,6 +212,7 @@ public class MRAppBenchmark {
 Records.newRecord(RegisterApplicationMasterResponse.class);
 response.setMaximumResourceCapability(Resource.newInstance(
   10240, 1));
+response.setQueue("queue1");
 return response;
   }
 
@@ -252,6 +254,7 @@ public class MRAppBenchmark {
 response.setAllocatedContainers(containers);
 response.setResponseId(request.getResponseId() + 1);
 response.setNumClusterNodes(350);
+response.setApplicationPriority(Priority.newInstance(100));
 return response;
   }
 };


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement RMHeartbeatHandler. Contributed by Oleksandr Shevchenko

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 34b2237e4 -> 8382b860d


MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement 
RMHeartbeatHandler. Contributed by Oleksandr Shevchenko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8382b860
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8382b860
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8382b860

Branch: refs/heads/trunk
Commit: 8382b860d4ef4f20d000537ded42a88e98bd2190
Parents: 34b2237
Author: Jason Lowe 
Authored: Tue Sep 18 17:06:32 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 17:06:32 2018 -0500

--
 .../hadoop/mapreduce/v2/app/MRAppBenchmark.java  | 15 +--
 1 file changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8382b860/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 5e6697b..efe150f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -33,10 +33,12 @@ import 
org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssigned
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
 import org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy;
 import org.apache.hadoop.mapreduce.v2.app.rm.preemption.NoopAMPreemptionPolicy;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
@@ -116,7 +118,7 @@ public class MRAppBenchmark {
 }
 
 class ThrottledContainerAllocator extends AbstractService 
-implements ContainerAllocator {
+implements ContainerAllocator, RMHeartbeatHandler {
   private int containerCount;
   private Thread thread;
  private BlockingQueue<ContainerAllocatorEvent> eventQueue =
@@ -182,6 +184,15 @@ public class MRAppBenchmark {
 }
 super.serviceStop();
   }
+
+  @Override
+  public long getLastHeartbeatTime() {
+return Time.now();
+  }
+
+  @Override
+  public void runOnNextHeartbeat(Runnable callback) {
+  }
 }
   }
 
@@ -264,7 +275,7 @@ public class MRAppBenchmark {
 });
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark2() throws Exception {
 int maps = 100; // Adjust for benchmarking, start with a couple of 
thousands
 int reduces = 50;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. Contributed by Oleksandr Shevchenko

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5c2ae7e49 -> 34b2237e4


MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. 
Contributed by Oleksandr Shevchenko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34b2237e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34b2237e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34b2237e

Branch: refs/heads/trunk
Commit: 34b2237e420cfbe3a97ddd44968de8bbe1ed30ab
Parents: 5c2ae7e
Author: Jason Lowe 
Authored: Tue Sep 18 16:56:31 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 16:56:31 2018 -0500

--
 .../java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34b2237e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 025a8fa..5e6697b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -47,6 +47,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -184,7 +185,7 @@ public class MRAppBenchmark {
 }
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark1() throws Exception {
 int maps = 100; // Adjust for benchmarking. Start with thousands.
 int reduces = 0;
@@ -211,6 +212,7 @@ public class MRAppBenchmark {
 Records.newRecord(RegisterApplicationMasterResponse.class);
 response.setMaximumResourceCapability(Resource.newInstance(
   10240, 1));
+response.setQueue("queue1");
 return response;
   }
 
@@ -252,6 +254,7 @@ public class MRAppBenchmark {
 response.setAllocatedContainers(containers);
 response.setResponseId(request.getResponseId() + 1);
 response.setNumClusterNodes(350);
+response.setApplicationPriority(Priority.newInstance(100));
 return response;
   }
 };


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-15755. StringUtils#createStartupShutdownMessage throws NPE when args is null. Contributed by Lokesh Jain and Dinesh Chitlangia

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 9208602a6 -> 5b270b53b


HADOOP-15755. StringUtils#createStartupShutdownMessage throws NPE when args is 
null. Contributed by Lokesh Jain and Dinesh Chitlangia

(cherry picked from commit e71f61ecb87e04727a5a76e578a75714c9db6706)

Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b270b53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b270b53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b270b53

Branch: refs/heads/branch-2.9
Commit: 5b270b53bebad2364f3210c3ae7f6d8988010e59
Parents: 9208602
Author: Jason Lowe 
Authored: Tue Sep 18 15:55:09 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 16:08:22 2018 -0500

--
 .../src/main/java/org/apache/hadoop/util/StringUtils.java   | 2 +-
 .../test/java/org/apache/hadoop/util/TestStringUtils.java   | 9 +
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b270b53/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 67b61c0..a3b8555 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -732,7 +732,7 @@ public class StringUtils {
 return toStartupShutdownString("STARTUP_MSG: ", new String[] {
 "Starting " + classname,
 "  host = " + hostname,
-"  args = " + Arrays.asList(args),
+"  args = " + (args != null ? Arrays.asList(args) : new ArrayList<>()),
 "  version = " + VersionInfo.getVersion(),
 "  classpath = " + System.getProperty("java.class.path"),
 "  build = " + VersionInfo.getUrl() + " -r "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b270b53/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 1f474f8..6b9a7d4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -476,6 +476,15 @@ public class TestStringUtils extends UnitTestcaseTimeLimit 
{
 executorService.awaitTermination(50, TimeUnit.SECONDS);
   }
 
+  @Test
+  public void testCreateStartupShutdownMessage() {
+//pass null args and method must still return a string beginning with
+// "STARTUP_MSG"
+String msg = StringUtils.createStartupShutdownMessage(
+this.getClass().getName(), "test.host", null);
+assertTrue(msg.startsWith("STARTUP_MSG:"));
+  }
+
   // Benchmark for StringUtils split
   public static void main(String []args) {
 final String TO_SPLIT = "foo,bar,baz,blah,blah";


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-15755. StringUtils#createStartupShutdownMessage throws NPE when args is null. Contributed by Lokesh Jain and Dinesh Chitlangia

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 97a13eb04 -> 88e2ca405


HADOOP-15755. StringUtils#createStartupShutdownMessage throws NPE when args is 
null. Contributed by Lokesh Jain and Dinesh Chitlangia

(cherry picked from commit e71f61ecb87e04727a5a76e578a75714c9db6706)

Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88e2ca40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88e2ca40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88e2ca40

Branch: refs/heads/branch-2
Commit: 88e2ca405a539ce18dd40fd85c37198dbeb86bf1
Parents: 97a13eb
Author: Jason Lowe 
Authored: Tue Sep 18 15:55:09 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 16:05:47 2018 -0500

--
 .../src/main/java/org/apache/hadoop/util/StringUtils.java   | 2 +-
 .../test/java/org/apache/hadoop/util/TestStringUtils.java   | 9 +
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88e2ca40/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 67b61c0..a3b8555 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -732,7 +732,7 @@ public class StringUtils {
 return toStartupShutdownString("STARTUP_MSG: ", new String[] {
 "Starting " + classname,
 "  host = " + hostname,
-"  args = " + Arrays.asList(args),
+"  args = " + (args != null ? Arrays.asList(args) : new ArrayList<>()),
 "  version = " + VersionInfo.getVersion(),
 "  classpath = " + System.getProperty("java.class.path"),
 "  build = " + VersionInfo.getUrl() + " -r "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88e2ca40/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 1f474f8..6b9a7d4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -476,6 +476,15 @@ public class TestStringUtils extends UnitTestcaseTimeLimit 
{
 executorService.awaitTermination(50, TimeUnit.SECONDS);
   }
 
+  @Test
+  public void testCreateStartupShutdownMessage() {
+//pass null args and method must still return a string beginning with
+// "STARTUP_MSG"
+String msg = StringUtils.createStartupShutdownMessage(
+this.getClass().getName(), "test.host", null);
+assertTrue(msg.startsWith("STARTUP_MSG:"));
+  }
+
   // Benchmark for StringUtils split
   public static void main(String []args) {
 final String TO_SPLIT = "foo,bar,baz,blah,blah";


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-15755. StringUtils#createStartupShutdownMessage throws NPE when args is null. Contributed by Lokesh Jain and Dinesh Chitlangia

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 373c7a0ce -> f2eeba5c5


HADOOP-15755. StringUtils#createStartupShutdownMessage throws NPE when args is 
null. Contributed by Lokesh Jain and Dinesh Chitlangia

(cherry picked from commit e71f61ecb87e04727a5a76e578a75714c9db6706)

Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2eeba5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2eeba5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2eeba5c

Branch: refs/heads/branch-3.0
Commit: f2eeba5c5bffbed7f3263208fa0170cc452a0237
Parents: 373c7a0
Author: Jason Lowe 
Authored: Tue Sep 18 15:55:09 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 16:03:42 2018 -0500

--
 .../src/main/java/org/apache/hadoop/util/StringUtils.java   | 2 +-
 .../test/java/org/apache/hadoop/util/TestStringUtils.java   | 9 +
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2eeba5c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index cda5ec7..05c8928 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -743,7 +743,7 @@ public class StringUtils {
 return toStartupShutdownString("STARTUP_MSG: ", new String[] {
 "Starting " + classname,
 "  host = " + hostname,
-"  args = " + Arrays.asList(args),
+"  args = " + (args != null ? Arrays.asList(args) : new ArrayList<>()),
 "  version = " + VersionInfo.getVersion(),
 "  classpath = " + System.getProperty("java.class.path"),
 "  build = " + VersionInfo.getUrl() + " -r "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2eeba5c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 1f474f8..6b9a7d4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -476,6 +476,15 @@ public class TestStringUtils extends UnitTestcaseTimeLimit 
{
 executorService.awaitTermination(50, TimeUnit.SECONDS);
   }
 
+  @Test
+  public void testCreateStartupShutdownMessage() {
+//pass null args and method must still return a string beginning with
+// "STARTUP_MSG"
+String msg = StringUtils.createStartupShutdownMessage(
+this.getClass().getName(), "test.host", null);
+assertTrue(msg.startsWith("STARTUP_MSG:"));
+  }
+
   // Benchmark for StringUtils split
   public static void main(String []args) {
 final String TO_SPLIT = "foo,bar,baz,blah,blah";


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-15755. StringUtils#createStartupShutdownMessage throws NPE when args is null. Contributed by Lokesh Jain and Dinesh Chitlangia

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 3d77094cf -> 610b00bdf


HADOOP-15755. StringUtils#createStartupShutdownMessage throws NPE when args is 
null. Contributed by Lokesh Jain and Dinesh Chitlangia

(cherry picked from commit e71f61ecb87e04727a5a76e578a75714c9db6706)

Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/610b00bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/610b00bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/610b00bd

Branch: refs/heads/branch-3.1
Commit: 610b00bdf40cbfa3bb18975ad1334f8fb790a1d4
Parents: 3d77094
Author: Jason Lowe 
Authored: Tue Sep 18 15:55:09 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 16:00:48 2018 -0500

--
 .../src/main/java/org/apache/hadoop/util/StringUtils.java   | 2 +-
 .../test/java/org/apache/hadoop/util/TestStringUtils.java   | 9 +
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/610b00bd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index ebe7013..b29d8c2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -743,7 +743,7 @@ public class StringUtils {
 return toStartupShutdownString("STARTUP_MSG: ", new String[] {
 "Starting " + classname,
 "  host = " + hostname,
-"  args = " + Arrays.asList(args),
+"  args = " + (args != null ? Arrays.asList(args) : new ArrayList<>()),
 "  version = " + VersionInfo.getVersion(),
 "  classpath = " + System.getProperty("java.class.path"),
 "  build = " + VersionInfo.getUrl() + " -r "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/610b00bd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 1f474f8..6b9a7d4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -476,6 +476,15 @@ public class TestStringUtils extends UnitTestcaseTimeLimit 
{
 executorService.awaitTermination(50, TimeUnit.SECONDS);
   }
 
+  @Test
+  public void testCreateStartupShutdownMessage() {
+//pass null args and method must still return a string beginning with
+// "STARTUP_MSG"
+String msg = StringUtils.createStartupShutdownMessage(
+this.getClass().getName(), "test.host", null);
+assertTrue(msg.startsWith("STARTUP_MSG:"));
+  }
+
   // Benchmark for StringUtils split
   public static void main(String []args) {
 final String TO_SPLIT = "foo,bar,baz,blah,blah";


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-15755. StringUtils#createStartupShutdownMessage throws NPE when args is null. Contributed by Lokesh Jain and Dinesh Chitlangia

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 589637276 -> e71f61ecb


HADOOP-15755. StringUtils#createStartupShutdownMessage throws NPE when args is 
null. Contributed by Lokesh Jain and Dinesh Chitlangia


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e71f61ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e71f61ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e71f61ec

Branch: refs/heads/trunk
Commit: e71f61ecb87e04727a5a76e578a75714c9db6706
Parents: 5896372
Author: Jason Lowe 
Authored: Tue Sep 18 15:55:09 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 15:57:33 2018 -0500

--
 .../src/main/java/org/apache/hadoop/util/StringUtils.java   | 2 +-
 .../test/java/org/apache/hadoop/util/TestStringUtils.java   | 9 +
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e71f61ec/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 3db805f..f49698c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -743,7 +743,7 @@ public class StringUtils {
 return toStartupShutdownString("STARTUP_MSG: ", new String[] {
 "Starting " + classname,
 "  host = " + hostname,
-"  args = " + Arrays.asList(args),
+"  args = " + (args != null ? Arrays.asList(args) : new ArrayList<>()),
 "  version = " + VersionInfo.getVersion(),
 "  classpath = " + System.getProperty("java.class.path"),
 "  build = " + VersionInfo.getUrl() + " -r "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e71f61ec/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 3fdc1bb..f05b589 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -503,6 +503,15 @@ public class TestStringUtils extends UnitTestcaseTimeLimit 
{
 escapedStr, StringUtils.escapeHTML(htmlStr));
   }
 
+  @Test
+  public void testCreateStartupShutdownMessage() {
+//pass null args and method must still return a string beginning with
+// "STARTUP_MSG"
+String msg = StringUtils.createStartupShutdownMessage(
+this.getClass().getName(), "test.host", null);
+assertTrue(msg.startsWith("STARTUP_MSG:"));
+  }
+
   // Benchmark for StringUtils split
   public static void main(String []args) {
 final String TO_SPLIT = "foo,bar,baz,blah,blah";


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: YARN-8648. Container cgroups are leaked when using docker. Contributed by Jim Brennan

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 af2390cf4 -> 3d77094cf


YARN-8648. Container cgroups are leaked when using docker. Contributed by Jim 
Brennan

(cherry picked from commit 2df0a8dcb3dfde15d216481cc1296d97d2cb5d43)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d77094c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d77094c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d77094c

Branch: refs/heads/branch-3.1
Commit: 3d77094cf2cb8c4741866aa7dd4fcd8b3c454bc3
Parents: af2390c
Author: Jason Lowe 
Authored: Tue Sep 18 15:28:04 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 15:43:10 2018 -0500

--
 .../nodemanager/LinuxContainerExecutor.java |   3 +-
 .../linux/resources/ResourceHandlerModule.java  |  15 ++
 .../runtime/DockerLinuxContainerRuntime.java|   3 +-
 .../linux/runtime/docker/DockerRmCommand.java   |  11 +-
 .../impl/container-executor.c   | 153 ++-
 .../impl/container-executor.h   |   8 +-
 .../main/native/container-executor/impl/main.c  |  12 +-
 .../test/test-container-executor.c  | 147 ++
 .../docker/TestDockerCommandExecutor.java   |  23 ++-
 .../runtime/docker/TestDockerRmCommand.java |  35 -
 10 files changed, 393 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d77094c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index b3c9d5f..fccf668 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -937,7 +937,8 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
   DockerCommandExecutor.getContainerStatus(containerId, privOpExecutor,
   nmContext))) {
 LOG.info("Removing Docker container : " + containerId);
-DockerRmCommand dockerRmCommand = new DockerRmCommand(containerId);
+DockerRmCommand dockerRmCommand = new DockerRmCommand(containerId,
+ResourceHandlerModule.getCgroupsRelativeRoot());
 DockerCommandExecutor.executeDockerCommand(dockerRmCommand, 
containerId,
 null, privOpExecutor, false, nmContext);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d77094c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
index fc55696..f8a3193 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
@@ -101,6 +101,21 @@ public class ResourceHandlerModule {
 return cGroupsHandler;
   }
 
+  /**
+   * Returns relative root for cgroups.  Returns null if cGroupsHandler is
+   * not initialized, or if the path is empty.
+   */
+  public static String getCgroupsRelativeRoot() {
+if (cGroupsHandler == null) {
+  return null;
+}
+String cGroupPath = cGroupsHandler.getRelativePathForCGroup("");
+if (cGroupPath == null || cGroupPath.isEmpty()) {
+  return null;
+}
+return cGroupPath.replaceAll("/$", "");
+  }
+
   public static NetworkPacketTaggingHandlerImpl
   

hadoop git commit: YARN-8648. Container cgroups are leaked when using docker. Contributed by Jim Brennan

2018-09-18 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 295cce39e -> 2df0a8dcb


YARN-8648. Container cgroups are leaked when using docker. Contributed by Jim 
Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2df0a8dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2df0a8dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2df0a8dc

Branch: refs/heads/trunk
Commit: 2df0a8dcb3dfde15d216481cc1296d97d2cb5d43
Parents: 295cce3
Author: Jason Lowe 
Authored: Tue Sep 18 15:28:04 2018 -0500
Committer: Jason Lowe 
Committed: Tue Sep 18 15:36:45 2018 -0500

--
 .../nodemanager/LinuxContainerExecutor.java |   3 +-
 .../linux/resources/ResourceHandlerModule.java  |  15 ++
 .../runtime/DockerLinuxContainerRuntime.java|   3 +-
 .../linux/runtime/docker/DockerRmCommand.java   |  11 +-
 .../impl/container-executor.c   | 153 ++-
 .../impl/container-executor.h   |   8 +-
 .../main/native/container-executor/impl/main.c  |  12 +-
 .../test/test-container-executor.c  | 147 ++
 .../docker/TestDockerCommandExecutor.java   |  23 ++-
 .../runtime/docker/TestDockerRmCommand.java |  35 -
 10 files changed, 393 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2df0a8dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index b3c9d5f..fccf668 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -937,7 +937,8 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
   DockerCommandExecutor.getContainerStatus(containerId, privOpExecutor,
   nmContext))) {
 LOG.info("Removing Docker container : " + containerId);
-DockerRmCommand dockerRmCommand = new DockerRmCommand(containerId);
+DockerRmCommand dockerRmCommand = new DockerRmCommand(containerId,
+ResourceHandlerModule.getCgroupsRelativeRoot());
 DockerCommandExecutor.executeDockerCommand(dockerRmCommand, 
containerId,
 null, privOpExecutor, false, nmContext);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2df0a8dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
index fc55696..f8a3193 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
@@ -101,6 +101,21 @@ public class ResourceHandlerModule {
 return cGroupsHandler;
   }
 
+  /**
+   * Returns relative root for cgroups.  Returns null if cGroupsHandler is
+   * not initialized, or if the path is empty.
+   */
+  public static String getCgroupsRelativeRoot() {
+if (cGroupsHandler == null) {
+  return null;
+}
+String cGroupPath = cGroupsHandler.getRelativePathForCGroup("");
+if (cGroupPath == null || cGroupPath.isEmpty()) {
+  return null;
+}
+return cGroupPath.replaceAll("/$", "");
+  }
+
   public static NetworkPacketTaggingHandlerImpl
   getNetworkResourceHandler() {
 return networkPacketTaggingHandlerImpl;


hadoop git commit: MAPREDUCE-7140. Refactoring TaskAttemptInfo to separate Map and Reduce tasks. Contributed by Oleksandr Shevchenko

2018-09-14 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5470de420 -> 488806bac


MAPREDUCE-7140. Refactoring TaskAttemptInfo to separate Map and Reduce tasks. 
Contributed by Oleksandr Shevchenko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/488806ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/488806ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/488806ba

Branch: refs/heads/trunk
Commit: 488806baca6d84c12b24532ddeacf6d249c2136b
Parents: 5470de4
Author: Jason Lowe 
Authored: Fri Sep 14 15:04:18 2018 -0500
Committer: Jason Lowe 
Committed: Fri Sep 14 15:10:27 2018 -0500

--
 .../mapreduce/v2/app/webapp/AMWebServices.java  |  9 +++--
 .../v2/app/webapp/JAXBContextResolver.java  |  7 ++--
 .../mapreduce/v2/app/webapp/TaskPage.java   |  3 +-
 .../v2/app/webapp/dao/MapTaskAttemptInfo.java   | 39 
 .../app/webapp/dao/ReduceTaskAttemptInfo.java   | 11 +++---
 .../v2/app/webapp/dao/TaskAttemptInfo.java  | 14 +++
 .../v2/app/webapp/dao/TaskAttemptsInfo.java | 10 ++---
 .../mapreduce/v2/hs/webapp/HsTaskPage.java  |  3 +-
 .../mapreduce/v2/hs/webapp/HsTasksBlock.java|  5 ++-
 .../mapreduce/v2/hs/webapp/HsWebServices.java   |  9 +++--
 .../v2/hs/webapp/JAXBContextResolver.java   |  4 +-
 11 files changed, 77 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/488806ba/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java
index f477d31..fe3ace8 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptState;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobsInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.MapTaskAttemptInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo;
@@ -396,9 +397,9 @@ public class AMWebServices {
 for (TaskAttempt ta : task.getAttempts().values()) {
   if (ta != null) {
 if (task.getType() == TaskType.REDUCE) {
-  attempts.add(new ReduceTaskAttemptInfo(ta, task.getType()));
+  attempts.add(new ReduceTaskAttemptInfo(ta));
 } else {
-  attempts.add(new TaskAttemptInfo(ta, task.getType(), true));
+  attempts.add(new MapTaskAttemptInfo(ta, true));
 }
   }
 }
@@ -419,9 +420,9 @@ public class AMWebServices {
 Task task = getTaskFromTaskIdString(tid, job);
 TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task);
 if (task.getType() == TaskType.REDUCE) {
-  return new ReduceTaskAttemptInfo(ta, task.getType());
+  return new ReduceTaskAttemptInfo(ta);
 } else {
-  return new TaskAttemptInfo(ta, task.getType(), true);
+  return new MapTaskAttemptInfo(ta, true);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/488806ba/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java
index 88c7d86..625eb4e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java
+++ 

hadoop git commit: MAPREDUCE-7133. History Server task attempts REST API returns invalid data. Contributed by Oleksandr Shevchenko

2018-09-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 feefa57e4 -> 991461340


MAPREDUCE-7133. History Server task attempts REST API returns invalid data. 
Contributed by Oleksandr Shevchenko

(cherry picked from commit 2886024ac3a8613ecc27f1595b278ce6fc2d03ba)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99146134
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99146134
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99146134

Branch: refs/heads/branch-2.7
Commit: 991461340a79733123d7bbf83168f29157136f38
Parents: feefa57
Author: Jason Lowe 
Authored: Thu Sep 13 15:07:18 2018 -0500
Committer: Jason Lowe 
Committed: Thu Sep 13 15:07:18 2018 -0500

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 .../hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java | 8 +---
 .../mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java   | 3 +++
 .../src/site/markdown/MapredAppMasterRest.md | 2 +-
 4 files changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99146134/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index cb5571c..bd15b99 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -16,6 +16,9 @@ Release 2.7.8 - UNRELEASED
 files from intermediate to finished but thinks file is in
 intermediate. (Anthony Hsu via jlowe)
 
+MAPREDUCE-7133. History Server task attempts REST API returns invalid
+data. (Oleksandr Shevchenko via jlowe)
+
 Release 2.7.7 - 2018-07-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99146134/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
index c92488f..6f188d9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
@@ -19,12 +19,10 @@ package org.apache.hadoop.mapreduce.v2.app.webapp.dao;
 
 import java.util.ArrayList;
 
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElementRef;
 import javax.xml.bind.annotation.XmlRootElement;
 
 @XmlRootElement(name = "taskAttempts")
-@XmlAccessorType(XmlAccessType.FIELD)
 public class TaskAttemptsInfo {
 
  protected ArrayList<TaskAttemptInfo> taskAttempt = new 
ArrayList<TaskAttemptInfo>();
@@ -36,6 +34,10 @@ public class TaskAttemptsInfo {
 taskAttempt.add(taskattemptInfo);
   }
 
+  // XmlElementRef annotation should be used to identify the exact type of a 
list element
+  // otherwise metadata will be added to XML attributes,
+  // it can lead to incorrect JSON marshaling
+  @XmlElementRef
  public ArrayList<TaskAttemptInfo> getTaskAttempts() {
 return taskAttempt;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99146134/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
index dcd5d29..fb00dbb 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
@@ -20,6 +20,7 @@
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 

hadoop git commit: MAPREDUCE-7133. History Server task attempts REST API returns invalid data. Contributed by Oleksandr Shevchenko

2018-09-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 e4b7f2404 -> 7f7a3c85c


MAPREDUCE-7133. History Server task attempts REST API returns invalid data. 
Contributed by Oleksandr Shevchenko

(cherry picked from commit 2886024ac3a8613ecc27f1595b278ce6fc2d03ba)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f7a3c85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f7a3c85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f7a3c85

Branch: refs/heads/branch-2.8
Commit: 7f7a3c85cc3a7a176d30294f3bf782e14aa96bcb
Parents: e4b7f24
Author: Jason Lowe 
Authored: Thu Sep 13 14:41:38 2018 -0500
Committer: Jason Lowe 
Committed: Thu Sep 13 14:58:29 2018 -0500

--
 .../hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java | 8 +---
 .../mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java   | 3 +++
 .../src/site/markdown/MapredAppMasterRest.md | 2 +-
 3 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f7a3c85/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
index c92488f..6f188d9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
@@ -19,12 +19,10 @@ package org.apache.hadoop.mapreduce.v2.app.webapp.dao;
 
 import java.util.ArrayList;
 
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElementRef;
 import javax.xml.bind.annotation.XmlRootElement;
 
 @XmlRootElement(name = "taskAttempts")
-@XmlAccessorType(XmlAccessType.FIELD)
 public class TaskAttemptsInfo {
 
  protected ArrayList<TaskAttemptInfo> taskAttempt = new 
ArrayList<TaskAttemptInfo>();
@@ -36,6 +34,10 @@ public class TaskAttemptsInfo {
 taskAttempt.add(taskattemptInfo);
   }
 
+  // XmlElementRef annotation should be used to identify the exact type of a 
list element
+  // otherwise metadata will be added to XML attributes,
+  // it can lead to incorrect JSON marshaling
+  @XmlElementRef
  public ArrayList<TaskAttemptInfo> getTaskAttempts() {
 return taskAttempt;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f7a3c85/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
index 3c9127f..10b0e02 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
@@ -20,6 +20,7 @@
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -477,6 +478,8 @@ public class TestAMWebServicesAttempts extends JerseyTest {
   Boolean found = false;
   for (int i = 0; i < nodes.getLength(); i++) {
 Element element = (Element) nodes.item(i);
+assertFalse("task attempt should not contain any attributes, it can 
lead to incorrect JSON marshaling",
+element.hasAttributes());
 
 if (attid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
   found = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f7a3c85/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md

hadoop git commit: MAPREDUCE-7133. History Server task attempts REST API returns invalid data. Contributed by Oleksandr Shevchenko

2018-09-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 ab39d2979 -> acf752282


MAPREDUCE-7133. History Server task attempts REST API returns invalid data. 
Contributed by Oleksandr Shevchenko

(cherry picked from commit 2886024ac3a8613ecc27f1595b278ce6fc2d03ba)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/acf75228
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/acf75228
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/acf75228

Branch: refs/heads/branch-2.9
Commit: acf75228274168d04e8c0388e60249fd1406b9b4
Parents: ab39d29
Author: Jason Lowe 
Authored: Thu Sep 13 14:41:38 2018 -0500
Committer: Jason Lowe 
Committed: Thu Sep 13 14:54:50 2018 -0500

--
 .../hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java | 8 +---
 .../mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java   | 3 +++
 .../src/site/markdown/MapredAppMasterRest.md | 2 +-
 3 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/acf75228/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
index c92488f..6f188d9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
@@ -19,12 +19,10 @@ package org.apache.hadoop.mapreduce.v2.app.webapp.dao;
 
 import java.util.ArrayList;
 
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElementRef;
 import javax.xml.bind.annotation.XmlRootElement;
 
 @XmlRootElement(name = "taskAttempts")
-@XmlAccessorType(XmlAccessType.FIELD)
 public class TaskAttemptsInfo {
 
  protected ArrayList<TaskAttemptInfo> taskAttempt = new 
ArrayList<TaskAttemptInfo>();
@@ -36,6 +34,10 @@ public class TaskAttemptsInfo {
 taskAttempt.add(taskattemptInfo);
   }
 
+  // XmlElementRef annotation should be used to identify the exact type of a 
list element
+  // otherwise metadata will be added to XML attributes,
+  // it can lead to incorrect JSON marshaling
+  @XmlElementRef
  public ArrayList<TaskAttemptInfo> getTaskAttempts() {
 return taskAttempt;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acf75228/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
index 2165486..332ec75 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -475,6 +476,8 @@ public class TestAMWebServicesAttempts extends 
JerseyTestBase {
   Boolean found = false;
   for (int i = 0; i < nodes.getLength(); i++) {
 Element element = (Element) nodes.item(i);
+assertFalse("task attempt should not contain any attributes, it can 
lead to incorrect JSON marshaling",
+element.hasAttributes());
 
 if (attid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
   found = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acf75228/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md

hadoop git commit: MAPREDUCE-7133. History Server task attempts REST API returns invalid data. Contributed by Oleksandr Shevchenko

2018-09-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bbd09877d -> 1e61f389e


MAPREDUCE-7133. History Server task attempts REST API returns invalid data. 
Contributed by Oleksandr Shevchenko

(cherry picked from commit 2886024ac3a8613ecc27f1595b278ce6fc2d03ba)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e61f389
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e61f389
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e61f389

Branch: refs/heads/branch-2
Commit: 1e61f389e9d8ade8b8fb2cf0dbe8731f0cff1642
Parents: bbd0987
Author: Jason Lowe 
Authored: Thu Sep 13 14:41:38 2018 -0500
Committer: Jason Lowe 
Committed: Thu Sep 13 14:51:19 2018 -0500

--
 .../hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java | 8 +---
 .../mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java   | 3 +++
 .../src/site/markdown/MapredAppMasterRest.md | 2 +-
 3 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e61f389/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
index c92488f..6f188d9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
@@ -19,12 +19,10 @@ package org.apache.hadoop.mapreduce.v2.app.webapp.dao;
 
 import java.util.ArrayList;
 
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElementRef;
 import javax.xml.bind.annotation.XmlRootElement;
 
 @XmlRootElement(name = "taskAttempts")
-@XmlAccessorType(XmlAccessType.FIELD)
 public class TaskAttemptsInfo {
 
  protected ArrayList<TaskAttemptInfo> taskAttempt = new 
ArrayList<TaskAttemptInfo>();
@@ -36,6 +34,10 @@ public class TaskAttemptsInfo {
 taskAttempt.add(taskattemptInfo);
   }
 
+  // XmlElementRef annotation should be used to identify the exact type of a 
list element
+  // otherwise metadata will be added to XML attributes,
+  // it can lead to incorrect JSON marshaling
+  @XmlElementRef
  public ArrayList<TaskAttemptInfo> getTaskAttempts() {
 return taskAttempt;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e61f389/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
index 2165486..332ec75 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -475,6 +476,8 @@ public class TestAMWebServicesAttempts extends 
JerseyTestBase {
   Boolean found = false;
   for (int i = 0; i < nodes.getLength(); i++) {
 Element element = (Element) nodes.item(i);
+assertFalse("task attempt should not contain any attributes, it can 
lead to incorrect JSON marshaling",
+element.hasAttributes());
 
 if (attid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
   found = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e61f389/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md

hadoop git commit: MAPREDUCE-7133. History Server task attempts REST API returns invalid data. Contributed by Oleksandr Shevchenko

2018-09-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1314dc239 -> f918e4d89


MAPREDUCE-7133. History Server task attempts REST API returns invalid data. 
Contributed by Oleksandr Shevchenko

(cherry picked from commit 2886024ac3a8613ecc27f1595b278ce6fc2d03ba)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f918e4d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f918e4d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f918e4d8

Branch: refs/heads/branch-3.0
Commit: f918e4d894cba413bc8b1134bb2c6bebc34cba42
Parents: 1314dc2
Author: Jason Lowe 
Authored: Thu Sep 13 14:41:38 2018 -0500
Committer: Jason Lowe 
Committed: Thu Sep 13 14:47:37 2018 -0500

--
 .../hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java | 8 +---
 .../mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java   | 3 +++
 .../src/site/markdown/MapredAppMasterRest.md | 2 +-
 3 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f918e4d8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
index c92488f..6f188d9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
@@ -19,12 +19,10 @@ package org.apache.hadoop.mapreduce.v2.app.webapp.dao;
 
 import java.util.ArrayList;
 
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElementRef;
 import javax.xml.bind.annotation.XmlRootElement;
 
 @XmlRootElement(name = "taskAttempts")
-@XmlAccessorType(XmlAccessType.FIELD)
 public class TaskAttemptsInfo {
 
  protected ArrayList<TaskAttemptInfo> taskAttempt = new 
ArrayList<TaskAttemptInfo>();
@@ -36,6 +34,10 @@ public class TaskAttemptsInfo {
 taskAttempt.add(taskattemptInfo);
   }
 
+  // XmlElementRef annotation should be used to identify the exact type of a 
list element
+  // otherwise metadata will be added to XML attributes,
+  // it can lead to incorrect JSON marshaling
+  @XmlElementRef
  public ArrayList<TaskAttemptInfo> getTaskAttempts() {
 return taskAttempt;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f918e4d8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
index d92c275..32d054f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.v2.app.webapp;
 
 import static 
org.apache.hadoop.yarn.webapp.WebServicesTestUtils.assertResponseStatusCode;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -483,6 +484,8 @@ public class TestAMWebServicesAttempts extends 
JerseyTestBase {
   Boolean found = false;
   for (int i = 0; i < nodes.getLength(); i++) {
 Element element = (Element) nodes.item(i);
+assertFalse("task attempt should not contain any attributes, it can 
lead to incorrect JSON marshaling",
+element.hasAttributes());
 
 if (attid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
   found = true;


hadoop git commit: MAPREDUCE-7133. History Server task attempts REST API returns invalid data. Contributed by Oleksandr Shevchenko

2018-09-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 88687213c -> 1105f5463


MAPREDUCE-7133. History Server task attempts REST API returns invalid data. 
Contributed by Oleksandr Shevchenko

(cherry picked from commit 2886024ac3a8613ecc27f1595b278ce6fc2d03ba)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1105f546
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1105f546
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1105f546

Branch: refs/heads/branch-3.1
Commit: 1105f546346569b231016e06723c0bdbb540af2e
Parents: 8868721
Author: Jason Lowe 
Authored: Thu Sep 13 14:41:38 2018 -0500
Committer: Jason Lowe 
Committed: Thu Sep 13 14:43:17 2018 -0500

--
 .../hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java | 8 +---
 .../mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java   | 3 +++
 .../src/site/markdown/MapredAppMasterRest.md | 2 +-
 3 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1105f546/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
index c92488f..6f188d9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
@@ -19,12 +19,10 @@ package org.apache.hadoop.mapreduce.v2.app.webapp.dao;
 
 import java.util.ArrayList;
 
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElementRef;
 import javax.xml.bind.annotation.XmlRootElement;
 
 @XmlRootElement(name = "taskAttempts")
-@XmlAccessorType(XmlAccessType.FIELD)
 public class TaskAttemptsInfo {
 
  protected ArrayList<TaskAttemptInfo> taskAttempt = new 
ArrayList<TaskAttemptInfo>();
@@ -36,6 +34,10 @@ public class TaskAttemptsInfo {
 taskAttempt.add(taskattemptInfo);
   }
 
+  // XmlElementRef annotation should be used to identify the exact type of a 
list element
+  // otherwise metadata will be added to XML attributes,
+  // it can lead to incorrect JSON marshaling
+  @XmlElementRef
  public ArrayList<TaskAttemptInfo> getTaskAttempts() {
 return taskAttempt;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1105f546/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
index d92c275..32d054f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.v2.app.webapp;
 
 import static 
org.apache.hadoop.yarn.webapp.WebServicesTestUtils.assertResponseStatusCode;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -483,6 +484,8 @@ public class TestAMWebServicesAttempts extends 
JerseyTestBase {
   Boolean found = false;
   for (int i = 0; i < nodes.getLength(); i++) {
 Element element = (Element) nodes.item(i);
+assertFalse("task attempt should not contain any attributes, it can 
lead to incorrect JSON marshaling",
+element.hasAttributes());
 
 if (attid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
   found = true;


hadoop git commit: MAPREDUCE-7133. History Server task attempts REST API returns invalid data. Contributed by Oleksandr Shevchenko

2018-09-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 250b50018 -> 2886024ac


MAPREDUCE-7133. History Server task attempts REST API returns invalid data. 
Contributed by Oleksandr Shevchenko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2886024a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2886024a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2886024a

Branch: refs/heads/trunk
Commit: 2886024ac3a8613ecc27f1595b278ce6fc2d03ba
Parents: 250b500
Author: Jason Lowe 
Authored: Thu Sep 13 14:41:38 2018 -0500
Committer: Jason Lowe 
Committed: Thu Sep 13 14:41:38 2018 -0500

--
 .../hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java | 8 +---
 .../mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java   | 3 +++
 .../src/site/markdown/MapredAppMasterRest.md | 2 +-
 3 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2886024a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
index c92488f..6f188d9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
@@ -19,12 +19,10 @@ package org.apache.hadoop.mapreduce.v2.app.webapp.dao;
 
 import java.util.ArrayList;
 
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElementRef;
 import javax.xml.bind.annotation.XmlRootElement;
 
 @XmlRootElement(name = "taskAttempts")
-@XmlAccessorType(XmlAccessType.FIELD)
 public class TaskAttemptsInfo {
 
  protected ArrayList<TaskAttemptInfo> taskAttempt = new 
ArrayList<TaskAttemptInfo>();
@@ -36,6 +34,10 @@ public class TaskAttemptsInfo {
 taskAttempt.add(taskattemptInfo);
   }
 
+  // XmlElementRef annotation should be used to identify the exact type of a 
list element
+  // otherwise metadata will be added to XML attributes,
+  // it can lead to incorrect JSON marshaling
+  @XmlElementRef
  public ArrayList<TaskAttemptInfo> getTaskAttempts() {
 return taskAttempt;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2886024a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
index d92c275..32d054f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.v2.app.webapp;
 
 import static 
org.apache.hadoop.yarn.webapp.WebServicesTestUtils.assertResponseStatusCode;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -483,6 +484,8 @@ public class TestAMWebServicesAttempts extends 
JerseyTestBase {
   Boolean found = false;
   for (int i = 0; i < nodes.getLength(); i++) {
 Element element = (Element) nodes.item(i);
+assertFalse("task attempt should not contain any attributes, it can 
lead to incorrect JSON marshaling",
+element.hasAttributes());
 
 if (attid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
   found = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2886024a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md

hadoop git commit: YARN-8680. YARN NM: Implement Iterable Abstraction for LocalResourceTracker state. Contributed by Pradeep Ambati

2018-09-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 2f7d22658 -> 88687213c


YARN-8680. YARN NM: Implement Iterable Abstraction for LocalResourceTracker 
state. Contributed by Pradeep Ambati

(cherry picked from commit 250b50018e8c94d8ca83ff981b01f26bb68c0842)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88687213
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88687213
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88687213

Branch: refs/heads/branch-3.1
Commit: 88687213cc406e2c87a31888b6a73bcbaf8a3ed5
Parents: 2f7d226
Author: Jason Lowe 
Authored: Thu Sep 13 13:28:54 2018 -0500
Committer: Jason Lowe 
Committed: Thu Sep 13 14:12:20 2018 -0500

--
 .../localizer/ResourceLocalizationService.java  |  87 +++---
 .../recovery/NMLeveldbStateStoreService.java| 173 +++-
 .../recovery/NMStateStoreService.java   |  29 +-
 .../recovery/NMMemoryStateStoreService.java |  18 +-
 .../TestNMLeveldbStateStoreService.java | 269 ---
 5 files changed, 418 insertions(+), 158 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88687213/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 4f563cc..3e0260a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -311,63 +311,66 @@ public class ResourceLocalizationService extends 
CompositeService
 String user = userEntry.getKey();
 RecoveredUserResources userResources = userEntry.getValue();
 trackerState = userResources.getPrivateTrackerState();
-if (!trackerState.isEmpty()) {
-  LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
-  null, dispatcher, true, super.getConfig(), stateStore,
-  dirsHandler);
-  LocalResourcesTracker oldTracker = privateRsrc.putIfAbsent(user,
-  tracker);
-  if (oldTracker != null) {
-tracker = oldTracker;
-  }
-  recoverTrackerResources(tracker, trackerState);
+LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
+null, dispatcher, true, super.getConfig(), stateStore,
+dirsHandler);
+LocalResourcesTracker oldTracker = privateRsrc.putIfAbsent(user,
+tracker);
+if (oldTracker != null) {
+  tracker = oldTracker;
 }
+recoverTrackerResources(tracker, trackerState);
 
 for (Map.Entry<ApplicationId, LocalResourceTrackerState> appEntry :
 userResources.getAppTrackerStates().entrySet()) {
   trackerState = appEntry.getValue();
-  if (!trackerState.isEmpty()) {
-ApplicationId appId = appEntry.getKey();
-String appIdStr = appId.toString();
-LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
-appId, dispatcher, false, super.getConfig(), stateStore,
-dirsHandler);
-LocalResourcesTracker oldTracker = appRsrc.putIfAbsent(appIdStr,
-tracker);
-if (oldTracker != null) {
-  tracker = oldTracker;
-}
-recoverTrackerResources(tracker, trackerState);
+  ApplicationId appId = appEntry.getKey();
+  String appIdStr = appId.toString();
+  LocalResourcesTracker tracker1 = new LocalResourcesTrackerImpl(user,
+  appId, dispatcher, false, super.getConfig(), stateStore,
+  dirsHandler);
+  LocalResourcesTracker oldTracker1 = appRsrc.putIfAbsent(appIdStr,
+  tracker1);
+  if (oldTracker1 != null) {
+tracker1 = oldTracker1;
   }
+  recoverTrackerResources(tracker1, trackerState);
 }
   }
 }
   }
 
   private void recoverTrackerResources(LocalResourcesTracker tracker,
-  LocalResourceTrackerState 

hadoop git commit: YARN-8680. YARN NM: Implement Iterable Abstraction for LocalResourceTracker state. Contributed by Pradeep Ambati

2018-09-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk e1b242a98 -> 250b50018


YARN-8680. YARN NM: Implement Iterable Abstraction for LocalResourceTracker 
state. Contributed by Pradeep Ambati


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/250b5001
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/250b5001
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/250b5001

Branch: refs/heads/trunk
Commit: 250b50018e8c94d8ca83ff981b01f26bb68c0842
Parents: e1b242a
Author: Jason Lowe 
Authored: Thu Sep 13 13:28:54 2018 -0500
Committer: Jason Lowe 
Committed: Thu Sep 13 13:28:54 2018 -0500

--
 .../localizer/ResourceLocalizationService.java  |  87 +++---
 .../recovery/NMLeveldbStateStoreService.java| 173 +++-
 .../recovery/NMStateStoreService.java   |  29 +-
 .../recovery/NMMemoryStateStoreService.java |  18 +-
 .../TestNMLeveldbStateStoreService.java | 269 ---
 5 files changed, 418 insertions(+), 158 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/250b5001/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index d9b887f..71f48ac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -308,63 +308,66 @@ public class ResourceLocalizationService extends 
CompositeService
 String user = userEntry.getKey();
 RecoveredUserResources userResources = userEntry.getValue();
 trackerState = userResources.getPrivateTrackerState();
-if (!trackerState.isEmpty()) {
-  LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
-  null, dispatcher, true, super.getConfig(), stateStore,
-  dirsHandler);
-  LocalResourcesTracker oldTracker = privateRsrc.putIfAbsent(user,
-  tracker);
-  if (oldTracker != null) {
-tracker = oldTracker;
-  }
-  recoverTrackerResources(tracker, trackerState);
+LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
+null, dispatcher, true, super.getConfig(), stateStore,
+dirsHandler);
+LocalResourcesTracker oldTracker = privateRsrc.putIfAbsent(user,
+tracker);
+if (oldTracker != null) {
+  tracker = oldTracker;
 }
+recoverTrackerResources(tracker, trackerState);
 
 for (Map.Entry<ApplicationId, LocalResourceTrackerState> appEntry :
 userResources.getAppTrackerStates().entrySet()) {
   trackerState = appEntry.getValue();
-  if (!trackerState.isEmpty()) {
-ApplicationId appId = appEntry.getKey();
-String appIdStr = appId.toString();
-LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
-appId, dispatcher, false, super.getConfig(), stateStore,
-dirsHandler);
-LocalResourcesTracker oldTracker = appRsrc.putIfAbsent(appIdStr,
-tracker);
-if (oldTracker != null) {
-  tracker = oldTracker;
-}
-recoverTrackerResources(tracker, trackerState);
+  ApplicationId appId = appEntry.getKey();
+  String appIdStr = appId.toString();
+  LocalResourcesTracker tracker1 = new LocalResourcesTrackerImpl(user,
+  appId, dispatcher, false, super.getConfig(), stateStore,
+  dirsHandler);
+  LocalResourcesTracker oldTracker1 = appRsrc.putIfAbsent(appIdStr,
+  tracker1);
+  if (oldTracker1 != null) {
+tracker1 = oldTracker1;
   }
+  recoverTrackerResources(tracker1, trackerState);
 }
   }
 }
   }
 
   private void recoverTrackerResources(LocalResourcesTracker tracker,
-  LocalResourceTrackerState state) throws URISyntaxException {
-for (LocalizedResourceProto proto : 

  1   2   3   4   5   6   7   8   9   10   >