hadoop git commit: HDFS-5926 Documentation should clarify dfs.datanode.du.reserved impact from reserved disk capacity (Gabor Bota)

2018-05-04 Thread fabbri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6a69239d8 -> a732acd87


HDFS-5926 Documentation should clarify dfs.datanode.du.reserved impact from 
reserved disk capacity (Gabor Bota)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a732acd8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a732acd8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a732acd8

Branch: refs/heads/trunk
Commit: a732acd8730277df4d9b97b97101bc2bc768800f
Parents: 6a69239
Author: Aaron Fabbri 
Authored: Fri May 4 16:37:47 2018 -0700
Committer: Aaron Fabbri 
Committed: Fri May 4 16:38:30 2018 -0700

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml   | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a732acd8/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index c64b2f1..7a437ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -349,6 +349,9 @@
   For example, reserved space for RAM_DISK storage can be configured using property
   'dfs.datanode.du.reserved.ram_disk'. If specific storage type reservation is not configured
   then dfs.datanode.du.reserved will be used.
+  Note: In case of using tune2fs to set reserved-blocks-percentage, or other filesystem tools,
+  you can possibly run into out-of-disk errors because Hadoop will not check those
+  external tool configurations.
   </description>
 </property>
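As a quick illustration (not part of the commit), here is a minimal Java sketch of setting the reserved-space keys described above; the 10 GB and 1 GB values are made up:

    import org.apache.hadoop.conf.Configuration;

    public class ReservedSpaceConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Reserve 10 GB per volume for non-DFS use; this applies to every
        // storage type unless a storage-type-specific key overrides it.
        conf.setLong("dfs.datanode.du.reserved", 10L * 1024 * 1024 * 1024);
        // Storage-type-specific override for RAM_DISK volumes.
        conf.setLong("dfs.datanode.du.reserved.ram_disk", 1024L * 1024 * 1024);
        System.out.println(conf.getLong("dfs.datanode.du.reserved", 0));
      }
    }

Remember that any space additionally reserved at the filesystem level (e.g. via tune2fs) stays invisible to these settings.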


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8163. Add support for Node Labels in opportunistic scheduling. Contributed by Abhishek Modi.

2018-05-04 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4cdbdce75 -> 6a69239d8


YARN-8163. Add support for Node Labels in opportunistic scheduling. Contributed 
by Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a69239d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a69239d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a69239d

Branch: refs/heads/trunk
Commit: 6a69239d867070ee85d79026542033ac661c4c1c
Parents: 4cdbdce
Author: Inigo Goiri 
Authored: Fri May 4 14:59:59 2018 -0700
Committer: Inigo Goiri 
Committed: Fri May 4 14:59:59 2018 -0700

--
 .../server/api/protocolrecords/RemoteNode.java  | 40 +++-
 .../impl/pb/RemoteNodePBImpl.java   | 19 ++
 .../OpportunisticContainerAllocator.java| 38 ---
 .../yarn_server_common_service_protos.proto |  1 +
 .../TestOpportunisticContainerAllocator.java| 37 ++
 ...pportunisticContainerAllocatorAMService.java | 12 ++
 ...pportunisticContainerAllocatorAMService.java | 10 -
 7 files changed, 149 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a69239d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoteNode.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoteNode.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoteNode.java
index f621aa2..67ad5ba 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoteNode.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoteNode.java
@@ -65,6 +65,26 @@ public abstract class RemoteNode implements Comparable<RemoteNode> {
   }
 
   /**
+   * Create new Instance.
+   * @param nodeId NodeId.
+   * @param httpAddress Http address.
+   * @param rackName Rack Name.
+   * @param nodePartition Node Partition.
+   * @return RemoteNode Instance.
+   */
+  @Private
+  @Unstable
+  public static RemoteNode newInstance(NodeId nodeId, String httpAddress,
+      String rackName, String nodePartition) {
+    RemoteNode remoteNode = Records.newRecord(RemoteNode.class);
+    remoteNode.setNodeId(nodeId);
+    remoteNode.setHttpAddress(httpAddress);
+    remoteNode.setRackName(rackName);
+    remoteNode.setNodePartition(nodePartition);
+    return remoteNode;
+  }
+
+  /**
    * Get {@link NodeId}.
    * @return NodeId.
    */
@@ -117,6 +137,23 @@ public abstract class RemoteNode implements Comparable<RemoteNode> {
    * @param other RemoteNode.
    * @return Comparison.
    */
+
+  /**
+   * Get Node Partition.
+   * @return Node Partition.
+   */
+  @Private
+  @Unstable
+  public abstract String getNodePartition();
+
+  /**
+   * Set Node Partition.
+   * @param nodePartition Node Partition.
+   */
+  @Private
+  @Unstable
+  public abstract void setNodePartition(String nodePartition);
+
   @Override
   public int compareTo(RemoteNode other) {
     return this.getNodeId().compareTo(other.getNodeId());
@@ -127,6 +164,7 @@ public abstract class RemoteNode implements Comparable<RemoteNode> {
     return "RemoteNode{" +
         "nodeId=" + getNodeId() + ", " +
         "rackName=" + getRackName() + ", " +
-        "httpAddress=" + getHttpAddress() + "}";
+        "httpAddress=" + getHttpAddress() + ", " +
+        "partition=" + getNodePartition() + "}";
   }
 }
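A hedged usage sketch of the new factory overload (not code from the patch; the host, port, rack, and "label-x" partition are made-up values):

    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.server.api.protocolrecords.RemoteNode;

    public class RemoteNodePartitionExample {
      public static void main(String[] args) {
        RemoteNode node = RemoteNode.newInstance(
            NodeId.newInstance("host-1.example.com", 45454),
            "host-1.example.com:8042",  // HTTP address
            "/rack-a",                  // rack name
            "label-x");                 // node partition (node label)
        // toString() now includes the partition, so it shows up in allocator logs.
        System.out.println(node);
      }
    }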

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a69239d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoteNodePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoteNodePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoteNodePBImpl.java
index c2492cf..8fb4357 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoteNodePBImpl.java
+++ 

hadoop git commit: HADOOP-14841 Kms client should disconnect if unable to get output stream from connection. Contributed by Rushabh S Shah

2018-05-04 Thread shahrs87
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 26d140d62 -> 1fec21f85


HADOOP-14841 Kms client should disconnect if unable to get output stream from 
connection. Contributed by Rushabh S Shah

(cherry picked from commit 4cdbdce752e192b45c2b9756c2d4bd24ceffdabd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fec21f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fec21f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fec21f8

Branch: refs/heads/branch-2.8
Commit: 1fec21f8504093e157195a93c5661e0b58b5a4bc
Parents: 26d140d
Author: Rushabh Shah 
Authored: Fri May 4 15:36:13 2018 -0500
Committer: Rushabh Shah 
Committed: Fri May 4 16:03:11 2018 -0500

--
 .../hadoop/crypto/key/kms/KMSClientProvider.java   | 13 +++--
 1 file changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fec21f8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 59ec9cc..bd951f4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -519,12 +519,21 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       int expectedResponse, Class<T> klass, int authRetryCount)
       throws IOException {
     T ret = null;
+    OutputStream os = null;
     try {
       if (jsonOutput != null) {
-        writeJson(jsonOutput, conn.getOutputStream());
+        os = conn.getOutputStream();
+        writeJson(jsonOutput, os);
       }
     } catch (IOException ex) {
-      IOUtils.closeStream(conn.getInputStream());
+      // The payload is not serialized if getOutputStream fails.
+      // Calling getInputStream will issue the put/post request with no payload
+      // which causes HTTP 500 server error.
+      if (os == null) {
+        conn.disconnect();
+      } else {
+        IOUtils.closeStream(conn.getInputStream());
+      }
       throw ex;
     }
     if ((conn.getResponseCode() == HttpURLConnection.HTTP_FORBIDDEN


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14841 Kms client should disconnect if unable to get output stream from connection. Contributed by Rushabh S Shah

2018-05-04 Thread shahrs87
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 4f75d228e -> a8d5a56bb


HADOOP-14841 Kms client should disconnect if unable to get output stream from 
connection. Contributed by Rushabh S Shah

(cherry picked from commit 4cdbdce752e192b45c2b9756c2d4bd24ceffdabd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8d5a56b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8d5a56b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8d5a56b

Branch: refs/heads/branch-2.9
Commit: a8d5a56bb56bf675f6608950b73290aa41b74b95
Parents: 4f75d22
Author: Rushabh Shah 
Authored: Fri May 4 15:36:13 2018 -0500
Committer: Rushabh Shah 
Committed: Fri May 4 15:57:49 2018 -0500

--
 .../hadoop/crypto/key/kms/KMSClientProvider.java   | 13 +++--
 1 file changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8d5a56b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 536de53..3b3a33e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -522,12 +522,21 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       int expectedResponse, Class<T> klass, int authRetryCount)
       throws IOException {
     T ret = null;
+    OutputStream os = null;
     try {
       if (jsonOutput != null) {
-        writeJson(jsonOutput, conn.getOutputStream());
+        os = conn.getOutputStream();
+        writeJson(jsonOutput, os);
       }
     } catch (IOException ex) {
-      IOUtils.closeStream(conn.getInputStream());
+      // The payload is not serialized if getOutputStream fails.
+      // Calling getInputStream will issue the put/post request with no payload
+      // which causes HTTP 500 server error.
+      if (os == null) {
+        conn.disconnect();
+      } else {
+        IOUtils.closeStream(conn.getInputStream());
+      }
       throw ex;
     }
     if ((conn.getResponseCode() == HttpURLConnection.HTTP_FORBIDDEN


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14841 Kms client should disconnect if unable to get output stream from connection. Contributed by Rushabh S Shah

2018-05-04 Thread shahrs87
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e7b5c0091 -> d26164517


HADOOP-14841 Kms client should disconnect if unable to get output stream from 
connection. Contributed by Rushabh S Shah

(cherry picked from commit 4cdbdce752e192b45c2b9756c2d4bd24ceffdabd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2616451
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2616451
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2616451

Branch: refs/heads/branch-2
Commit: d261645177bcae8b5501d6944c7ea0ffdc242f9a
Parents: e7b5c00
Author: Rushabh Shah 
Authored: Fri May 4 15:36:13 2018 -0500
Committer: Rushabh Shah 
Committed: Fri May 4 15:55:12 2018 -0500

--
 .../hadoop/crypto/key/kms/KMSClientProvider.java   | 13 +++--
 1 file changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2616451/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 536de53..3b3a33e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -522,12 +522,21 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       int expectedResponse, Class<T> klass, int authRetryCount)
       throws IOException {
     T ret = null;
+    OutputStream os = null;
     try {
       if (jsonOutput != null) {
-        writeJson(jsonOutput, conn.getOutputStream());
+        os = conn.getOutputStream();
+        writeJson(jsonOutput, os);
       }
     } catch (IOException ex) {
-      IOUtils.closeStream(conn.getInputStream());
+      // The payload is not serialized if getOutputStream fails.
+      // Calling getInputStream will issue the put/post request with no payload
+      // which causes HTTP 500 server error.
+      if (os == null) {
+        conn.disconnect();
+      } else {
+        IOUtils.closeStream(conn.getInputStream());
+      }
       throw ex;
     }
     if ((conn.getResponseCode() == HttpURLConnection.HTTP_FORBIDDEN


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14841 Kms client should disconnect if unable to get output stream from connection. Contributed by Rushabh S Shah

2018-05-04 Thread shahrs87
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 e933ed0ee -> 864640c1e


HADOOP-14841 Kms client should disconnect if unable to get output stream from 
connection. Contributed by Rushabh S Shah

(cherry picked from commit 4cdbdce752e192b45c2b9756c2d4bd24ceffdabd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/864640c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/864640c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/864640c1

Branch: refs/heads/branch-3.1
Commit: 864640c1e37fc114bb661c6233cd41ce904dc2bd
Parents: e933ed0
Author: Rushabh Shah 
Authored: Fri May 4 15:36:13 2018 -0500
Committer: Rushabh Shah 
Committed: Fri May 4 15:49:39 2018 -0500

--
 .../hadoop/crypto/key/kms/KMSClientProvider.java   | 13 +++--
 1 file changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/864640c1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index f97fde7..45097ef 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -447,12 +447,21 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       int expectedResponse, Class<T> klass, int authRetryCount)
       throws IOException {
     T ret = null;
+    OutputStream os = null;
     try {
       if (jsonOutput != null) {
-        writeJson(jsonOutput, conn.getOutputStream());
+        os = conn.getOutputStream();
+        writeJson(jsonOutput, os);
       }
     } catch (IOException ex) {
-      IOUtils.closeStream(conn.getInputStream());
+      // The payload is not serialized if getOutputStream fails.
+      // Calling getInputStream will issue the put/post request with no payload
+      // which causes HTTP 500 server error.
+      if (os == null) {
+        conn.disconnect();
+      } else {
+        IOUtils.closeStream(conn.getInputStream());
+      }
       throw ex;
     }
     if ((conn.getResponseCode() == HttpURLConnection.HTTP_FORBIDDEN


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14841 Kms client should disconnect if unable to get output stream from connection. Contributed by Rushabh S Shah

2018-05-04 Thread shahrs87
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 21442dcaa -> d9fae993b


HADOOP-14841 Kms client should disconnect if unable to get output stream from 
connection. Contributed by Rushabh S Shah

(cherry picked from commit 4cdbdce752e192b45c2b9756c2d4bd24ceffdabd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9fae993
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9fae993
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9fae993

Branch: refs/heads/branch-3.0
Commit: d9fae993b47709aa295429542c3451aedf6aea0e
Parents: 21442dc
Author: Rushabh Shah 
Authored: Fri May 4 15:36:13 2018 -0500
Committer: Rushabh Shah 
Committed: Fri May 4 15:43:24 2018 -0500

--
 .../hadoop/crypto/key/kms/KMSClientProvider.java   | 13 +++--
 1 file changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9fae993/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index f97fde7..45097ef 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -447,12 +447,21 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       int expectedResponse, Class<T> klass, int authRetryCount)
       throws IOException {
     T ret = null;
+    OutputStream os = null;
     try {
       if (jsonOutput != null) {
-        writeJson(jsonOutput, conn.getOutputStream());
+        os = conn.getOutputStream();
+        writeJson(jsonOutput, os);
       }
     } catch (IOException ex) {
-      IOUtils.closeStream(conn.getInputStream());
+      // The payload is not serialized if getOutputStream fails.
+      // Calling getInputStream will issue the put/post request with no payload
+      // which causes HTTP 500 server error.
+      if (os == null) {
+        conn.disconnect();
+      } else {
+        IOUtils.closeStream(conn.getInputStream());
+      }
       throw ex;
     }
     if ((conn.getResponseCode() == HttpURLConnection.HTTP_FORBIDDEN


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14841 Kms client should disconnect if unable to get output stream from connection. Contributed by Rushabh S Shah

2018-05-04 Thread shahrs87
Repository: hadoop
Updated Branches:
  refs/heads/trunk 96c843f64 -> 4cdbdce75


HADOOP-14841 Kms client should disconnect if unable to get output stream from 
connection. Contributed by Rushabh S Shah


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cdbdce7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cdbdce7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cdbdce7

Branch: refs/heads/trunk
Commit: 4cdbdce752e192b45c2b9756c2d4bd24ceffdabd
Parents: 96c843f
Author: Rushabh Shah 
Authored: Fri May 4 15:36:13 2018 -0500
Committer: Rushabh Shah 
Committed: Fri May 4 15:36:13 2018 -0500

--
 .../hadoop/crypto/key/kms/KMSClientProvider.java   | 13 +++--
 1 file changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cdbdce7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index f97fde7..45097ef 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -447,12 +447,21 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       int expectedResponse, Class<T> klass, int authRetryCount)
       throws IOException {
     T ret = null;
+    OutputStream os = null;
     try {
       if (jsonOutput != null) {
-        writeJson(jsonOutput, conn.getOutputStream());
+        os = conn.getOutputStream();
+        writeJson(jsonOutput, os);
       }
     } catch (IOException ex) {
-      IOUtils.closeStream(conn.getInputStream());
+      // The payload is not serialized if getOutputStream fails.
+      // Calling getInputStream will issue the put/post request with no payload
+      // which causes HTTP 500 server error.
+      if (os == null) {
+        conn.disconnect();
+      } else {
+        IOUtils.closeStream(conn.getInputStream());
+      }
       throw ex;
     }
     if ((conn.getResponseCode() == HttpURLConnection.HTTP_FORBIDDEN
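Outside the KMS code, the pattern this patch introduces can be sketched in isolation (illustrative only, not the KMS implementation): drain the connection only if the request body was actually opened; otherwise disconnect, so no body-less PUT/POST is fired:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class WriteOrDisconnectExample {
      static void post(URL url, byte[] payload) throws IOException {
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        OutputStream os = null;
        try {
          os = conn.getOutputStream();     // may fail before anything is sent
          os.write(payload);
        } catch (IOException ex) {
          if (os == null) {
            conn.disconnect();             // nothing sent; do not trigger the request
          } else {
            conn.getInputStream().close(); // body sent; drain so the socket can be reused
          }
          throw ex;
        }
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }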


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[35/50] [abbrv] hadoop git commit: HDDS-15. Add memory profiler support to Genesis. Contributed by Anu Engineer.

2018-05-04 Thread xkrogen
HDDS-15. Add memory profiler support to Genesis. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b63a0af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b63a0af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b63a0af

Branch: refs/heads/HDFS-12943
Commit: 6b63a0af9b29c231166d9af50d499a246cbbb755
Parents: 3b34fca
Author: Anu Engineer 
Authored: Wed May 2 10:44:47 2018 -0700
Committer: Anu Engineer 
Committed: Wed May 2 12:54:49 2018 -0700

--
 .../apache/hadoop/ozone/genesis/Genesis.java|  7 ++-
 .../ozone/genesis/GenesisMemoryProfiler.java| 59 
 2 files changed, 65 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b63a0af/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
--
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
index 5efa12a..0dc3db7 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
@@ -42,9 +42,12 @@ public final class Genesis {
         .include(BenchMarkMetadataStoreReads.class.getSimpleName())
         .include(BenchMarkMetadataStoreWrites.class.getSimpleName())
         .include(BenchMarkDatanodeDispatcher.class.getSimpleName())
-        .include(BenchMarkRocksDbStore.class.getSimpleName())
+        // Commenting this test out, till we support either a command line or a config
+        // file based ability to run tests.
+        //.include(BenchMarkRocksDbStore.class.getSimpleName())
         .warmupIterations(5)
         .measurementIterations(20)
+        .addProfiler(GenesisMemoryProfiler.class)
         .shouldDoGC(true)
         .forks(1)
         .build();
@@ -52,3 +55,5 @@ public final class Genesis {
     new Runner(opt).run();
   }
 }
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b63a0af/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
--
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
new file mode 100644
index 000..090f1a7
--- /dev/null
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.genesis;
+
+import org.openjdk.jmh.infra.BenchmarkParams;
+import org.openjdk.jmh.infra.IterationParams;
+import org.openjdk.jmh.profile.InternalProfiler;
+import org.openjdk.jmh.results.AggregationPolicy;
+import org.openjdk.jmh.results.IterationResult;
+import org.openjdk.jmh.results.Result;
+import org.openjdk.jmh.results.ScalarResult;
+
+import java.util.ArrayList;
+import java.util.Collection;
+
+/**
+ * Max memory profiler.
+ */
+public class GenesisMemoryProfiler implements InternalProfiler {
+  @Override
+  public void beforeIteration(BenchmarkParams benchmarkParams,
+      IterationParams iterationParams) {
+
+  }
+
+  @Override
+  public Collection<? extends Result> afterIteration(BenchmarkParams
+      benchmarkParams, IterationParams iterationParams, IterationResult
+      result) {
+    long totalHeap = Runtime.getRuntime().totalMemory();
+
+    Collection<ScalarResult> samples = new ArrayList<>();
+    samples.add(new ScalarResult("Max heap", totalHeap, "bytes",
+        AggregationPolicy.MAX));
+    return samples;
+  }
+
+  @Override
+  public String getDescription() {
+    return "Genesis Memory Profiler. Computes Max Memory used by a test.";
+  }
+}
+
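A hedged sketch of wiring this profiler into a JMH run outside Genesis ("MyBenchmark" is a stand-in include pattern). Note the profiler samples Runtime.totalMemory(), i.e. committed heap, after each iteration and relies on JMH's MAX aggregation:

    import org.apache.hadoop.ozone.genesis.GenesisMemoryProfiler;
    import org.openjdk.jmh.runner.Runner;
    import org.openjdk.jmh.runner.RunnerException;
    import org.openjdk.jmh.runner.options.Options;
    import org.openjdk.jmh.runner.options.OptionsBuilder;

    public class ProfilerRunnerExample {
      public static void main(String[] args) throws RunnerException {
        Options opt = new OptionsBuilder()
            .include("MyBenchmark")                   // stand-in benchmark name
            .addProfiler(GenesisMemoryProfiler.class) // adds a "Max heap" metric
            .warmupIterations(5)
            .measurementIterations(20)
            .forks(1)
            .build();
        new Runner(opt).run();
      }
    }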


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

[24/50] [abbrv] hadoop git commit: HADOOP-15250. Split-DNS MultiHomed Server Network Cluster Network IPC Client Bind Addr Wrong Contributed by Ajay Kumar

2018-05-04 Thread xkrogen
HADOOP-15250. Split-DNS MultiHomed Server Network Cluster Network IPC Client 
Bind Addr Wrong
Contributed by Ajay Kumar


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f42dafc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f42dafc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f42dafc

Branch: refs/heads/HDFS-12943
Commit: 8f42dafcf82d5b426dd931dc5ddd177dd6f283f7
Parents: 68c6ec7
Author: Steve Loughran 
Authored: Tue May 1 22:32:40 2018 +0100
Committer: Steve Loughran 
Committed: Tue May 1 22:32:40 2018 +0100

--
 .../apache/hadoop/fs/CommonConfigurationKeys.java   |  4 
 .../src/main/java/org/apache/hadoop/ipc/Client.java | 16 
 .../main/java/org/apache/hadoop/net/NetUtils.java   | 16 
 .../src/main/resources/core-default.xml |  8 
 .../java/org/apache/hadoop/net/TestNetUtils.java|  8 
 5 files changed, 48 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f42dafc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 043e52a..1eb27f8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -341,6 +341,10 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String  IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY = "ipc.client.fallback-to-simple-auth-allowed";
   public static final boolean IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT = false;
 
+  public static final String  IPC_CLIENT_BIND_WILDCARD_ADDR_KEY = "ipc.client"
+      + ".bind.wildcard.addr";
+  public static final boolean IPC_CLIENT_BIND_WILDCARD_ADDR_DEFAULT = false;
+
   public static final String IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY =
       "ipc.client.connect.max.retries.on.sasl";
   public static final int    IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT = 5;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f42dafc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index a0417d6..163e80d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -135,6 +135,7 @@ public class Client implements AutoCloseable {
   private final int connectionTimeout;
 
   private final boolean fallbackAllowed;
+  private final boolean bindToWildCardAddress;
   private final byte[] clientId;
   private final int maxAsyncCalls;
   private final AtomicInteger asyncCallCounter = new AtomicInteger(0);
@@ -674,10 +675,10 @@ public class Client implements AutoCloseable {
       InetAddress localAddr = NetUtils.getLocalInetAddress(host);
       if (localAddr != null) {
         this.socket.setReuseAddress(true);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Binding " + principal + " to " + localAddr);
-        }
-        bindAddr = new InetSocketAddress(localAddr, 0);
+        localAddr = NetUtils.bindToLocalAddress(localAddr,
+            bindToWildCardAddress);
+        LOG.debug("Binding {} to {}", principal, localAddr);
+        this.socket.bind(new InetSocketAddress(localAddr, 0));
       }
     }
   }
@@ -1277,6 +1278,13 @@ public class Client implements AutoCloseable {
         CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT);
     this.fallbackAllowed = conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
         CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
+    this.bindToWildCardAddress = conf
+        .getBoolean(CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY,
+            CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_DEFAULT);
+    LOG.debug("{} set to true. Will bind client sockets to wildcard "
+        + "address.",
+
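The message is truncated above. For context, a plausible sketch of the NetUtils.bindToLocalAddress helper the hunk calls (the real implementation may differ): when ipc.client.bind.wildcard.addr is true, the client socket binds to the wildcard address instead of the interface-specific one:

    import java.net.InetAddress;
    import java.net.InetSocketAddress;

    public class WildcardBindSketch {
      static InetAddress bindToLocalAddress(InetAddress localAddr,
          boolean bindWildcardAddress) {
        if (bindWildcardAddress) {
          // 0.0.0.0 (or ::), so split-DNS multihomed hosts can receive
          // replies on any interface
          return new InetSocketAddress(0).getAddress();
        }
        return localAddr;
      }
    }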

[26/50] [abbrv] hadoop git commit: MAPREDUCE-7073. Optimize TokenCache#obtainTokensForNamenodesInternal

2018-05-04 Thread xkrogen
MAPREDUCE-7073. Optimize TokenCache#obtainTokensForNamenodesInternal

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a95a452
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a95a452
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a95a452

Branch: refs/heads/HDFS-12943
Commit: 1a95a4524a8c6c7be601ce8b92640a6a76164a2c
Parents: 3726926
Author: Bibin A Chundatt 
Authored: Wed May 2 16:14:28 2018 +0900
Committer: Akira Ajisaka 
Committed: Wed May 2 16:14:28 2018 +0900

--
 .../hadoop/mapreduce/security/TokenCache.java | 14 +-
 .../hadoop/mapreduce/security/TestTokenCache.java | 18 +-
 2 files changed, 18 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a95a452/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
index 12fced9..1156c67 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -96,8 +97,9 @@ public class TokenCache {
     for(Path p: ps) {
       fsSet.add(p.getFileSystem(conf));
     }
+    String masterPrincipal = Master.getMasterPrincipal(conf);
     for (FileSystem fs : fsSet) {
-      obtainTokensForNamenodesInternal(fs, credentials, conf);
+      obtainTokensForNamenodesInternal(fs, credentials, conf, masterPrincipal);
     }
   }
 
@@ -122,15 +124,17 @@
    * @param conf
    * @throws IOException
    */
-  static void obtainTokensForNamenodesInternal(FileSystem fs, 
-      Credentials credentials, Configuration conf) throws IOException {
+  static void obtainTokensForNamenodesInternal(FileSystem fs,
+      Credentials credentials, Configuration conf, String renewer)
+      throws IOException {
     // RM skips renewing token with empty renewer
     String delegTokenRenewer = "";
     if (!isTokenRenewalExcluded(fs, conf)) {
-      delegTokenRenewer = Master.getMasterPrincipal(conf);
-      if (delegTokenRenewer == null || delegTokenRenewer.length() == 0) {
+      if (StringUtils.isEmpty(renewer)) {
         throw new IOException(
             "Can't get Master Kerberos principal for use as renewer");
+      } else {
+        delegTokenRenewer = renewer;
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a95a452/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
index 127f8ae..a44e533 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
@@ -56,8 +56,8 @@ public class TestTokenCache {
   @Test
   public void testObtainTokens() throws Exception {
     Credentials credentials = new Credentials();
-    FileSystem fs = mock(FileSystem.class);  
-    TokenCache.obtainTokensForNamenodesInternal(fs, credentials, conf);
+    FileSystem fs = mock(FileSystem.class);
+    TokenCache.obtainTokensForNamenodesInternal(fs, credentials, conf,
+        renewer);
     verify(fs).addDelegationTokens(eq(renewer), eq(credentials));
   }
 
@@ -105,23 +105,23 @@
     checkToken(creds, newerToken1);
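The digest is cut off above, but the first hunk already shows the gist of the optimization: the Kerberos master principal is now resolved once per job submission instead of once per distinct FileSystem. A self-contained sketch of that hoisting pattern (all names here are made up):

    import java.io.IOException;

    public class HoistedRenewerExample {
      interface PrincipalResolver {
        String resolve() throws IOException; // potentially expensive lookup
      }

      static void obtainAll(Iterable<String> fileSystems,
          PrincipalResolver resolver) throws IOException {
        String renewer = resolver.resolve(); // resolved once, outside the loop
        for (String fs : fileSystems) {
          System.out.println("tokens for " + fs + ", renewer=" + renewer);
        }
      }
    }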
  

[29/50] [abbrv] hadoop git commit: YARN-6385. Fix checkstyle warnings in TestFileSystemApplicationHistoryStore

2018-05-04 Thread xkrogen
YARN-6385. Fix checkstyle warnings in TestFileSystemApplicationHistoryStore

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3265b551
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3265b551
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3265b551

Branch: refs/heads/HDFS-12943
Commit: 3265b55119d39ecbda6d75be04a9a1bf59c631f1
Parents: e07156e
Author: Yiqun Lin 
Authored: Wed May 2 18:14:02 2018 +0900
Committer: Akira Ajisaka 
Committed: Wed May 2 18:14:02 2018 +0900

--
 .../TestFileSystemApplicationHistoryStore.java| 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3265b551/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
index df4adbe..6b068c1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
@@ -272,20 +272,20 @@ public class TestFileSystemApplicationHistoryStore extends
     tearDown();
 
     // Setup file system to inject startup conditions
-    FileSystem fs = spy(new RawLocalFileSystem());
+    FileSystem fileSystem = spy(new RawLocalFileSystem());
     FileStatus fileStatus = Mockito.mock(FileStatus.class);
     doReturn(true).when(fileStatus).isDirectory();
-    doReturn(fileStatus).when(fs).getFileStatus(any(Path.class));
+    doReturn(fileStatus).when(fileSystem).getFileStatus(any(Path.class));
 
     try {
-      initAndStartStore(fs);
+      initAndStartStore(fileSystem);
     } catch (Exception e) {
       Assert.fail("Exception should not be thrown: " + e);
     }
 
     // Make sure that directory creation was not attempted
     verify(fileStatus, never()).isDirectory();
-    verify(fs, times(1)).mkdirs(any(Path.class));
+    verify(fileSystem, times(1)).mkdirs(any(Path.class));
   }
 
   @Test
@@ -294,14 +294,14 @@ public class TestFileSystemApplicationHistoryStore extends
     tearDown();
 
     // Setup file system to inject startup conditions
-    FileSystem fs = spy(new RawLocalFileSystem());
+    FileSystem fileSystem = spy(new RawLocalFileSystem());
     FileStatus fileStatus = Mockito.mock(FileStatus.class);
     doReturn(false).when(fileStatus).isDirectory();
-    doReturn(fileStatus).when(fs).getFileStatus(any(Path.class));
-    doThrow(new IOException()).when(fs).mkdirs(any(Path.class));
+    doReturn(fileStatus).when(fileSystem).getFileStatus(any(Path.class));
+    doThrow(new IOException()).when(fileSystem).mkdirs(any(Path.class));
 
     try {
-      initAndStartStore(fs);
+      initAndStartStore(fileSystem);
       Assert.fail("Exception should have been thrown");
     } catch (Exception e) {
       // Expected failure
@@ -309,6 +309,6 @@ public class TestFileSystemApplicationHistoryStore extends
 
     // Make sure that directory creation was attempted
     verify(fileStatus, never()).isDirectory();
-    verify(fs, times(1)).mkdirs(any(Path.class));
+    verify(fileSystem, times(1)).mkdirs(any(Path.class));
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[13/50] [abbrv] hadoop git commit: HDFS-13283. Percentage based Reserved Space Calculation for DataNode. Contributed by Lukas Majercak.

2018-05-04 Thread xkrogen
HDFS-13283. Percentage based Reserved Space Calculation for DataNode. 
Contributed by Lukas Majercak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc074a35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc074a35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc074a35

Branch: refs/heads/HDFS-12943
Commit: fc074a359c44e84dd9612be2bd772763f943eb04
Parents: 9b09555
Author: Inigo Goiri 
Authored: Mon Apr 30 13:28:33 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 30 13:28:33 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  38 ++--
 .../fsdataset/impl/FsVolumeImplBuilder.java |  16 +-
 .../fsdataset/impl/ProvidedVolumeImpl.java  |   2 +-
 .../fsdataset/impl/ReservedSpaceCalculator.java | 227 +++
 .../src/main/resources/hdfs-default.xml |  28 +++
 .../fsdataset/impl/TestFsVolumeList.java|  90 +++-
 .../impl/TestReservedSpaceCalculator.java   | 171 ++
 8 files changed, 561 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc074a35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index a7f0a07..bc8e81f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -647,8 +648,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_KEY = "dfs.datanode.dns.nameserver";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_DEFAULT = "default";
+  public static final String DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY =
+      "dfs.datanode.du.reserved.calculator";
+  public static final Class<? extends ReservedSpaceCalculator>
+      DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT =
+      ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute.class;
   public static final String  DFS_DATANODE_DU_RESERVED_KEY = "dfs.datanode.du.reserved";
   public static final long    DFS_DATANODE_DU_RESERVED_DEFAULT = 0;
+  public static final String  DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY =
+      "dfs.datanode.du.reserved.pct";
+  public static final int DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT = 0;
   public static final String  DFS_DATANODE_HANDLER_COUNT_KEY = "dfs.datanode.handler.count";
   public static final int     DFS_DATANODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = "dfs.datanode.http.address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc074a35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index b8c95a4..9969976 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -78,7 +78,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.CloseableReferenceCount;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;
@@ -121,7 +120,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   private final File currentDir;    // 
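The message is truncated above. A hedged sketch of opting into percentage-based reservation with the new keys; the calculator class name below is inferred from the default's naming convention and may not match the actual class, and 5% is an illustrative value:

    import org.apache.hadoop.conf.Configuration;

    public class PercentageReservedExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Assumed class name, mirroring ReservedSpaceCalculatorAbsolute above.
        conf.set("dfs.datanode.du.reserved.calculator",
            "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl."
                + "ReservedSpaceCalculator$ReservedSpaceCalculatorPercentage");
        conf.setInt("dfs.datanode.du.reserved.pct", 5); // reserve 5% of each volume
      }
    }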

[38/50] [abbrv] hadoop git commit: YARN-8151. Yarn RM Epoch should wrap around. Contributed by Young Chen.

2018-05-04 Thread xkrogen
YARN-8151. Yarn RM Epoch should wrap around. Contributed by Young Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6a80e47
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6a80e47
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6a80e47

Branch: refs/heads/HDFS-12943
Commit: e6a80e476d4348a4373e6dd5792d70edff16516f
Parents: 87c23ef
Author: Inigo Goiri 
Authored: Wed May 2 17:23:17 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 2 17:23:17 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java   |  4 
 .../src/main/resources/yarn-default.xml   |  7 +++
 .../recovery/FileSystemRMStateStore.java  |  4 ++--
 .../recovery/LeveldbRMStateStore.java |  2 +-
 .../recovery/MemoryRMStateStore.java  |  2 +-
 .../resourcemanager/recovery/RMStateStore.java| 18 +-
 .../resourcemanager/recovery/ZKRMStateStore.java  |  4 ++--
 .../recovery/RMStateStoreTestBase.java| 14 ++
 .../recovery/TestFSRMStateStore.java  |  1 +
 .../recovery/TestLeveldbRMStateStore.java |  1 +
 .../recovery/TestZKRMStateStore.java  |  1 +
 11 files changed, 51 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8aa136d..5ba2e05 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -188,6 +188,10 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_EPOCH = RM_PREFIX + "epoch";
   public static final long DEFAULT_RM_EPOCH = 0L;
 
+  /** The epoch range before wrap around. 0 disables wrap around. */
+  public static final String RM_EPOCH_RANGE = RM_EPOCH + ".range";
+  public static final long DEFAULT_RM_EPOCH_RANGE = 0;
+
   /** The address of the applications manager interface in the RM. */
   public static final String RM_ADDRESS =
       RM_PREFIX + "address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 85915c2..4eb509f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -677,6 +677,13 @@
   </property>
 
   <property>
+    <description>The range of values above base epoch that the RM will use before
+      wrapping around</description>
+    <name>yarn.resourcemanager.epoch.range</name>
+    <value>0</value>
+  </property>
+
+  <property>
     <description>The list of RM nodes in the cluster when HA is
       enabled. See description of yarn.resourcemanager.ha
       .enabled for full details on how this is used.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a80e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
index 19297bc..b797283 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
@@ -205,12 +205,12 @@ public class FileSystemRMStateStore extends RMStateStore {
      Epoch epoch = new EpochPBImpl(EpochProto.parseFrom(data));
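The diff is truncated here, but the arithmetic the feature needs can be sketched independently (a hedged illustration; the exact expression in RMStateStore may differ). With a configured range R > 0, epoch values stay within [base, base + R):

    public class EpochWrapSketch {
      static long nextEpoch(long current, long base, long range) {
        long next = current + 1;
        if (range > 0) {
          next = base + ((next - base) % range); // wrap around past base + range
        }
        return next;
      }

      public static void main(String[] args) {
        long e = 0;
        for (int i = 0; i < 5; i++) {
          e = nextEpoch(e, 0, 3);
          System.out.print(e + " "); // prints: 1 2 0 1 2
        }
      }
    }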
 

[11/50] [abbrv] hadoop git commit: HDDS-11. Fix findbugs exclude rules for ozone and hdds projects. Contributed by Elek, Marton.

2018-05-04 Thread xkrogen
HDDS-11. Fix findbugs exclude rules for ozone and hdds projects. Contributed by 
Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d43474f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d43474f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d43474f

Branch: refs/heads/HDFS-12943
Commit: 3d43474f7567117e4e11a0d198be6aa1fc023106
Parents: eb7fe1d
Author: Anu Engineer 
Authored: Mon Apr 30 09:20:58 2018 -0700
Committer: Anu Engineer 
Committed: Mon Apr 30 09:20:58 2018 -0700

--
 .../dev-support/findbugsExcludeFile.xml | 21 
 hadoop-hdds/container-service/pom.xml   |  7 +++
 .../tools/dev-support/findbugsExcludeFile.xml   | 19 ++
 hadoop-ozone/tools/pom.xml  | 14 +
 4 files changed, 61 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d43474f/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
--
diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
new file mode 100644
index 000..3571a89
--- /dev/null
+++ b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
@@ -0,0 +1,21 @@
+
+
+  
+
+  
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d43474f/hadoop-hdds/container-service/pom.xml
--
diff --git a/hadoop-hdds/container-service/pom.xml 
b/hadoop-hdds/container-service/pom.xml
index 3dc8470..36c7235 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -98,6 +98,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   
 
   
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
+        </configuration>
+      </plugin>
 
   
 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d43474f/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml
--
diff --git a/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml 
b/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml
new file mode 100644
index 000..e6a345e
--- /dev/null
+++ b/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml
@@ -0,0 +1,19 @@
+
+
+
+ 
+   
+ 
+ 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d43474f/hadoop-ozone/tools/pom.xml
--
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index 918a675..839ca0d 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -68,4 +68,18 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   1.19
 
   
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml
+          </excludeFilterFile>
+          <fork>true</fork>
+          <maxHeap>2048</maxHeap>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/50] [abbrv] hadoop git commit: YARN-8225. YARN precommit build failing in TestPlacementConstraintTransformations. Contributed by Shane Kumpf.

2018-05-04 Thread xkrogen
YARN-8225. YARN precommit build failing in 
TestPlacementConstraintTransformations. Contributed by Shane Kumpf.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c95eb81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c95eb81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c95eb81

Branch: refs/heads/HDFS-12943
Commit: 2c95eb8111a7b03fd4683f740123cd4720b62c3e
Parents: 4844406
Author: Weiwei Yang 
Authored: Sat Apr 28 17:37:37 2018 +0800
Committer: Weiwei Yang 
Committed: Sat Apr 28 17:37:37 2018 +0800

--
 .../yarn/api/resource/PlacementConstraintTransformations.java| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c95eb81/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
index c5d21af..a15b20a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.api.resource;
 import java.util.ListIterator;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.AllocationTagNamespaceType;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CardinalityConstraint;
@@ -162,7 +163,8 @@ public class PlacementConstraintTransformations {
     public AbstractConstraint visit(CardinalityConstraint constraint) {
       return new SingleConstraint(constraint.getScope(),
           constraint.getMinCardinality(), constraint.getMaxCardinality(),
-          new TargetExpression(TargetExpression.TargetType.ALLOCATION_TAG, null,
+          new TargetExpression(TargetExpression.TargetType.ALLOCATION_TAG,
+              AllocationTagNamespaceType.SELF.toString(),
               constraint.getAllocationTags()));
     }
   }
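For context, a hedged sketch of building a cardinality constraint that this visitor would rewrite; after the fix the generated target expression carries the SELF namespace instead of null. The "hbase" tag is illustrative, and the DSL call is assumed from the PlacementConstraints API:

    import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.build;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;

    public class CardinalityConstraintExample {
      public static void main(String[] args) {
        // At most 2 containers tagged "hbase" per node, within this app (SELF).
        PlacementConstraint pc = build(cardinality(NODE, 0, 2, "hbase"));
        System.out.println(pc);
      }
    }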


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[44/50] [abbrv] hadoop git commit: YARN-7818. Remove privileged operation warnings during container launch for the ContainerRuntimes. Contributed by Shane Kumpf

2018-05-04 Thread xkrogen
YARN-7818. Remove privileged operation warnings during container launch for the 
ContainerRuntimes. Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/502914ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/502914ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/502914ca

Branch: refs/heads/HDFS-12943
Commit: 502914ca32ac02b19116fd681eb8301b92fccbb3
Parents: a3b416f
Author: Billie Rinaldi 
Authored: Fri May 4 08:53:55 2018 -0700
Committer: Billie Rinaldi 
Committed: Fri May 4 08:53:55 2018 -0700

--
 .../linux/runtime/DefaultLinuxContainerRuntime.java   |  5 +++--
 .../linux/runtime/DockerLinuxContainerRuntime.java| 14 +++---
 2 files changed, 10 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/502914ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
index d8db6ad..b5c933a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
@@ -108,6 +108,9 @@ public class DefaultLinuxContainerRuntime implements 
LinuxContainerRuntime {
   launchOp.appendArgs(tcCommandFile);
 }
 
+// Some failures here are acceptable. Let the calling executor decide.
+launchOp.disableFailureLogging();
+
 //List<String> -> stored as List -> fetched/converted to List<String>
 //we can't do better here thanks to type-erasure
 @SuppressWarnings("unchecked")
@@ -118,8 +121,6 @@ public class DefaultLinuxContainerRuntime implements 
LinuxContainerRuntime {
   privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
 launchOp, null, null, false, false);
 } catch (PrivilegedOperationException e) {
-  LOG.warn("Launch container failed. Exception: ", e);
-
   throw new ContainerExecutionException("Launch container failed", e
   .getExitCode(), e.getOutput(), e.getErrorOutput());
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/502914ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index ec1d055..0bacd03 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -914,13 +914,13 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 PrivilegedOperation launchOp = buildLaunchOp(ctx,
 commandFile, runCommand);
 
+// Some failures here are acceptable. Let the calling executor decide.
+launchOp.disableFailureLogging();
+
 try {
   privilegedOperationExecutor.executePrivilegedOperation(null,
   launchOp, null, null, false, false);
 } catch (PrivilegedOperationException e) {
-  LOG.warn("Launch container failed. Exception: ", e);
-  LOG.info("Docker command used: " + runCommand);
-
   throw new ContainerExecutionException("Launch container failed", e
   .getExitCode(), e.getOutput(), e.getErrorOutput());
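
The pattern in this change, reduced to a self-contained sketch: the operation object carries an opt-out flag, the shared executor owns the single log site, and the thrown exception still carries the details for the caller to report. The class names below are hypothetical stand-ins for PrivilegedOperation and PrivilegedOperationExecutor.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FailureLoggingExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(FailureLoggingExample.class);

  // Stand-in for PrivilegedOperation: failures are logged unless disabled.
  static class Op {
    private boolean logFailures = true;
    void disableFailureLogging() { logFailures = false; }
    boolean logFailures() { return logFailures; }
  }

  // Stand-in for the executor: the only place a failure is ever logged.
  static void execute(Op op, Runnable body) {
    try {
      body.run();
    } catch (RuntimeException e) {
      if (op.logFailures()) {
        LOG.warn("Operation failed", e); // single, optional log site
      }
      throw e; // exit code and output still travel with the exception
    }
  }

  public static void main(String[] args) {
    Op launch = new Op();
    launch.disableFailureLogging(); // this caller reports failures itself
    try {
      execute(launch, () -> { throw new RuntimeException("simulated"); });
    } catch (RuntimeException e) {
      System.err.println("caller-side report: " + e.getMessage());
    }
  }
}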

[16/50] [abbrv] hadoop git commit: YARN-8212. Pending backlog for async allocation threads should be configurable. Contributed by Tao Yang.

2018-05-04 Thread xkrogen
YARN-8212. Pending backlog for async allocation threads should be configurable. 
Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d319e37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d319e37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d319e37

Branch: refs/heads/HDFS-12943
Commit: 2d319e37937c1e20c6a7dc4477ef88defd1f8464
Parents: a966ec6
Author: Weiwei Yang 
Authored: Tue May 1 09:47:10 2018 +0800
Committer: Weiwei Yang 
Committed: Tue May 1 09:47:10 2018 +0800

--
 .../scheduler/capacity/CapacityScheduler.java   | 9 -
 .../scheduler/capacity/CapacitySchedulerConfiguration.java  | 8 
 2 files changed, 16 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d319e37/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 776e512..1d6c104 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -261,6 +261,7 @@ public class CapacityScheduler extends
   CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
   + ".scheduling-interval-ms";
   private static final long DEFAULT_ASYNC_SCHEDULER_INTERVAL = 5;
+  private long asyncMaxPendingBacklogs;
 
   public CapacityScheduler() {
 super(CapacityScheduler.class.getName());
@@ -379,6 +380,11 @@ public class CapacityScheduler extends
   asyncSchedulerThreads.add(new AsyncScheduleThread(this));
 }
 resourceCommitterService = new ResourceCommitterService(this);
+asyncMaxPendingBacklogs = this.conf.getInt(
+CapacitySchedulerConfiguration.
+SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS,
+CapacitySchedulerConfiguration.
+DEFAULT_SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS);
   }
 
   // Setup how many containers we can allocate for each round
@@ -573,7 +579,8 @@ public class CapacityScheduler extends
 Thread.sleep(100);
   } else {
 // Don't run schedule if we have some pending backlogs already
-if (cs.getAsyncSchedulingPendingBacklogs() > 100) {
+if (cs.getAsyncSchedulingPendingBacklogs()
+> cs.asyncMaxPendingBacklogs) {
   Thread.sleep(1);
 } else{
   schedule(cs);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d319e37/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index c41bd96..76eaac0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -246,6 +246,14 @@ public class CapacitySchedulerConfiguration extends 
ReservationSchedulerConfigur
   SCHEDULE_ASYNCHRONOUSLY_PREFIX + ".maximum-threads";
 
   @Private
+  public static final String SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS =
  SCHEDULE_ASYNCHRONOUSLY_PREFIX + ".maximum-pending-backlogs";

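A hedged usage sketch for the new knob. The full property key is cut off above; the suffix "maximum-pending-backlogs" is inferred from the constant name, and the default of 100 matches the hard-coded threshold this patch replaces. Verify both against CapacitySchedulerConfiguration in your build.

import org.apache.hadoop.conf.Configuration;

public class AsyncBacklogConfigExample {
  private static final String KEY =
      "yarn.scheduler.capacity.schedule-asynchronously.maximum-pending-backlogs";

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Allow a deeper commit backlog before the async threads back off.
    conf.setInt(KEY, 200);
    System.out.println(conf.getInt(KEY, 100)); // 100 was the old fixed limit
  }
}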
[43/50] [abbrv] hadoop git commit: HDFS-13525. RBF: Add unit test TestStateStoreDisabledNameservice. Contributed by Yiqun Lin.

2018-05-04 Thread xkrogen
HDFS-13525. RBF: Add unit test TestStateStoreDisabledNameservice. Contributed 
by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3b416f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3b416f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3b416f6

Branch: refs/heads/HDFS-12943
Commit: a3b416f69dc3965f247603f657df33bd74fd723e
Parents: 7698737
Author: Inigo Goiri 
Authored: Thu May 3 11:24:57 2018 -0700
Committer: Inigo Goiri 
Committed: Thu May 3 11:24:57 2018 -0700

--
 .../TestStateStoreDisabledNameservice.java  | 71 
 1 file changed, 71 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3b416f6/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreDisabledNameservice.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreDisabledNameservice.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreDisabledNameservice.java
new file mode 100644
index 0000000..353510a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreDisabledNameservice.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store;
+
+import static 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.clearRecords;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Set;
+
+import 
org.apache.hadoop.hdfs.server.federation.store.records.DisabledNameservice;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test the basic {@link StateStoreService}
+ * {@link DisabledNameserviceStore} functionality.
+ */
+public class TestStateStoreDisabledNameservice extends TestStateStoreBase {
+
+  private static DisabledNameserviceStore disabledStore;
+
+  @Before
+  public void setup() throws IOException, InterruptedException {
+disabledStore = getStateStore()
+.getRegisteredRecordStore(DisabledNameserviceStore.class);
+// Clear disabled nameservice registrations
+assertTrue(clearRecords(getStateStore(), DisabledNameservice.class));
+  }
+
+  @Test
+  public void testDisableNameservice() throws IOException {
+// no nameservices are disabled initially
+Set<String> disabledNameservices = disabledStore.getDisabledNameservices();
+assertEquals(0, disabledNameservices.size());
+
+// disable two nameservices
+disabledStore.disableNameservice("ns0");
+disabledStore.disableNameservice("ns1");
+disabledStore.loadCache(true);
+// verify if the nameservices are disabled
+disabledNameservices = disabledStore.getDisabledNameservices();
+assertEquals(2, disabledNameservices.size());
+assertTrue(disabledNameservices.contains("ns0")
+&& disabledNameservices.contains("ns1"));
+
+// enable one nameservice
+disabledStore.enableNameservice("ns0");
+disabledStore.loadCache(true);
+// verify the disabled nameservice again
+disabledNameservices = disabledStore.getDisabledNameservices();
+assertEquals(1, disabledNameservices.size());
+assertTrue(disabledNameservices.contains("ns1"));
+  }
+}
\ No newline at end of file





[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-12943

2018-05-04 Thread xkrogen
Merge branch 'trunk' into HDFS-12943

# Conflicts:
#   
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
#   
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a38fde5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a38fde5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a38fde5d

Branch: refs/heads/HDFS-12943
Commit: a38fde5d0a9c0a8d20204c9b546e6f0de58b6a2a
Parents: f8ee212 96c843f
Author: Erik Krogen 
Authored: Fri May 4 12:25:45 2018 -0700
Committer: Erik Krogen 
Committed: Fri May 4 12:25:45 2018 -0700

--
 BUILDING.txt| 2 +
 LICENSE.txt |68 +
 dev-support/bin/dist-layout-stitching   |22 +-
 dev-support/docker/Dockerfile   | 3 +
 .../assemblies/hadoop-src-with-hdsl.xml |56 +
 .../main/resources/assemblies/hadoop-src.xml| 2 +
 .../ensure-jars-have-correct-contents.sh| 6 +
 .../hadoop-client-minicluster/pom.xml   | 7 +
 .../hadoop-client-runtime/pom.xml   | 1 +
 .../src/main/bin/hadoop-functions.sh|14 +-
 .../hadoop-common/src/main/conf/hadoop-env.sh   |17 +
 .../src/main/conf/log4j.properties  |34 +
 .../org/apache/hadoop/conf/Configuration.java   |11 +-
 .../crypto/key/kms/KMSClientProvider.java   |   212 +-
 .../crypto/key/kms/KMSDelegationToken.java  |22 +-
 .../crypto/key/kms/KMSLegacyTokenRenewer.java   |56 +
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java  |   103 +
 .../hadoop/crypto/key/kms/package-info.java |18 +
 .../apache/hadoop/fs/ChecksumFileSystem.java| 9 +-
 .../hadoop/fs/CommonConfigurationKeys.java  | 4 +
 .../fs/CommonConfigurationKeysPublic.java   |10 +
 .../hadoop/fs/CompositeCrcFileChecksum.java |82 +
 .../java/org/apache/hadoop/fs/FileSystem.java   | 2 +-
 .../main/java/org/apache/hadoop/fs/Options.java |11 +
 .../org/apache/hadoop/fs/shell/Command.java |69 +-
 .../apache/hadoop/fs/shell/CopyCommands.java| 6 +
 .../java/org/apache/hadoop/fs/shell/Ls.java |26 +-
 .../org/apache/hadoop/fs/shell/PathData.java|27 +
 .../main/java/org/apache/hadoop/ipc/Client.java |16 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java| 5 +-
 .../main/java/org/apache/hadoop/ipc/RPC.java|46 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |11 +-
 .../apache/hadoop/ipc/WritableRpcEngine.java| 2 +-
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   |11 +-
 .../hadoop/metrics2/impl/MetricsConfig.java |50 +-
 .../java/org/apache/hadoop/net/NetUtils.java|16 +
 .../AuthenticationFilterInitializer.java|10 +-
 .../hadoop/security/UserGroupInformation.java   |10 +-
 .../web/DelegationTokenAuthenticatedURL.java|21 +-
 .../DelegationTokenAuthenticationHandler.java   | 8 +-
 .../web/DelegationTokenAuthenticator.java   | 2 +-
 .../hadoop/service/launcher/IrqHandler.java | 2 +-
 .../java/org/apache/hadoop/util/ConfTest.java   |10 +-
 .../org/apache/hadoop/util/CrcComposer.java |   187 +
 .../java/org/apache/hadoop/util/CrcUtil.java|   220 +
 .../org/apache/hadoop/util/DataChecksum.java|18 +
 .../hadoop/util/GenericOptionsParser.java   | 3 +
 .../java/org/apache/hadoop/util/KMSUtil.java|45 +-
 .../hadoop/util/KMSUtilFaultInjector.java   |49 +
 .../hadoop/util/concurrent/HadoopExecutors.java |34 +-
 ...apache.hadoop.security.token.TokenIdentifier | 1 +
 ...rg.apache.hadoop.security.token.TokenRenewer | 3 +-
 .../src/main/resources/core-default.xml |41 +
 .../src/site/markdown/CommandsManual.md |17 +
 .../src/site/markdown/HttpAuthentication.md | 2 +-
 .../markdown/release/3.0.2/CHANGES.3.0.2.md |31 +
 .../release/3.0.2/RELEASENOTES.3.0.2.md |31 +
 .../markdown/release/3.1.0/CHANGES.3.1.0.md |  1022 +
 .../release/3.1.0/RELEASENOTES.3.1.0.md |   199 +
 .../conf/TestCommonConfigurationFields.java | 3 +
 .../apache/hadoop/conf/TestConfiguration.java   |26 +-
 ...yptoStreamsWithOpensslAesCtrCryptoCodec.java | 2 +-
 .../crypto/key/kms/TestKMSClientProvider.java   |   162 +
 .../kms/TestLoadBalancingKMSClientProvider.java |67 +-
 .../apache/hadoop/fs/TestLocalFileSystem.java   | 2 +-
 .../fs/contract/AbstractContractCreateTest.java |12 +-
 .../apache/hadoop/fs/shell/find/TestFind.java   |34 +-
 .../org/apache/hadoop/http/TestHttpServer.java  | 2 +-
 .../java/org/apache/hadoop/io/TestIOUtils.java  | 2 +-
 

[41/50] [abbrv] hadoop git commit: YARN-7961. Improve status message for YARN service. Contributed by Gour Saha

2018-05-04 Thread xkrogen
YARN-7961. Improve status message for YARN service.
   Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fe3214d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fe3214d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fe3214d

Branch: refs/heads/HDFS-12943
Commit: 7fe3214d4bb810c0da18dd936875b4e2588ba518
Parents: ee2ce92
Author: Eric Yang 
Authored: Thu May 3 13:27:07 2018 -0400
Committer: Eric Yang 
Committed: Thu May 3 13:27:07 2018 -0400

--
 .../yarn/service/client/ApiServiceClient.java|  7 +++
 .../hadoop/yarn/service/webapp/ApiServer.java| 10 +++---
 .../hadoop/yarn/service/ServiceClientTest.java   |  7 ---
 .../hadoop/yarn/service/TestApiServer.java   | 15 ++-
 .../service/client/TestApiServiceClient.java | 19 ++-
 5 files changed, 50 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe3214d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
index cdba555..757e664 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
@@ -479,6 +479,13 @@ public class ApiServiceClient extends AppAdminClient {
 try {
   ClientResponse response = getApiClient(getServicePath(appName))
   .get(ClientResponse.class);
+  if (response.getStatus() == 404) {
+StringBuilder sb = new StringBuilder();
+sb.append(" Service ");
+sb.append(appName);
+sb.append(" not found");
+return sb.toString();
+  }
   if (response.getStatus() != 200) {
 StringBuilder sb = new StringBuilder();
 sb.append(appName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe3214d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index 9a30fcf..8c7c0ee 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -186,7 +186,7 @@ public class ApiServer {
 ServiceStatus serviceStatus = new ServiceStatus();
 try {
   if (appName == null) {
-throw new IllegalArgumentException("Service name can not be null.");
+throw new IllegalArgumentException("Service name cannot be null.");
   }
   UserGroupInformation ugi = getProxyUser(request);
   LOG.info("GET: getService for appName = {} user = {}", appName, ugi);
@@ -194,12 +194,16 @@ public class ApiServer {
   return Response.ok(app).build();
 } catch (AccessControlException e) {
   return formatResponse(Status.FORBIDDEN, e.getMessage());
-} catch (IllegalArgumentException |
-FileNotFoundException e) {
+} catch (IllegalArgumentException e) {
   serviceStatus.setDiagnostics(e.getMessage());
   serviceStatus.setCode(ERROR_CODE_APP_NAME_INVALID);
   return Response.status(Status.NOT_FOUND).entity(serviceStatus)
   .build();
+} catch (FileNotFoundException e) {
+  serviceStatus.setDiagnostics("Service " + appName + " not found");
+  serviceStatus.setCode(ERROR_CODE_APP_NAME_INVALID);
+  return Response.status(Status.NOT_FOUND).entity(serviceStatus)
+  .build();
 } catch (IOException | InterruptedException e) {
   LOG.error("Get service failed: {}", e);
   return formatResponse(Status.INTERNAL_SERVER_ERROR, e.getMessage());

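A simplified, hypothetical sketch of the error mapping the patch settles on, with JAX-RS types assumed and a stand-in for the service lookup:

import java.io.FileNotFoundException;

import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;

public class NotFoundMappingExample {
  // Hypothetical stand-in for the lookup ApiServer delegates to.
  static Object lookup(String appName) throws FileNotFoundException {
    throw new FileNotFoundException(appName);
  }

  static Response getService(String appName) {
    try {
      if (appName == null) {
        throw new IllegalArgumentException("Service name cannot be null.");
      }
      return Response.ok(lookup(appName)).build();
    } catch (IllegalArgumentException e) {
      // Invalid input: 404 carrying the validation message.
      return Response.status(Status.NOT_FOUND).entity(e.getMessage()).build();
    } catch (FileNotFoundException e) {
      // Unknown service: 404 with a readable diagnostic, not a stack trace.
      return Response.status(Status.NOT_FOUND)
          .entity("Service " + appName + " not found").build();
    }
  }
}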

[30/50] [abbrv] hadoop git commit: YARN-8222. Fix potential NPE when gets RMApp from RM context. Contributed by Tao Yang.

2018-05-04 Thread xkrogen
YARN-8222. Fix potential NPE when gets RMApp from RM context. Contributed by 
Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/251f5288
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/251f5288
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/251f5288

Branch: refs/heads/HDFS-12943
Commit: 251f528814c4a4647cac0af6effb9a73135db180
Parents: 3265b55
Author: Weiwei Yang 
Authored: Wed May 2 17:54:46 2018 +0800
Committer: Weiwei Yang 
Committed: Wed May 2 17:54:46 2018 +0800

--
 .../rmcontainer/RMContainerImpl.java| 30 +++-
 .../scheduler/SchedulerApplicationAttempt.java  | 13 +
 2 files changed, 23 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/251f5288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 541621b..b5c8e7c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
@@ -737,21 +738,22 @@ public class RMContainerImpl implements RMContainer {
 
 private static void updateAttemptMetrics(RMContainerImpl container) {
   Resource resource = container.getContainer().getResource();
-  RMAppAttempt rmAttempt = container.rmContext.getRMApps()
-  .get(container.getApplicationAttemptId().getApplicationId())
-  .getCurrentAppAttempt();
-
-  if (rmAttempt != null) {
-long usedMillis = container.finishTime - container.creationTime;
-rmAttempt.getRMAppAttemptMetrics()
-.updateAggregateAppResourceUsage(resource, usedMillis);
-// If this is a preempted container, update preemption metrics
-if (ContainerExitStatus.PREEMPTED == container.finishedStatus
-.getExitStatus()) {
+  RMApp app = container.rmContext.getRMApps()
+  .get(container.getApplicationAttemptId().getApplicationId());
+  if (app != null) {
+RMAppAttempt rmAttempt = app.getCurrentAppAttempt();
+if (rmAttempt != null) {
+  long usedMillis = container.finishTime - container.creationTime;
   rmAttempt.getRMAppAttemptMetrics()
-  .updatePreemptionInfo(resource, container);
-  rmAttempt.getRMAppAttemptMetrics()
-  .updateAggregatePreemptedAppResourceUsage(resource, usedMillis);
+  .updateAggregateAppResourceUsage(resource, usedMillis);
+  // If this is a preempted container, update preemption metrics
+  if (ContainerExitStatus.PREEMPTED == container.finishedStatus
+  .getExitStatus()) {
+rmAttempt.getRMAppAttemptMetrics()
+.updatePreemptionInfo(resource, container);
+rmAttempt.getRMAppAttemptMetrics()
+.updateAggregatePreemptedAppResourceUsage(resource, 
usedMillis);
+  }
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/251f5288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 

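The shape of the fix, as a standalone sketch; the map below is a stand-in for rmContext.getRMApps(), from which a completed application may already have been removed when the container finishes:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class NullGuardExample {
  private static final Map<String, String> RM_APPS = new ConcurrentHashMap<>();

  static void updateAttemptMetrics(String appId) {
    String app = RM_APPS.get(appId); // may be null for a completed app
    if (app == null) {
      return; // skip the metrics update rather than throw an NPE
    }
    System.out.println("updating metrics for attempt of " + app);
  }

  public static void main(String[] args) {
    RM_APPS.put("app-1", "app-1");
    updateAttemptMetrics("app-1"); // normal path
    updateAttemptMetrics("app-2"); // unknown or removed app: no NPE
  }
}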
[23/50] [abbrv] hadoop git commit: MAPREDUCE-7086. Add config to allow FileInputFormat to ignore directories when recursive=false. Contributed by Sergey Shelukhin

2018-05-04 Thread xkrogen
MAPREDUCE-7086. Add config to allow FileInputFormat to ignore directories when 
recursive=false. Contributed by Sergey Shelukhin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68c6ec71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68c6ec71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68c6ec71

Branch: refs/heads/HDFS-12943
Commit: 68c6ec719da8e79ada31c8f3a82124f90b9a71fd
Parents: 24eeea8
Author: Jason Lowe 
Authored: Tue May 1 16:19:53 2018 -0500
Committer: Jason Lowe 
Committed: Tue May 1 16:19:53 2018 -0500

--
 .../apache/hadoop/mapred/FileInputFormat.java   | 25 ++--
 .../mapreduce/lib/input/FileInputFormat.java|  8 +++
 .../hadoop/mapred/TestFileInputFormat.java  | 17 -
 .../lib/input/TestFileInputFormat.java  | 12 ++
 4 files changed, 54 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c6ec71/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index b0ec979..fe43991 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -78,10 +78,13 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
 
   public static final String NUM_INPUT_FILES =
 org.apache.hadoop.mapreduce.lib.input.FileInputFormat.NUM_INPUT_FILES;
-  
+
   public static final String INPUT_DIR_RECURSIVE = 
 org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE;
 
+  public static final String INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS =
+
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS;
+
 
   private static final double SPLIT_SLOP = 1.1;   // 10% slop
 
@@ -319,16 +322,24 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
   public InputSplit[] getSplits(JobConf job, int numSplits)
 throws IOException {
 StopWatch sw = new StopWatch().start();
-FileStatus[] files = listStatus(job);
-
+FileStatus[] stats = listStatus(job);
+
 // Save the number of input files for metrics/loadgen
-job.setLong(NUM_INPUT_FILES, files.length);
+job.setLong(NUM_INPUT_FILES, stats.length);
 long totalSize = 0;   // compute total size
-for (FileStatus file: files) {// check we have valid files
+boolean ignoreDirs = !job.getBoolean(INPUT_DIR_RECURSIVE, false)
+  && job.getBoolean(INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, false);
+
+List<FileStatus> files = new ArrayList<>(stats.length);
+for (FileStatus file: stats) {// check we have valid files
   if (file.isDirectory()) {
-throw new IOException("Not a file: "+ file.getPath());
+if (!ignoreDirs) {
+  throw new IOException("Not a file: "+ file.getPath());
+}
+  } else {
+files.add(file);
+totalSize += file.getLen();
   }
-  totalSize += file.getLen();
 }
 
 long goalSize = totalSize / (numSplits == 0 ? 1 : numSplits);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c6ec71/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index 9868e8e..e2d8e6f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -76,6 +76,8 @@ public abstract class FileInputFormat<K, V> extends InputFormat<K, V> {
 

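A hedged usage sketch for the new switch. The recursive key string is the long-standing one; the ignore-subdirs key follows the same mapreduce.input.fileinputformat.* naming and should be checked against the INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS constant in your Hadoop version.

import org.apache.hadoop.mapred.JobConf;

public class IgnoreSubdirsExample {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    // Non-recursive listing of the input directories...
    job.setBoolean("mapreduce.input.fileinputformat.input.dir.recursive",
        false);
    // ...but skip subdirectories under the input paths instead of failing
    // getSplits() with "Not a file".
    job.setBoolean(
        "mapreduce.input.fileinputformat.input.dir.nonrecursive.ignore.subdirs",
        true);
  }
}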
[28/50] [abbrv] hadoop git commit: HADOOP-15406. hadoop-nfs dependencies for mockito and junit are not test scope

2018-05-04 Thread xkrogen
HADOOP-15406. hadoop-nfs dependencies for mockito and junit are not test scope

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e07156e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e07156e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e07156e8

Branch: refs/heads/HDFS-12943
Commit: e07156e8b07552b877a22565641465e211144f6f
Parents: 3376872
Author: Jason Lowe 
Authored: Wed May 2 17:30:10 2018 +0900
Committer: Akira Ajisaka 
Committed: Wed May 2 17:30:10 2018 +0900

--
 hadoop-common-project/hadoop-nfs/pom.xml | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e07156e8/hadoop-common-project/hadoop-nfs/pom.xml
--
diff --git a/hadoop-common-project/hadoop-nfs/pom.xml 
b/hadoop-common-project/hadoop-nfs/pom.xml
index 8546112..80d8cd2 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -56,10 +56,12 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>commons-logging</groupId>





[39/50] [abbrv] hadoop git commit: HADOOP-15434. Upgrade to ADLS SDK that exposes current timeout.

2018-05-04 Thread xkrogen
HADOOP-15434. Upgrade to ADLS SDK that exposes current timeout.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85381c7b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85381c7b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85381c7b

Branch: refs/heads/HDFS-12943
Commit: 85381c7b605b5f49664f101cf025e443c300b94c
Parents: e6a80e4
Author: Sean Mackrory 
Authored: Tue May 1 09:47:52 2018 -0600
Committer: Sean Mackrory 
Committed: Wed May 2 21:30:31 2018 -0600

--
 hadoop-tools/hadoop-azure-datalake/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85381c7b/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 57515b0..5603db9 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -33,7 +33,7 @@
     <minimalJsonVersion>0.9.1</minimalJsonVersion>
     <file.encoding>UTF-8</file.encoding>
     <downloadSources>true</downloadSources>
-    <azure.data.lake.store.sdk.version>2.2.7</azure.data.lake.store.sdk.version>
+    <azure.data.lake.store.sdk.version>2.2.9</azure.data.lake.store.sdk.version>
   </properties>
   <build>
 





[48/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-12943

2018-05-04 Thread xkrogen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38fde5d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
--
diff --cc 
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
index ebeff94,0497931..507517b
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
@@@ -408,34 -404,11 +408,34 @@@ public class WritableRpcEngine implements RpcEngine {
  boolean verbose, SecretManager<? extends TokenIdentifier> secretManager,
  String portRangeConfig) 
  throws IOException {
 +  this(null, protocolImpl,  conf,  bindAddress,   port,
 +  numHandlers,  numReaders,  queueSizePerHandler,  verbose,
 +  secretManager, null, null);
 +}
 +
 +/**
 + * Construct an RPC server.
 + * @param protocolClass - the protocol being registered
 + * can be null for compatibility with old usage (see below for 
details)
 + * @param protocolImpl the protocol impl that will be called
 + * @param conf the configuration to use
 + * @param bindAddress the address to bind on to listen for connection
 + * @param port the port to listen for connections on
 + * @param numHandlers the number of method handler threads to run
 + * @param verbose whether each call should be logged
 + * @param alignmentContext provides server state info on client responses
 + */
 +public Server(Class<?> protocolClass, Object protocolImpl,
 +Configuration conf, String bindAddress,  int port,
 +int numHandlers, int numReaders, int queueSizePerHandler,
 +boolean verbose, SecretManager<? extends TokenIdentifier> secretManager,
 +String portRangeConfig, AlignmentContext alignmentContext)
 +throws IOException {
super(bindAddress, port, null, numHandlers, numReaders,
queueSizePerHandler, conf,
-   classNameBase(protocolImpl.getClass().getName()), secretManager,
+   serverNameFromClass(protocolImpl.getClass()), secretManager,
portRangeConfig);
 -
 +  setAlignmentContext(alignmentContext);
this.verbose = verbose;



http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38fde5d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38fde5d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38fde5d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--





[45/50] [abbrv] hadoop git commit: YARN-8079. Support static and archive unmodified local resources in service AM. Contributed by Suma Shivaprasad

2018-05-04 Thread xkrogen
YARN-8079. Support static and archive unmodified local resources in service AM. 
Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6795f807
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6795f807
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6795f807

Branch: refs/heads/HDFS-12943
Commit: 6795f8072ffbe6138857e77d51af173f33e4e5c1
Parents: 502914c
Author: Billie Rinaldi 
Authored: Fri May 4 09:27:07 2018 -0700
Committer: Billie Rinaldi 
Committed: Fri May 4 09:27:07 2018 -0700

--
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |   2 +
 .../yarn/service/api/records/ConfigFile.java|   3 +-
 .../yarn/service/conf/YarnServiceConstants.java |   1 +
 .../provider/AbstractClientProvider.java|  23 ++-
 .../provider/AbstractProviderService.java   |   4 +
 .../yarn/service/provider/ProviderUtils.java|  91 --
 .../service/provider/TestProviderUtils.java | 164 +++
 .../providers/TestAbstractClientProvider.java   |  44 +
 .../markdown/yarn-service/YarnServiceAPI.md |   4 +-
 9 files changed, 321 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index 8c5ad65..cea8296 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -475,6 +475,8 @@ definitions:
   - YAML
   - TEMPLATE
   - HADOOP_XML
+  - STATIC
+  - ARCHIVE
   dest_file:
 type: string
 description: The path that this configuration file should be created 
as. If it is an absolute path, it will be mounted into the DOCKER container. 
Absolute paths are only allowed for DOCKER containers.  If it is a relative 
path, only the file name should be provided, and the file will be created in 
the container local working directory under a folder named conf.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
index d3b18bc..623feed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
@@ -55,7 +55,8 @@ public class ConfigFile implements Serializable {
   @XmlEnum
   public enum TypeEnum {
 XML("XML"), PROPERTIES("PROPERTIES"), JSON("JSON"), YAML("YAML"), TEMPLATE(
-"TEMPLATE"), HADOOP_XML("HADOOP_XML");
+"TEMPLATE"), HADOOP_XML("HADOOP_XML"), STATIC("STATIC"), ARCHIVE(
+"ARCHIVE");
 
 private String value;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java
 

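A minimal sketch of declaring one of the new resource types through the records API touched above; the fluent setters follow the generated style of ConfigFile, and the HDFS path is hypothetical:

import org.apache.hadoop.yarn.service.api.records.ConfigFile;

public class ArchiveResourceExample {
  public static void main(String[] args) {
    // Ship a tarball unmodified; the AM localizes it and, for ARCHIVE,
    // expands it in the container.
    ConfigFile archive = new ConfigFile()
        .type(ConfigFile.TypeEnum.ARCHIVE)
        .srcFile("hdfs:///apps/demo/site.tar.gz") // hypothetical source
        .destFile("site"); // relative => under the container work directory
    System.out.println(archive);
  }
}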
[05/50] [abbrv] hadoop git commit: YARN-8210. AMRMClient logging on every heartbeat to track updates to the AM RM token causes too many log lines to be generated in AM logs. (Suma Shivaprasad via wangda)

2018-05-04 Thread xkrogen
YARN-8210. AMRMClient logging on every heartbeat to track updates to the AM RM
token causes too many log lines to be generated in AM logs. (Suma Shivaprasad
via wangda)

Change-Id: I70edd6e301fd5e78d479e1882aedc9332a0827aa


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1833d9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1833d9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1833d9b

Branch: refs/heads/HDFS-12943
Commit: b1833d9ba2c078582161da45ac392dd5c361dcdf
Parents: ef3ecc3
Author: Wangda Tan 
Authored: Fri Apr 27 13:07:38 2018 -0700
Committer: Wangda Tan 
Committed: Fri Apr 27 13:07:38 2018 -0700

--
 .../java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1833d9b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index a8e4dfc..ef849b2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -,7 +,6 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
 // to ensure we replace the previous token setup by the RM.
 // Afterwards we can update the service address for the RPC layer.
 UserGroupInformation currentUGI = UserGroupInformation.getCurrentUser();
-LOG.info("Updating with new AMRMToken");
 currentUGI.addToken(amrmToken);
 amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig()));
   }





[18/50] [abbrv] hadoop git commit: HDDS-13. Refactor StorageContainerManager into separate RPC endpoints. Contributed by Anu Engineer.

2018-05-04 Thread xkrogen
HDDS-13. Refactor StorageContainerManager into separate RPC endpoints.
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0c3dc4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0c3dc4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0c3dc4c

Branch: refs/heads/HDFS-12943
Commit: f0c3dc4cf40575497ca6f29c037e43fa50e0ffdd
Parents: 2d319e3
Author: Anu Engineer 
Authored: Mon Apr 30 21:41:10 2018 -0700
Committer: Anu Engineer 
Committed: Mon Apr 30 21:41:10 2018 -0700

--
 .../org/apache/hadoop/hdds/scm/SCMMXBean.java   |   50 -
 .../org/apache/hadoop/hdds/scm/SCMStorage.java  |   73 -
 .../hdds/scm/StorageContainerManager.java   | 1290 --
 .../scm/StorageContainerManagerHttpServer.java  |   76 --
 .../hadoop/hdds/scm/node/SCMNodeManager.java|5 +-
 .../hdds/scm/server/SCMBlockProtocolServer.java |  222 +++
 .../scm/server/SCMClientProtocolServer.java |  314 +
 .../scm/server/SCMDatanodeProtocolServer.java   |  350 +
 .../hadoop/hdds/scm/server/SCMMXBean.java   |   50 +
 .../hadoop/hdds/scm/server/SCMStorage.java  |   73 +
 .../scm/server/StorageContainerManager.java |  722 ++
 .../StorageContainerManagerHttpServer.java  |   77 ++
 .../hadoop/hdds/scm/server/package-info.java|   22 +
 .../TestStorageContainerManagerHttpServer.java  |7 +-
 hadoop-ozone/common/src/main/bin/ozone  |2 +-
 .../container/TestContainerStateManager.java|   29 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |   34 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |   10 +-
 .../ozone/TestStorageContainerManager.java  |   20 +-
 .../TestStorageContainerManagerHelper.java  |2 +-
 .../ozone/ksm/TestContainerReportWithKeys.java  |2 +-
 .../hadoop/ozone/ksm/TestKeySpaceManager.java   |8 +-
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java |   13 +-
 .../apache/hadoop/ozone/scm/TestSCMMXBean.java  |2 +-
 .../apache/hadoop/ozone/scm/TestSCMMetrics.java |   16 +-
 25 files changed, 1912 insertions(+), 1557 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java
deleted file mode 100644
index 17b6814..0000000
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.server.ServiceRuntimeInfo;
-
-import java.util.Map;
-
-/**
- *
- * This is the JMX management interface for scm information.
- */
-@InterfaceAudience.Private
-public interface SCMMXBean extends ServiceRuntimeInfo {
-
-  /**
-   * Get the SCM RPC server port that used to listen to datanode requests.
-   * @return SCM datanode RPC server port
-   */
-  String getDatanodeRpcPort();
-
-  /**
-   * Get the SCM RPC server port that used to listen to client requests.
-   * @return SCM client RPC server port
-   */
-  String getClientRpcPort();
-
-  /**
-   * Get container report info that includes container IO stats of nodes.
-   * @return The datanodeUUid to report json string mapping
-   */
-  Map<String, String> getContainerReport();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMStorage.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMStorage.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMStorage.java
deleted file mode 

[27/50] [abbrv] hadoop git commit: HADOOP-15377. Improve debug messages in MetricsConfig.java

2018-05-04 Thread xkrogen
HADOOP-15377. Improve debug messages in MetricsConfig.java

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33768724
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33768724
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33768724

Branch: refs/heads/HDFS-12943
Commit: 33768724ff99d4966c24c9553eef207ed31a76d3
Parents: 1a95a45
Author: BELUGA BEHR 
Authored: Wed May 2 17:09:22 2018 +0900
Committer: Akira Ajisaka 
Committed: Wed May 2 17:09:22 2018 +0900

--
 .../hadoop/metrics2/impl/MetricsConfig.java | 50 
 1 file changed, 30 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33768724/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
index ac4a24e..027450c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
@@ -118,20 +118,23 @@ class MetricsConfig extends SubsetConfiguration {
 .setListDelimiterHandler(new DefaultListDelimiterHandler(',')))
   .getConfiguration()
   .interpolatedConfiguration();
-LOG.info("loaded properties from "+ fname);
-LOG.debug(toString(cf));
+LOG.info("Loaded properties from {}", fname);
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Properties: {}", toString(cf));
+}
 MetricsConfig mc = new MetricsConfig(cf, prefix);
-LOG.debug(mc.toString());
+LOG.debug("Metrics Config: {}", mc);
 return mc;
   } catch (ConfigurationException e) {
 // Commons Configuration defines the message text when file not found
 if (e.getMessage().startsWith("Could not locate")) {
+  LOG.debug("Could not locate file {}", fname, e);
   continue;
 }
 throw new MetricsConfigException(e);
   }
 }
-LOG.warn("Cannot locate configuration: tried "+
+LOG.warn("Cannot locate configuration: tried " +
  Joiner.on(",").join(fileNames));
 // default to an empty configuration
 return new MetricsConfig(new PropertiesConfiguration(), prefix);
@@ -168,7 +171,6 @@ class MetricsConfig extends SubsetConfiguration {
 
  Iterable<String> keys() {
    return new Iterable<String>() {
-      @SuppressWarnings("unchecked")
      @Override
      public Iterator<String> iterator() {
        return (Iterator<String>) getKeys();
@@ -186,21 +188,21 @@ class MetricsConfig extends SubsetConfiguration {
 Object value = super.getPropertyInternal(key);
 if (value == null) {
   if (LOG.isDebugEnabled()) {
-LOG.debug("poking parent '"+ getParent().getClass().getSimpleName() +
-  "' for key: "+ key);
+LOG.debug("poking parent '" + getParent().getClass().getSimpleName() +
+  "' for key: " + key);
   }
   return getParent().getProperty(key.startsWith(PREFIX_DEFAULT) ? key
  : PREFIX_DEFAULT + key);
 }
-if (LOG.isDebugEnabled()) {
-  LOG.debug("returning '"+ value +"' for key: "+ key);
-}
+LOG.debug("Returning '{}' for key: {}", value, key);
 return value;
   }
 
  <T extends MetricsPlugin> T getPlugin(String name) {
 String clsName = getClassName(name);
-if (clsName == null) return null;
+if (clsName == null) {
+  return null;
+}
 try {
    Class<?> cls = Class.forName(clsName, true, getPluginLoader());
   @SuppressWarnings("unchecked")
@@ -213,9 +215,9 @@ class MetricsConfig extends SubsetConfiguration {
   }
 
   String getClassName(String prefix) {
-String classKey = prefix.isEmpty() ? "class" : prefix +".class";
+String classKey = prefix.isEmpty() ? "class" : prefix.concat(".class");
 String clsName = getString(classKey);
-LOG.debug(clsName);
+LOG.debug("Class name for prefix {} is {}", prefix, clsName);
 if (clsName == null || clsName.isEmpty()) {
   return null;
 }
@@ -223,25 +225,29 @@ class MetricsConfig extends SubsetConfiguration {
   }
 
   ClassLoader getPluginLoader() {
-if (pluginLoader != null) return pluginLoader;
+if (pluginLoader != null) {
+  return pluginLoader;
+}
 final ClassLoader defaultLoader = getClass().getClassLoader();
 Object purls = super.getProperty(PLUGIN_URLS_KEY);
-if (purls 

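The logging idiom the patch converges on, in isolation: {} placeholders defer formatting until the level is enabled, so an explicit isDebugEnabled() guard is only worth keeping when building the argument itself is expensive, as with toString(cf) above.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jIdiomExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(Slf4jIdiomExample.class);

  public static void main(String[] args) {
    String fname = "hadoop-metrics2.properties";
    LOG.info("Loaded properties from {}", fname); // no concatenation cost
    if (LOG.isDebugEnabled()) {
      LOG.debug("Properties: {}", expensiveDump()); // guard the costly call
    }
  }

  private static String expensiveDump() {
    return "..."; // stand-in for an expensive rendering
  }
}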
[50/50] [abbrv] hadoop git commit: HDFS-13286. [SBN read] Add haadmin commands to transition between standby and observer. Contributed by Chao Sun.

2018-05-04 Thread xkrogen
HDFS-13286. [SBN read] Add haadmin commands to transition between standby and 
observer. Contributed by Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7f27391
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7f27391
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7f27391

Branch: refs/heads/HDFS-12943
Commit: f7f27391e19a88fd180ddd744686184cd0158690
Parents: a38fde5
Author: Erik Krogen 
Authored: Fri May 4 12:22:12 2018 -0700
Committer: Erik Krogen 
Committed: Fri May 4 12:27:03 2018 -0700

--
 .../apache/hadoop/ha/FailoverController.java|  2 +-
 .../main/java/org/apache/hadoop/ha/HAAdmin.java | 42 +++
 .../org/apache/hadoop/ha/HAServiceProtocol.java | 18 
 .../hadoop/ha/HAServiceProtocolHelper.java  |  9 
 .../org/apache/hadoop/ha/HAServiceTarget.java   |  7 
 ...HAServiceProtocolClientSideTranslatorPB.java | 16 +++
 ...HAServiceProtocolServerSideTranslatorPB.java | 20 +
 .../src/main/proto/HAServiceProtocol.proto  | 20 +
 .../org/apache/hadoop/ha/DummyHAService.java| 18 +++-
 .../org/apache/hadoop/ha/MiniZKFCCluster.java   |  4 ++
 .../FederationNamenodeServiceState.java |  3 ++
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  4 ++
 .../hdfs/server/datanode/BPServiceActor.java|  2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  3 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   | 27 
 .../hdfs/server/namenode/NameNodeRpcServer.java |  8 
 .../hdfs/server/namenode/ha/StandbyState.java   | 12 +++---
 .../hadoop/hdfs/tools/NNHAServiceTarget.java|  5 +++
 .../hadoop-hdfs/src/main/proto/HdfsServer.proto |  1 +
 .../hadoop/hdfs/tools/TestDFSHAAdmin.java   |  6 +++
 .../hdfs/tools/TestDFSHAAdminMiniCluster.java   | 44 
 .../server/resourcemanager/AdminService.java|  7 
 22 files changed, 258 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
index b86ae29..4fc52d5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
@@ -129,7 +129,7 @@ public class FailoverController {
 
 if (!toSvcStatus.getState().equals(HAServiceState.STANDBY)) {
   throw new FailoverFailedException(
-  "Can't failover to an active service");
+  "Can't failover to an " + toSvcStatus.getState() + " service");
 }
 
 if (!toSvcStatus.isReadyToBecomeActive()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7f27391/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index 9b7d7ba..61700f9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -72,6 +72,9 @@ public abstract class HAAdmin extends Configured implements Tool {
     new UsageInfo("[--"+FORCEACTIVE+"] <serviceId>", "Transitions the service into Active state"))
 .put("-transitionToStandby",
     new UsageInfo("<serviceId>", "Transitions the service into Standby state"))
+  .put("-transitionToObserver",
+  new UsageInfo("<serviceId>",
+  "Transitions the service into Observer state"))
 .put("-failover",
     new UsageInfo("[--"+FORCEFENCE+"] [--"+FORCEACTIVE+"] <serviceId> <serviceId>",
     "Failover from the first service to the second.\n" +
@@ -221,6 +224,28 @@ public abstract class HAAdmin extends Configured 
implements Tool {
 HAServiceProtocolHelper.transitionToStandby(proto, createReqInfo());
 return 0;
   }
+
+  private int transitionToObserver(final CommandLine cmd)
+  throws IOException, ServiceFailedException {
+String[] argv = cmd.getArgs();
+if (argv.length != 1) {
+  errOut.println("transitionToObserver: incorrect number of arguments");
+  printUsage(errOut, "-transitionToObserver");
+  return -1;
+}
+
+HAServiceTarget target = resolveTarget(argv[0]);
+if 

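Client-side shape of the new verb, as a sketch. The request-info plumbing mirrors the existing transitionToStandby path, and the CLI form (hdfs haadmin -transitionToObserver <serviceId>) is inferred from the usage table above.

import java.io.IOException;

import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;

public class ObserverTransitionExample {
  static void toObserver(HAServiceProtocol proto) throws IOException {
    StateChangeRequestInfo req =
        new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER);
    proto.transitionToObserver(req); // method added by this patch
  }
}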
[14/50] [abbrv] hadoop git commit: HADOOP-15239 S3ABlockOutputStream.flush() be no-op when stream closed. Contributed by Gabor Bota.

2018-05-04 Thread xkrogen
HADOOP-15239 S3ABlockOutputStream.flush() be no-op when stream closed.  
Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/919865a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/919865a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/919865a3

Branch: refs/heads/HDFS-12943
Commit: 919865a34bd5c3c99603993a0410846a97975869
Parents: fc074a3
Author: Aaron Fabbri 
Authored: Mon Apr 30 16:02:57 2018 -0700
Committer: Aaron Fabbri 
Committed: Mon Apr 30 16:02:57 2018 -0700

--
 .../hadoop/fs/s3a/S3ABlockOutputStream.java |  7 ++-
 .../hadoop/fs/s3a/TestS3ABlockOutputStream.java | 66 
 2 files changed, 72 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/919865a3/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
index 96de8e4..bdffed4 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
@@ -238,7 +238,12 @@ class S3ABlockOutputStream extends OutputStream implements
*/
   @Override
   public synchronized void flush() throws IOException {
-checkOpen();
+try {
+  checkOpen();
+} catch (IOException e) {
+  LOG.warn("Stream closed: " + e.getMessage());
+  return;
+}
 S3ADataBlocks.DataBlock dataBlock = getActiveBlock();
 if (dataBlock != null) {
   dataBlock.flush();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/919865a3/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java
new file mode 100644
index 000..ff176f5
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import org.apache.hadoop.fs.s3a.commit.PutTracker;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+
+/**
+ * Unit tests for {@link S3ABlockOutputStream}.
+ */
+public class TestS3ABlockOutputStream extends AbstractS3AMockTest {
+
+  private S3ABlockOutputStream stream;
+
+  @Before
+  public void setUp() throws Exception {
+ExecutorService executorService = mock(ExecutorService.class);
+Progressable progressable = mock(Progressable.class);
+S3ADataBlocks.BlockFactory blockFactory =
+mock(S3ADataBlocks.BlockFactory.class);
+long blockSize = Constants.DEFAULT_MULTIPART_SIZE;
+S3AInstrumentation.OutputStreamStatistics statistics = null;
+WriteOperationHelper oHelper = mock(WriteOperationHelper.class);
+PutTracker putTracker = mock(PutTracker.class);
+stream = spy(new S3ABlockOutputStream(fs, "", executorService,
+  progressable, blockSize, blockFactory, statistics, oHelper,
+  putTracker));
+  }
+
+  @Test
+  public void testFlushNoOpWhenStreamClosed() throws Exception {
+doThrow(new IOException()).when(stream).checkOpen();
+
+try {
+  stream.flush();
+} catch (Exception e){
+  fail("Should not have any exception.");
+}
+  }
+}
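Not part of the patch: a minimal sketch of the resulting user-visible behavior, assuming a hypothetical `example-bucket` and a client already configured for S3A access:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushAfterCloseSketch {
  public static void main(String[] args) throws Exception {
    Path path = new Path("s3a://example-bucket/tmp/demo.txt"); // hypothetical bucket/key
    FileSystem fs = path.getFileSystem(new Configuration());
    FSDataOutputStream out = fs.create(path);
    out.write(42);
    out.close();
    // Before HADOOP-15239 this threw an IOException from checkOpen();
    // with the patch the wrapped S3A stream logs a warning and returns.
    out.flush();
  }
}
```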



[34/50] [abbrv] hadoop git commit: YARN-8113. Update placement constraints doc with application namespaces and inter-app constraints. Contributed by Weiwei Yang.

2018-05-04 Thread xkrogen
YARN-8113. Update placement constraints doc with application namespaces and 
inter-app constraints. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b34fca4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b34fca4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b34fca4

Branch: refs/heads/HDFS-12943
Commit: 3b34fca4b5d67a2685852f30bb61e7c408a0e886
Parents: 883f682
Author: Konstantinos Karanasos 
Authored: Wed May 2 11:48:35 2018 -0700
Committer: Konstantinos Karanasos 
Committed: Wed May 2 11:49:56 2018 -0700

--
 .../site/markdown/PlacementConstraints.md.vm| 67 +++-
 1 file changed, 52 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b34fca4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
index cb34c3f..4ac1683 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
@@ -28,7 +28,7 @@ YARN allows applications to specify placement constraints in 
the form of data lo
 
 For example, it may be beneficial to co-locate the allocations of a job on the 
same rack (*affinity* constraints) to reduce network costs, spread allocations 
across machines (*anti-affinity* constraints) to minimize resource 
interference, or allow up to a specific number of allocations in a node group 
(*cardinality* constraints) to strike a balance between the two. Placement 
decisions also affect resilience. For example, allocations placed within the 
same cluster upgrade domain would go offline simultaneously.
 
-The applications can specify constraints without requiring knowledge of the 
underlying topology of the cluster (e.g., one does not need to specify the 
specific node or rack where their containers should be placed with constraints) 
or the other applications deployed. Currently **intra-application** constraints 
are supported, but the design that is followed is generic and support for 
constraints across applications will soon be added. Moreover, all constraints 
at the moment are **hard**, that is, if the constraints for a container cannot 
be satisfied due to the current cluster condition or conflicting constraints, 
the container request will remain pending or get will get rejected.
+The applications can specify constraints without requiring knowledge of the 
underlying topology of the cluster (e.g., one does not need to specify the 
specific node or rack where their containers should be placed with constraints) 
or the other applications deployed. Currently, all constraints are **hard**, 
that is, if a constraint for a container cannot be satisfied due to the current 
cluster condition or conflicting constraints, the container request will remain 
pending or get rejected.
 
 Note that in this document we use the notion of “allocation” to refer to a 
unit of resources (e.g., CPU and memory) that gets allocated in a node. In the 
current implementation of YARN, an allocation corresponds to a single 
container. However, in case an application uses an allocation to spawn more 
than one containers, an allocation could correspond to multiple containers.
 
@@ -65,15 +65,19 @@ $ yarn 
org.apache.hadoop.yarn.applications.distributedshell.Client -jar share/ha
 where **PlacementSpec** is of the form:
 
 ```
-PlacementSpec => "" | KeyVal;PlacementSpec
-KeyVal=> SourceTag=Constraint
-SourceTag => String
-Constraint=> NumContainers | NumContainers,"IN",Scope,TargetTag | 
NumContainers,"NOTIN",Scope,TargetTag | 
NumContainers,"CARDINALITY",Scope,TargetTag,MinCard,MaxCard
-NumContainers => int
-Scope => "NODE" | "RACK"
-TargetTag => String
-MinCard   => int
-MaxCard   => int
+PlacementSpec => "" | KeyVal;PlacementSpec
+KeyVal=> SourceTag=ConstraintExpr
+SourceTag => String
+ConstraintExpr=> NumContainers | NumContainers, Constraint
+Constraint=> SingleConstraint | CompositeConstraint
+SingleConstraint  => "IN",Scope,TargetTag | "NOTIN",Scope,TargetTag | 
"CARDINALITY",Scope,TargetTag,MinCard,MaxCard
+CompositeConstraint   => AND(ConstraintList) | OR(ConstraintList)
+ConstraintList=> Constraint | Constraint:ConstraintList
+NumContainers => int
+Scope => "NODE" | "RACK"
+TargetTag
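To make the grammar concrete, a hypothetical PlacementSpec (source tags `zk` and `hbase` assumed) asking for three `zk` containers with node anti-affinity to `zk`, plus five `hbase` containers on racks that already host `zk`, would read:

```
zk=3,NOTIN,NODE,zk;hbase=5,IN,RACK,zk
```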

[42/50] [abbrv] hadoop git commit: YARN-8226. Improved anti-affinity description in YARN Service doc. Contributed by Gour Saha

2018-05-04 Thread xkrogen
YARN-8226. Improved anti-affinity description in YARN Service doc.
   Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76987372
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76987372
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76987372

Branch: refs/heads/HDFS-12943
Commit: 7698737207b01e80b1be2b4df60363f952a1c2b4
Parents: 7fe3214
Author: Eric Yang 
Authored: Thu May 3 13:35:40 2018 -0400
Committer: Eric Yang 
Committed: Thu May 3 13:35:40 2018 -0400

--
 .../main/resources/definition/YARN-Services-Examples.md   | 10 +++---
 .../src/site/markdown/yarn-service/YarnServiceAPI.md  | 10 +++---
 2 files changed, 14 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76987372/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
index a4ef2d2..83e558c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
@@ -351,9 +351,13 @@ POST URL - http://localhost:8088/app/v1/services
 # GET Response JSON
 GET URL - http://localhost:8088/app/v1/services/hello-world
 
-Note, that the 3 containers will come up on 3 different nodes. If there are 
less
-than 3 NMs running in the cluster, then all 3 container requests will not be
-fulfilled and the service will be in non-STABLE state.
+Note, for an anti-affinity component no more than 1 container will be allocated
+in a specific node. In this example, 3 containers have been requested by
+component "hello". All 3 containers were allocated because the cluster had 3 or
+more NMs. If the cluster had less than 3 NMs then less than 3 containers would
+be allocated. In cases when the number of allocated containers are less than 
the
+number of requested containers, the component and the service will be in
+non-STABLE state.
 
 ```json
 {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76987372/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
index 496c1a1..fab33c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
@@ -766,9 +766,13 @@ POST URL - http://localhost:8088/app/v1/services
 # GET Response JSON
 GET URL - http://localhost:8088/app/v1/services/hello-world
 
-Note, that the 3 containers will come up on 3 different nodes. If there are 
less
-than 3 NMs running in the cluster, then all 3 container requests will not be
-fulfilled and the service will be in non-STABLE state.
+Note, for an anti-affinity component no more than 1 container will be allocated
+in a specific node. In this example, 3 containers have been requested by
+component "hello". All 3 containers were allocated because the cluster had 3 or
+more NMs. If the cluster had less than 3 NMs then less than 3 containers would
+be allocated. In cases when the number of allocated containers are less than 
the
+number of requested containers, the component and the service will be in
+non-STABLE state.
 
 ```json
 {





[02/50] [abbrv] hadoop git commit: YARN-7781. Update YARN service documentation. Contributed by Gour Saha

2018-05-04 Thread xkrogen
YARN-7781. Update YARN service documentation.
   Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24a5ccbf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24a5ccbf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24a5ccbf

Branch: refs/heads/HDFS-12943
Commit: 24a5ccbf4bda413a98480d52c204d56f82ef9ac5
Parents: 14b4799
Author: Eric Yang 
Authored: Fri Apr 27 12:38:30 2018 -0400
Committer: Eric Yang 
Committed: Fri Apr 27 12:38:30 2018 -0400

--
 .../definition/YARN-Services-Examples.md| 236 +--
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |  21 +-
 .../yarn/service/api/records/BaseResource.java  |   2 +-
 .../site/markdown/yarn-service/QuickStart.md|  13 +-
 .../markdown/yarn-service/YarnServiceAPI.md |  61 +++--
 5 files changed, 279 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24a5ccbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
index 22f941e..a4ef2d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
@@ -15,7 +15,7 @@
 ## Examples
 
 ### Create a simple single-component service with most attribute values as 
defaults
-POST URL - http://localhost:9191/ws/v1/services
+POST URL - http://localhost:8088/app/v1/services
 
 # POST Request JSON
 ```json
@@ -27,7 +27,7 @@ POST URL - http://localhost:9191/ws/v1/services
 [
   {
 "name": "hello",
-"number_of_containers": 1,
+"number_of_containers": 2,
 "artifact": {
   "id": "nginx:latest",
   "type": "DOCKER"
@@ -36,14 +36,14 @@ POST URL - http://localhost:9191/ws/v1/services
 "resource": {
   "cpus": 1,
   "memory": "256"
-   }
+}
   }
 ]
 }
 ```
 
 # GET Response JSON
-GET URL - http://localhost:9191/ws/v1/services/hello-world
+GET URL - http://localhost:8088/app/v1/services/hello-world
 
 Note, lifetime value of -1 means unlimited lifetime.
 
@@ -54,10 +54,11 @@ Note, lifetime value of -1 means unlimited lifetime.
 "description": "hello world example",
 "id": "application_1503963985568_0002",
 "lifetime": -1,
+"state": "STABLE",
 "components": [
 {
 "name": "hello",
-"dependencies": [],
+"state": "STABLE",
 "resource": {
 "cpus": 1,
 "memory": "256"
@@ -70,21 +71,21 @@ Note, lifetime value of -1 means unlimited lifetime.
 "quicklinks": [],
 "containers": [
 {
-"id": "container_e03_1503963985568_0002_01_01",
+"id": "container_e03_1503963985568_0002_01_02",
 "ip": "10.22.8.143",
-"hostname": "myhost.local",
+"hostname": 
"ctr-e03-1503963985568-0002-01-02.example.site",
 "state": "READY",
 "launch_time": 1504051512412,
-"bare_host": "10.22.8.143",
+"bare_host": "host100.cloud.com",
 "component_instance_name": "hello-0"
 },
 {
-"id": "container_e03_1503963985568_0002_01_02",
-"ip": "10.22.8.143",
-"hostname": "myhost.local",
+"id": "container_e03_1503963985568_0002_01_03",
+"ip": "10.22.8.144",
+"hostname": 
"ctr-e03-1503963985568-0002-01-03.example.site",
 "state": "READY",
 "launch_time": 1504051536450,
-"bare_host": "10.22.8.143",
+"bare_host": "host100.cloud.com",
 "component_instance_name": "hello-1"
 }
 ],
@@ -103,7 +104,7 @@ Note, lifetime value of -1 means unlimited lifetime.
 
 ```
 ### Update to modify the lifetime of a service
-PUT URL - http://localhost:9191/ws/v1/services/hello-world
+PUT URL - http://localhost:8088/app/v1/services/hello-world
 
 

[46/50] [abbrv] hadoop git commit: YARN-8223. Improved yarn auxiliary service to load jar file from HDFS. Contributed by Zian Chen

2018-05-04 Thread xkrogen
YARN-8223.  Improved yarn auxiliary service to load jar file from HDFS.
Contributed by Zian Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cdb032a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cdb032a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cdb032a

Branch: refs/heads/HDFS-12943
Commit: 8cdb032aff4237d8d3970057d82290e4e32c4040
Parents: 6795f80
Author: Eric Yang 
Authored: Fri May 4 12:36:31 2018 -0400
Committer: Eric Yang 
Committed: Fri May 4 12:36:31 2018 -0400

--
 .../PluggableShuffleAndPluggableSort.md | 44 
 .../containermanager/AuxServices.java   | 19 -
 2 files changed, 61 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cdb032a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
index 5ea0567..9e24103 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
@@ -67,6 +67,50 @@ The collector class configuration may specify a 
comma-separated list of collecto
 |: |: |: |
 | `yarn.nodemanager.aux-services` | `...,mapreduce_shuffle` | The auxiliary 
service name |
 | `yarn.nodemanager.aux-services.mapreduce_shuffle.class` | 
`org.apache.hadoop.mapred.ShuffleHandler` | The auxiliary service class to use |
+| `yarn.nodemanager.aux-services.%s.classpath` | NONE | local directory which 
includes the related jar file as well as all the dependencies’ jar file. We 
could specify the single jar file or use /dep/* to load all jars under the dep 
directory. |
+| `yarn.nodemanager.aux-services.%s.remote-classpath` | NONE | The remote 
absolute or relative path to jar file |
+
+ Example of loading jar file from HDFS:
+
+```xml
+<configuration>
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle,AuxServiceFromHDFS</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.remote-classpath</name>
+    <value>/aux/test/aux-service-hdfs.jar</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.class</name>
+    <value>org.apache.auxtest.AuxServiceFromHDFS2</value>
+  </property>
+</configuration>
+```
+
+ Example of loading jar file from local file system:
+
+```xml
+<configuration>
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle,AuxServiceFromHDFS</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.classpath</name>
+    <value>/aux/test/aux-service-hdfs.jar</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.class</name>
+    <value>org.apache.auxtest.AuxServiceFromHDFS2</value>
+  </property>
+</configuration>
+```
 
 **IMPORTANT:** If setting an auxiliary service in addition the default
 `mapreduce_shuffle` service, then a new service key should be added to the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cdb032a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index c8b7a76..3fe3cfd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -230,15 +230,30 @@ public class AuxServices extends AbstractService
   }
 }
 if (reDownload) {
+  LocalResourceType srcType = null;
+  String lowerDst = StringUtils.toLowerCase(src.toString());
+  if (lowerDst.endsWith(".jar")) {
+srcType = LocalResourceType.FILE;
+  } else if 

[21/50] [abbrv] hadoop git commit: HDFS-13503. Fix TestFsck test failures on Windows. Contributed by Xiao Liang.

2018-05-04 Thread xkrogen
HDFS-13503. Fix TestFsck test failures on Windows. Contributed by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e2cfb2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e2cfb2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e2cfb2d

Branch: refs/heads/HDFS-12943
Commit: 9e2cfb2d3f1a18984d07c81f9c46626dd842402a
Parents: 4e1382a
Author: Inigo Goiri 
Authored: Tue May 1 08:12:46 2018 -0700
Committer: Inigo Goiri 
Committed: Tue May 1 08:12:46 2018 -0700

--
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   3 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 123 ---
 2 files changed, 81 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e2cfb2d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index acb720e..c2e2a68 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -2924,7 +2924,8 @@ public class MiniDFSCluster implements AutoCloseable {
* @return Storage directory
*/
   public File getStorageDir(int dnIndex, int dirIndex) {
-return new File(getBaseDirectory(), getStorageDirPath(dnIndex, dirIndex));
+return new File(determineDfsBaseDir(),
+getStorageDirPath(dnIndex, dirIndex));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e2cfb2d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index f80fd70..1a392da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.MiniDFSCluster.HDFS_MINIDFS_BASEDIR;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -209,7 +210,9 @@ public class TestFsck {
 conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
 precision);
 conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1L);
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+.numDataNodes(4).build();
 fs = cluster.getFileSystem();
 final String fileName = "/srcdat";
 util.createFiles(fs, fileName);
@@ -297,7 +300,9 @@ public class TestFsck {
 setNumFiles(20).build();
 FileSystem fs = null;
 conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1L);
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+.numDataNodes(4).build();
 fs = cluster.getFileSystem();
 util.createFiles(fs, "/srcdat");
 util.waitReplication(fs, "/srcdat", (short)3);
@@ -315,7 +320,9 @@ public class TestFsck {
 conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1L);
 
 // Create a cluster with the current user, write some files
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+.numDataNodes(4).build();
 final MiniDFSCluster c2 = cluster;
 final String dir = "/dfsck";
 final Path dirpath = new Path(dir);
@@ -361,8 +368,9 @@ public class TestFsck {
 DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3,
 (5 * dfsBlockSize) + (dfsBlockSize - 1), 5 * dfsBlockSize);
 FileSystem fs = null;
-cluster = new MiniDFSCluster.Builder(conf).
-numDataNodes(numDatanodes).build();
+File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+cluster = new 
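The recurring pattern in this change, pulled out as a self-contained sketch (node count and configuration values assumed for the example):

```java
import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;

public class RandomizedBaseDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Give each MiniDFSCluster its own randomized base directory so runs
    // don't collide on a shared path, the root cause of the Windows failures.
    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
        .numDataNodes(4).build();
    try {
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}
```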

[47/50] [abbrv] hadoop git commit: HADOOP-15444 ITestS3GuardToolDynamo should only run with -Ddynamo (Aaron Fabbri)

2018-05-04 Thread xkrogen
HADOOP-15444 ITestS3GuardToolDynamo should only run with -Ddynamo (Aaron Fabbri)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96c843f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96c843f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96c843f6

Branch: refs/heads/HDFS-12943
Commit: 96c843f64bb424cd7544be0ccda16a6755c086de
Parents: 8cdb032
Author: Aaron Fabbri 
Authored: Fri May 4 11:34:37 2018 -0700
Committer: Aaron Fabbri 
Committed: Fri May 4 11:34:45 2018 -0700

--
 .../hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96c843f6/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
index c7dffd2..821bba5 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
@@ -28,6 +28,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import com.amazonaws.services.dynamodbv2.document.DynamoDB;
 import com.amazonaws.services.dynamodbv2.document.Table;
 import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException;
+import org.junit.Assume;
 import org.junit.Test;
 
 import org.apache.hadoop.conf.Configuration;
@@ -51,6 +52,14 @@ public class ITestS3GuardToolDynamoDB extends 
AbstractS3GuardToolTestBase {
 return new DynamoDBMetadataStore();
   }
 
+  @Override
+  public void setup() throws Exception {
+super.setup();
+Assume.assumeTrue("Test only applies when DynamoDB is used for S3Guard",
+getConfiguration().get(Constants.S3_METADATA_STORE_IMPL).equals(
+Constants.S3GUARD_METASTORE_DYNAMO));
+  }
+
   // Check the existence of a given DynamoDB table.
   private static boolean exist(DynamoDB dynamoDB, String tableName) {
 assertNotNull(dynamoDB);
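For reference, a sketch of how such DynamoDB-backed tests are selected at build time; the Maven properties below are assumptions taken from the hadoop-aws testing guide, not from this patch:

```
$ mvn verify -Ds3guard -Ddynamo
```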





[07/50] [abbrv] hadoop git commit: HDFS-13434. RBF: Fix dead links in RBF document. Contributed by Chetna Chaudhari.

2018-05-04 Thread xkrogen
HDFS-13434. RBF: Fix dead links in RBF document. Contributed by Chetna 
Chaudhari.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f469628b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f469628b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f469628b

Branch: refs/heads/HDFS-12943
Commit: f469628bba350ba79bc6a0d38f9dc1cb5eb65c77
Parents: 92c5331
Author: Inigo Goiri 
Authored: Fri Apr 27 15:13:47 2018 -0700
Committer: Inigo Goiri 
Committed: Fri Apr 27 15:13:47 2018 -0700

--
 .../src/site/markdown/HDFSRouterFederation.md   | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f469628b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
index 43e89ed..70c6226 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
@@ -21,7 +21,7 @@ Introduction
 
 
 NameNodes have scalability limits because of the metadata overhead comprised 
of inodes (files and directories) and file blocks, the number of Datanode 
heartbeats, and the number of HDFS RPC client requests.
-The common solution is to split the filesystem into smaller subclusters [HDFS 
Federation](./Federation.html) and provide a federated view 
[ViewFs](./ViewFs.html).
+The common solution is to split the filesystem into smaller subclusters [HDFS 
Federation](../hadoop-hdfs/Federation.html) and provide a federated view 
[ViewFs](../hadoop-hdfs/ViewFs.html).
 The problem is how to maintain the split of the subclusters (e.g., namespace 
partition), which forces users to connect to multiple subclusters and manage 
the allocation of folders/files to them.
 
 
@@ -37,8 +37,8 @@ This layer must be scalable, highly available, and fault 
tolerant.
 
 This federation layer comprises multiple components.
 The _Router_ component that has the same interface as a NameNode, and forwards 
the client requests to the correct subcluster, based on ground-truth 
information from a State Store.
-The _State Store_ combines a remote _Mount Table_ (in the flavor of 
[ViewFs](./ViewFs.html), but shared between clients) and utilization 
(load/capacity) information about the subclusters.
-This approach has the same architecture as [YARN 
federation](../hadoop-yarn/Federation.html).
+The _State Store_ combines a remote _Mount Table_ (in the flavor of 
[ViewFs](../hadoop-hdfs/ViewFs.html), but shared between clients) and 
utilization (load/capacity) information about the subclusters.
+This approach has the same architecture as [YARN 
federation](../../hadoop-yarn/hadoop-yarn-site/Federation.html).
 
 ![Router-based Federation Sequence Diagram | 
width=800](./images/routerfederation.png)
 
@@ -140,7 +140,7 @@ Examples users may encounter include the following.
 ### Quota management
 Federation supports and controls global quota at mount table level.
 For performance reasons, the Router caches the quota usage and updates it 
periodically. These quota usage values
-will be used for quota-verification during each WRITE RPC call invoked in 
RouterRPCSever. See [HDFS Quotas Guide](./HdfsQuotaAdminGuide.html)
+will be used for quota-verification during each WRITE RPC call invoked in 
RouterRPCSever. See [HDFS Quotas Guide](../hadoop-hdfs/HdfsQuotaAdminGuide.html)
 for the quota detail.
 
 ### State Store
@@ -163,7 +163,7 @@ The Routers discard the entries older than a certain 
threshold (e.g., ten Router
 
 * **Mount Table**:
 This table hosts the mapping between folders and subclusters.
-It is similar to the mount table in [ViewFs](.ViewFs.html) where it specifies 
the federated folder, the destination subcluster and the path in that folder.
+It is similar to the mount table in [ViewFs](../hadoop-hdfs/ViewFs.html) where 
it specifies the federated folder, the destination subcluster and the path in 
that folder.
 
 
 ### Security
@@ -175,7 +175,7 @@ Deployment
 
 By default, the Router is ready to take requests and monitor the NameNode in 
the local machine.
 It needs to know the State Store endpoint by setting 
`dfs.federation.router.store.driver.class`.
-The rest of the options are documented in 
[hdfs-default.xml](./hdfs-default.xml).
+The rest of the options are documented in 
[hdfs-default.xml](../hadoop-hdfs/hdfs-default.xml).
 
 Once the Router is configured, it can be started:
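For context (the commands themselves fall outside this hunk), the start and stop invocations in the published guide are roughly:

```
[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start dfsrouter
[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon stop dfsrouter
```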
 
@@ -187,7 +187,7 @@ And to stop it:
 
 ### Mount table management
 

[31/50] [abbrv] hadoop git commit: HADOOP-12071. conftest is not documented. Contributed by Kengo Seki.

2018-05-04 Thread xkrogen
HADOOP-12071. conftest is not documented.
Contributed by Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe649bb3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe649bb3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe649bb3

Branch: refs/heads/HDFS-12943
Commit: fe649bb3051f5647073c840d7334a90265ea3f06
Parents: 251f528
Author: Steve Loughran 
Authored: Wed May 2 13:33:56 2018 +0100
Committer: Steve Loughran 
Committed: Wed May 2 13:33:56 2018 +0100

--
 .../main/java/org/apache/hadoop/util/ConfTest.java | 10 --
 .../src/site/markdown/CommandsManual.md| 17 +
 2 files changed, 21 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe649bb3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
index 1915e79..a2cb85f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
@@ -84,7 +84,7 @@ public final class ConfTest {
 QName property = new QName("property");
 
 List<NodeInfo> nodes = new ArrayList<NodeInfo>();
-Stack<NodeInfo> parsed = new Stack<NodeInfo>();
+Stack<NodeInfo> parsed = new Stack<>();
 
 XMLInputFactory factory = XMLInputFactory.newInstance();
 XMLEventReader reader = factory.createXMLEventReader(in);
@@ -258,9 +258,7 @@ public final class ConfTest {
 if (confFile.isFile()) {
   files.add(confFile);
 } else if (confFile.isDirectory()) {
-  for (File file : listFiles(confFile)) {
-files.add(file);
-  }
+  files.addAll(Arrays.asList(listFiles(confFile)));
 } else {
   terminate(1, confFile.getAbsolutePath()
   + " is neither a file nor directory");
@@ -313,9 +311,9 @@ class NodeInfo {
   private StartElement startElement;
   private List<Attribute> attributes = new ArrayList<Attribute>();
   private Map<StartElement, Characters> elements =
-  new HashMap<StartElement, Characters>();
+  new HashMap<>();
   private Map<QName, List<XMLEvent>> qNameXMLEventsMap =
-  new HashMap<QName, List<XMLEvent>>();
+  new HashMap<>();
 
   public NodeInfo(StartElement startElement) {
 this.startElement = startElement;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe649bb3/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 2839503..ce904c5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
Usage: `hadoop classpath [--glob |--jar <path> |-h |--help]`
 
 Prints the class path needed to get the Hadoop jar and the required libraries. 
If called without arguments, then prints the classpath set up by the command 
scripts, which is likely to contain wildcards in the classpath entries. 
Additional options print the classpath after wildcard expansion or write the 
classpath into the manifest of a jar file. The latter is useful in environments 
where wildcards cannot be used and the expanded classpath exceeds the maximum 
supported command line length.
 
+### `conftest`
+
+Usage: `hadoop conftest [-conffile <path>]...`
+
+| COMMAND\_OPTION | Description |
+|: |: |
+| `-conffile` | Path of a configuration file or directory to validate |
+| `-h`, `--help` | print help |
+
+Validates configuration XML files.
+If the `-conffile` option is not specified, the files in `${HADOOP_CONF_DIR}` whose names end with `.xml` will be verified. If specified, that path will be verified. You can specify either a file or a directory; if a directory is specified, the files in that directory whose names end with `.xml` will be verified.
+You can specify the `-conffile` option multiple times.
+
+The validation is fairly minimal: the XML is parsed and duplicate and empty
+property names are checked for. The command does not support XInclude; if you are using that to pull in configuration items, it will declare the XML file invalid.
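For illustration, a hypothetical invocation that validates two specific files (paths assumed):

```
$ hadoop conftest -conffile etc/hadoop/core-site.xml -conffile etc/hadoop/hdfs-site.xml
```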
+
 ### `credential`
 
Usage: `hadoop credential <subcommand> [options]`



[36/50] [abbrv] hadoop git commit: YARN-8194. Fixed reinitialization error for LinuxContainerExecutor. Contributed by Chandni Singh

2018-05-04 Thread xkrogen
YARN-8194.  Fixed reinitialization error for LinuxContainerExecutor.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4d280f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4d280f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4d280f0

Branch: refs/heads/HDFS-12943
Commit: f4d280f02b557885cd5e5cf36abc36eb579ccfb4
Parents: 6b63a0a
Author: Eric Yang 
Authored: Wed May 2 20:07:19 2018 -0400
Committer: Eric Yang 
Committed: Wed May 2 20:07:19 2018 -0400

--
 .../launcher/ContainerLaunch.java   | 37 
 .../launcher/ContainerRelaunch.java | 36 ++-
 2 files changed, 39 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d280f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 9efe686..fa77899 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -20,6 +20,8 @@ package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
 
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -844,6 +846,7 @@ public class ContainerLaunch implements Callable<Integer> {
   throw new IOException("Reap container failed for container "
   + containerIdStr);
 }
+cleanupContainerFiles(getContainerWorkDir());
   }
 
   /**
@@ -1858,4 +1861,38 @@ public class ContainerLaunch implements Callable<Integer> {
   context.getNMStateStore().storeContainerWorkDir(containerId, workDir);
 }
   }
+
+  protected Path getContainerWorkDir() throws IOException {
+String containerWorkDir = container.getWorkDir();
+if (containerWorkDir == null
+|| !dirsHandler.isGoodLocalDir(containerWorkDir)) {
+  throw new IOException(
+  "Could not find a good work dir " + containerWorkDir
+  + " for container " + container);
+}
+
+return new Path(containerWorkDir);
+  }
+
+  /**
+   * Clean up container's files for container relaunch or cleanup.
+   */
+  protected void cleanupContainerFiles(Path containerWorkDir) {
+LOG.debug("cleanup container {} files", containerWorkDir);
+// delete ContainerScriptPath
+deleteAsUser(new Path(containerWorkDir, CONTAINER_SCRIPT));
+// delete TokensPath
+deleteAsUser(new Path(containerWorkDir, FINAL_CONTAINER_TOKENS_FILE));
+  }
+
+  private void deleteAsUser(Path path) {
+try {
+  exec.deleteAsUser(new DeletionAsUserContext.Builder()
+  .setUser(container.getUser())
+  .setSubDir(path)
+  .build());
+} catch (Exception e) {
+  LOG.warn("Failed to delete " + path, e);
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d280f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
index c6e3ed4..f69cf96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
+++ 

[12/50] [abbrv] hadoop git commit: YARN-8195. Fix constraint cardinality check in the presence of multiple target allocation tags. Contributed by Weiwei Yang.

2018-05-04 Thread xkrogen
YARN-8195. Fix constraint cardinality check in the presence of multiple target 
allocation tags. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b095554
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b095554
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b095554

Branch: refs/heads/HDFS-12943
Commit: 9b0955545174abe16fd81240db30f175145ee89b
Parents: 3d43474
Author: Konstantinos Karanasos 
Authored: Mon Apr 30 11:54:30 2018 -0700
Committer: Konstantinos Karanasos 
Committed: Mon Apr 30 11:54:30 2018 -0700

--
 .../constraint/PlacementConstraintsUtil.java|  8 +-
 .../TestPlacementConstraintsUtil.java   | 88 
 2 files changed, 92 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b095554/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index efa7b65..f47e1d4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -91,20 +91,20 @@ public final class PlacementConstraintsUtil {
 if (sc.getScope().equals(PlacementConstraints.NODE)) {
   if (checkMinCardinality) {
 minScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(),
-allocationTags, Long::max);
+allocationTags, Long::min);
   }
   if (checkMaxCardinality) {
 maxScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(),
-allocationTags, Long::min);
+allocationTags, Long::max);
   }
 } else if (sc.getScope().equals(PlacementConstraints.RACK)) {
   if (checkMinCardinality) {
 minScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(),
-allocationTags, Long::max);
+allocationTags, Long::min);
   }
   if (checkMaxCardinality) {
 maxScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(),
-allocationTags, Long::min);
+allocationTags, Long::max);
   }
 }
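Why the swap matters, with invented numbers (illustration only, not code from the patch):

```java
public class CardinalitySketch {
  public static void main(String[] args) {
    // Invented example: a node runs 2 containers tagged "A" and 0 tagged "B".
    long cardA = 2, cardB = 0;
    // A minimum-cardinality constraint over target tags {A, B} must aggregate
    // with min, otherwise the unmet tag "B" is masked by the satisfied "A":
    long minAgg = Math.min(cardA, cardB); // Long::min -> 0, correctly unsatisfied
    long maxAgg = Math.max(cardA, cardB); // Long::max -> 2, would wrongly pass (the old code)
    System.out.println(minAgg + " vs " + maxAgg);
  }
}
```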
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b095554/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
index 3248450..dc61981 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
@@ -42,6 +42,7 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import java.util.concurrent.atomic.AtomicLong;
+import com.google.common.collect.ImmutableMap;
 
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -228,6 +229,93 @@ public class TestPlacementConstraintsUtil {
   }
 
   @Test
+  public void testMultiTagsPlacementConstraints()
+  throws InvalidAllocationTagsQueryException {
+PlacementConstraintManagerService pcm =
+new 

[32/50] [abbrv] hadoop git commit: HDFS-11807. libhdfs++: Get minidfscluster tests running under valgrind. Contributed by Anatoli Shein.

2018-05-04 Thread xkrogen
HDFS-11807. libhdfs++: Get minidfscluster tests running under valgrind.  
Contributed by Anatoli Shein.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19ae588f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19ae588f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19ae588f

Branch: refs/heads/HDFS-12943
Commit: 19ae588fde9930c042cdb2848b8a1a0ff514b575
Parents: fe649bb
Author: James Clampffer 
Authored: Wed May 2 11:49:12 2018 -0400
Committer: James Clampffer 
Committed: Wed May 2 11:49:12 2018 -0400

--
 .../src/main/native/libhdfs-tests/expect.h  |  60 +
 .../libhdfs-tests/test_libhdfs_mini_stress.c| 253 ++-
 .../src/main/native/libhdfspp/CMakeLists.txt|   2 +-
 .../main/native/libhdfspp/tests/CMakeLists.txt  |   6 +
 .../main/native/libhdfspp/tests/memcheck.supp   |  27 ++
 5 files changed, 279 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ae588f/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h
index 528c96f..d843b67 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h
@@ -132,6 +132,54 @@ struct hdfsFile_internal;
 } \
 } while (0);
 
+#define EXPECT_INT_LT(x, y) \
+do { \
+int __my_ret__ = x; \
+int __my_errno__ = errno; \
+if (__my_ret__ >= (y)) { \
+fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+  "code %d (errno: %d): expected less than %d\n", \
+   __FILE__, __LINE__, __my_ret__, __my_errno__, (y)); \
+return -1; \
+} \
+} while (0);
+
+#define EXPECT_INT_LE(x, y) \
+do { \
+int __my_ret__ = x; \
+int __my_errno__ = errno; \
+if (__my_ret__ > (y)) { \
+fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+  "code %d (errno: %d): expected less than or equal %d\n", \
+   __FILE__, __LINE__, __my_ret__, __my_errno__, (y)); \
+return -1; \
+} \
+} while (0);
+
+#define EXPECT_INT_GT(x, y) \
+do { \
+int __my_ret__ = x; \
+int __my_errno__ = errno; \
+if (__my_ret__ <= (y)) { \
+fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+  "code %d (errno: %d): expected greater than %d\n", \
+   __FILE__, __LINE__, __my_ret__, __my_errno__, (y)); \
+return -1; \
+} \
+} while (0);
+
+#define EXPECT_INT_GE(x, y) \
+do { \
+int __my_ret__ = x; \
+int __my_errno__ = errno; \
+if (__my_ret__ < (y)) { \
+fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+  "code %d (errno: %d): expected greater than or equal %d\n", \
+   __FILE__, __LINE__, __my_ret__, __my_errno__, (y)); \
+return -1; \
+} \
+} while (0);
+
 #define EXPECT_INT64_EQ(x, y) \
 do { \
 int64_t __my_ret__ = y; \
@@ -144,6 +192,18 @@ struct hdfsFile_internal;
 } \
 } while (0);
 
+#define ASSERT_INT64_EQ(x, y) \
+do { \
+int64_t __my_ret__ = y; \
+int __my_errno__ = errno; \
+if (__my_ret__ != (x)) { \
+fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+  "value %"PRId64" (errno: %d): expected %"PRId64"\n", \
+   __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
+exit(EXIT_FAILURE); \
+} \
+} while (0);
+
 #define EXPECT_UINT64_EQ(x, y) \
 do { \
 uint64_t __my_ret__ = y; \

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ae588f/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
index dca4782..9054287 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+#include "common/util_c.h"
 

[22/50] [abbrv] hadoop git commit: YARN-7799. Improved YARN service jar file handling. Contributed by Billie Rinaldi

2018-05-04 Thread xkrogen
YARN-7799. Improved YARN service jar file handling.
   Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24eeea8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24eeea8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24eeea8b

Branch: refs/heads/HDFS-12943
Commit: 24eeea8b18749e02ea05b03eb18e3cf0455077c5
Parents: 9e2cfb2
Author: Eric Yang 
Authored: Tue May 1 16:46:34 2018 -0400
Committer: Eric Yang 
Committed: Tue May 1 16:46:34 2018 -0400

--
 .../hadoop-yarn-services-core/pom.xml   |   5 +
 .../yarn/service/client/ServiceClient.java  |  95 +++
 .../yarn/service/utils/CoreFileSystem.java  |   7 ++
 .../yarn/service/client/TestServiceCLI.java | 118 +--
 .../markdown/yarn-service/Configurations.md |   2 +-
 5 files changed, 195 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24eeea8b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
index 3ce8876..7efe8bd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
@@ -173,6 +173,11 @@
 
 
 
+  org.apache.hadoop
+  hadoop-hdfs
+
+
+
   com.google.protobuf
   protobuf-java
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24eeea8b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 8dd5342..67306d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -28,7 +28,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.registry.client.api.RegistryConstants;
@@ -37,8 +39,8 @@ import 
org.apache.hadoop.registry.client.api.RegistryOperationsFactory;
 import org.apache.hadoop.registry.client.binding.RegistryUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
 import 
org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest;
@@ -896,13 +898,13 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
 
   protected Path addJarResource(String serviceName,
   Map<String, LocalResource> localResources)
-  throws IOException, SliderException {
+  throws IOException, YarnException {
 Path libPath = fs.buildClusterDirPath(serviceName);
 ProviderUtils
 .addProviderJar(localResources, ServiceMaster.class, SERVICE_CORE_JAR, 
fs,
 libPath, "lib", false);
 Path dependencyLibTarGzip = fs.getDependencyTarGzip();
-if (fs.isFile(dependencyLibTarGzip)) {
+if (actionDependency(null, false) == EXIT_SUCCESS) {
   LOG.info("Loading lib tar from " + dependencyLibTarGzip);
   

[33/50] [abbrv] hadoop git commit: YARN-8209. Fixed NPE in Yarn Service deletion. Contributed by Eric Badger

2018-05-04 Thread xkrogen
YARN-8209.  Fixed NPE in Yarn Service deletion.
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/883f6822
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/883f6822
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/883f6822

Branch: refs/heads/HDFS-12943
Commit: 883f68222a9cfd06f79a8fcd75ec9fef00abc035
Parents: 19ae588
Author: Eric Yang 
Authored: Wed May 2 14:33:31 2018 -0400
Committer: Eric Yang 
Committed: Wed May 2 14:33:31 2018 -0400

--
 .../linux/privileged/PrivilegedOperation.java   |  4 +-
 .../runtime/DockerLinuxContainerRuntime.java| 21 +
 .../linux/runtime/docker/DockerClient.java  |  7 ++-
 .../linux/runtime/docker/DockerCommand.java | 32 +
 .../runtime/docker/DockerCommandExecutor.java   | 12 ++---
 .../runtime/docker/DockerInspectCommand.java| 19 
 .../linux/runtime/docker/DockerRmCommand.java   | 16 +++
 .../impl/container-executor.c   | 28 
 .../impl/container-executor.h   | 10 -
 .../main/native/container-executor/impl/main.c  | 47 ++--
 .../docker/TestDockerCommandExecutor.java   | 31 +++--
 11 files changed, 184 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f6822/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
index 189c0d0..92a82e8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
@@ -54,7 +54,9 @@ public class PrivilegedOperation {
 GPU("--module-gpu"),
 FPGA("--module-fpga"),
 LIST_AS_USER(""), // no CLI switch supported yet.
-ADD_NUMA_PARAMS(""); // no CLI switch supported yet.
+ADD_NUMA_PARAMS(""), // no CLI switch supported yet.
+REMOVE_DOCKER_CONTAINER("--remove-docker-container"),
+INSPECT_DOCKER_CONTAINER("--inspect-docker-container");
 
 private final String option;
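
A minimal standalone sketch of the option-string enum pattern this hunk
extends. OperationType is an illustrative name, not a Hadoop class; the
switch strings come straight from the diff above.

// Sketch only: mirrors PrivilegedOperation's enum-backed CLI switches.
public enum OperationType {
  REMOVE_DOCKER_CONTAINER("--remove-docker-container"),
  INSPECT_DOCKER_CONTAINER("--inspect-docker-container"),
  LIST_AS_USER(""); // no CLI switch supported yet

  // The switch handed to the native container-executor binary.
  private final String option;

  OperationType(String option) {
    this.option = option;
  }

  public String getOption() {
    return option;
  }
}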
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f6822/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 9c05c59..ec1d055 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -22,6 +22,7 @@ package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerCommand;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerCommandExecutor;
@@ -384,7 +385,7 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   Container container) throws 

[19/50] [abbrv] hadoop git commit: YARN-8187. [UI2] Individual Node page does not contain breadcrumb trail. Contributed by Zian Chen.

2018-05-04 Thread xkrogen
YARN-8187. [UI2] Individual Node page does not contain breadcrumb trail. 
Contributed by Zian Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6139c51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6139c51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6139c51

Branch: refs/heads/HDFS-12943
Commit: d6139c5106a469df72c1551100d550371f6cb7c7
Parents: f0c3dc4
Author: Sunil G 
Authored: Tue May 1 14:01:34 2018 +0530
Committer: Sunil G 
Committed: Tue May 1 14:01:34 2018 +0530

--
 .../src/main/webapp/app/templates/yarn-node.hbs | 25 
 .../webapp/app/templates/yarn-node/info.hbs |  2 --
 .../app/templates/yarn-node/yarn-nm-gpu.hbs |  2 --
 3 files changed, 25 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6139c51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
new file mode 100644
index 000..d82b175
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
@@ -0,0 +1,25 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+{{breadcrumb-bar breadcrumbs=breadcrumbs}}
+
+
+  
+{{outlet}}
+  
+
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6139c51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs
index ad411c0..a2c708e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs
@@ -16,8 +16,6 @@
   limitations under the License.
 --}}
 
-{{breadcrumb-bar breadcrumbs=breadcrumbs}}
-
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6139c51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/yarn-nm-gpu.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/yarn-nm-gpu.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/yarn-nm-gpu.hbs
index 0464cc8..f3aafe5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/yarn-nm-gpu.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/yarn-nm-gpu.hbs
@@ -16,8 +16,6 @@
   limitations under the License.
 --}}
 
-{{breadcrumb-bar breadcrumbs=breadcrumbs}}
-
 
   
 





[20/50] [abbrv] hadoop git commit: YARN-2674. Fix distributed shell AM container relaunch during RM work preserving restart. Contributed by Shane Kumpf

2018-05-04 Thread xkrogen
YARN-2674. Fix distributed shell AM container relaunch during RM work 
preserving restart. Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e1382ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e1382ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e1382ac

Branch: refs/heads/HDFS-12943
Commit: 4e1382aca4cf23ca229bdd24e0f143c22449b329
Parents: d6139c5
Author: Billie Rinaldi 
Authored: Mon Apr 30 14:34:51 2018 -0700
Committer: Billie Rinaldi 
Committed: Tue May 1 07:27:47 2018 -0700

--
 .../distributedshell/ApplicationMaster.java | 68 +---
 .../distributedshell/TestDSAppMaster.java   |  8 +--
 2 files changed, 46 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e1382ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 75f4073..cca5676 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -31,6 +31,7 @@ import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -105,6 +106,7 @@ import 
org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
@@ -1060,32 +1062,48 @@ public class ApplicationMaster {
 public void onContainersAllocated(List<Container> allocatedContainers) {
   LOG.info("Got response from RM for container ask, allocatedCnt="
   + allocatedContainers.size());
-  numAllocatedContainers.addAndGet(allocatedContainers.size());
   for (Container allocatedContainer : allocatedContainers) {
-String yarnShellId = Integer.toString(yarnShellIdCounter);
-yarnShellIdCounter++;
-LOG.info("Launching shell command on a new container."
-+ ", containerId=" + allocatedContainer.getId()
-+ ", yarnShellId=" + yarnShellId
-+ ", containerNode=" + allocatedContainer.getNodeId().getHost()
-+ ":" + allocatedContainer.getNodeId().getPort()
-+ ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress()
-+ ", containerResourceMemory"
-+ allocatedContainer.getResource().getMemorySize()
-+ ", containerResourceVirtualCores"
-+ allocatedContainer.getResource().getVirtualCores());
-// + ", containerToken"
-// +allocatedContainer.getContainerToken().getIdentifier().toString());
-
-Thread launchThread = createLaunchContainerThread(allocatedContainer,
-yarnShellId);
-
-// launch and start the container on a separate thread to keep
-// the main thread unblocked
-// as all containers may not be allocated at one go.
-launchThreads.add(launchThread);
-launchedContainers.add(allocatedContainer.getId());
-launchThread.start();
+if (numAllocatedContainers.get() == numTotalContainers) {
+  LOG.info("The requested number of containers have been allocated."
+  + " Releasing the extra container allocation from the RM.");
+  amRMClient.releaseAssignedContainer(allocatedContainer.getId());
+} else {
+  numAllocatedContainers.addAndGet(1);
+  String yarnShellId = Integer.toString(yarnShellIdCounter);
+   
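
The hunk is cut off at this point in the archive. As a rough, self-contained
sketch of the surplus-release pattern it introduces: the field and method
names follow the diff, while the wrapper class and its wiring are assumed.

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;

public class SurplusReleasingHandler {
  private final AMRMClientAsync<?> amRMClient; // assumed initialized elsewhere
  private final AtomicInteger numAllocatedContainers = new AtomicInteger();
  private final int numTotalContainers;

  public SurplusReleasingHandler(AMRMClientAsync<?> amRMClient,
      int numTotalContainers) {
    this.amRMClient = amRMClient;
    this.numTotalContainers = numTotalContainers;
  }

  public void onContainersAllocated(List<Container> allocated) {
    for (Container container : allocated) {
      if (numAllocatedContainers.get() == numTotalContainers) {
        // Everything requested is already allocated; hand the surplus back
        // to the RM instead of counting it, so an AM relaunch after a
        // work-preserving RM restart does not double-launch shells.
        amRMClient.releaseAssignedContainer(container.getId());
      } else {
        numAllocatedContainers.incrementAndGet();
        // launch the shell in this container (omitted in this sketch)
      }
    }
  }
}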

[40/50] [abbrv] hadoop git commit: YARN-8217. RmAuthenticationFilterInitializer and TimelineAuthenticationFilterInitializer should use Configuration.getPropsWithPrefix instead of iterator. Contributed by Suma Shivaprasad.

2018-05-04 Thread xkrogen
YARN-8217. RmAuthenticationFilterInitializer and 
TimelineAuthenticationFilterInitializer should use 
Configuration.getPropsWithPrefix instead of iterator. Contributed by Suma 
Shivaprasad.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee2ce923
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee2ce923
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee2ce923

Branch: refs/heads/HDFS-12943
Commit: ee2ce923a922bfc3e89ad6f0f6a25e776fe91ffb
Parents: 85381c7
Author: Rohith Sharma K S 
Authored: Thu May 3 10:01:02 2018 +0530
Committer: Rohith Sharma K S 
Committed: Thu May 3 14:43:40 2018 +0530

--
 .../http/RMAuthenticationFilterInitializer.java | 51 ++--
 ...TimelineAuthenticationFilterInitializer.java | 47 +++-
 .../security/TestRMAuthenticationFilter.java| 81 
 3 files changed, 98 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee2ce923/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
index 9fc1334..d0cde9e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
@@ -18,23 +18,13 @@
 
 package org.apache.hadoop.yarn.server.security.http;
 
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.Reader;
-import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;
-import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import 
org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
 import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
@@ -43,48 +33,23 @@ import 
org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
 public class RMAuthenticationFilterInitializer extends FilterInitializer {
 
   String configPrefix;
-  String kerberosPrincipalProperty;
-  String cookiePath;
 
   public RMAuthenticationFilterInitializer() {
 this.configPrefix = "hadoop.http.authentication.";
-this.kerberosPrincipalProperty = KerberosAuthenticationHandler.PRINCIPAL;
-this.cookiePath = "/";
   }
 
   protected Map createFilterConfig(Configuration conf) {
-Map<String, String> filterConfig = new HashMap<String, String>();
-
-// setting the cookie path to root '/' so it is used for all resources.
-filterConfig.put(AuthenticationFilter.COOKIE_PATH, cookiePath);
+Map<String, String> filterConfig = AuthenticationFilterInitializer
+.getFilterConfigMap(conf, configPrefix);
 
 // Before conf object is passed in, RM has already processed it and used RM
 // specific configs to overwrite hadoop common ones. Hence we just need to
 // source hadoop.proxyuser configs here.
-for (Map.Entry<String, String> entry : conf) {
-  String propName = entry.getKey();
-  if (propName.startsWith(configPrefix)) {
-String value = conf.get(propName);
-String name = propName.substring(configPrefix.length());
-filterConfig.put(name, value);
-  } else if (propName.startsWith(ProxyUsers.CONF_HADOOP_PROXYUSER)) {
-String value = conf.get(propName);
-String name = propName.substring("hadoop.".length());
-filterConfig.put(name, value);
-  }
-}
 
-// Resolve _HOST into bind 
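
The remainder of the hunk is truncated here. For reference, a minimal sketch
of the Configuration.getPropsWithPrefix call the patch switches to; the
property values below are made up for illustration.

import java.util.Map;

import org.apache.hadoop.conf.Configuration;

public class PrefixedPropsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("hadoop.http.authentication.type", "kerberos");
    conf.set("hadoop.http.authentication.cookie.path", "/");
    conf.set("unrelated.key", "ignored");

    // Returns the matching keys with the prefix stripped, replacing the
    // hand-written iterate-and-substring loop removed by this patch.
    Map<String, String> filterConfig =
        conf.getPropsWithPrefix("hadoop.http.authentication.");
    // Prints the two matching entries, e.g. {type=kerberos, cookie.path=/}
    // (map iteration order is not guaranteed).
    System.out.println(filterConfig);
  }
}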

[17/50] [abbrv] hadoop git commit: HDDS-13. Refactor StorageContainerManager into seperate RPC endpoints. Contributed by Anu Engineer.

2018-05-04 Thread xkrogen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
new file mode 100644
index 000..e42b887
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -0,0 +1,350 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
+ * information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache
+ * License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.server;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.BlockingService;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SendContainerReportProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMReregisterCmdResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
+
+
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.versionCommand;
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.registeredCommand;
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.sendContainerReport;
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.reregisterCommand;
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.deleteBlocksCommand;
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.closeContainerCommand;
+
+
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
+import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
+import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
+import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
+import org.apache.hadoop.ozone.protocolPB
+.StorageContainerDatanodeProtocolServerSideTranslatorPB;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import 

[25/50] [abbrv] hadoop git commit: HDFS-13488. RBF: Reject requests when a Router is overloaded. Contributed by Inigo Goiri.

2018-05-04 Thread xkrogen
HDFS-13488. RBF: Reject requests when a Router is overloaded. Contributed by 
Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37269261
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37269261
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37269261

Branch: refs/heads/HDFS-12943
Commit: 37269261d1232bc71708f30c76193188258ef4bd
Parents: 8f42daf
Author: Yiqun Lin 
Authored: Wed May 2 14:49:39 2018 +0800
Committer: Yiqun Lin 
Committed: Wed May 2 14:49:39 2018 +0800

--
 .../federation/metrics/FederationRPCMBean.java  |   2 +
 .../metrics/FederationRPCMetrics.java   |  10 +
 .../FederationRPCPerformanceMonitor.java|   5 +
 .../server/federation/router/RBFConfigKeys.java |   3 +
 .../federation/router/RouterRpcClient.java  |  31 ++-
 .../federation/router/RouterRpcMonitor.java |   6 +
 .../federation/router/RouterRpcServer.java  |  11 +-
 .../router/RouterSafeModeException.java |  53 
 .../src/main/resources/hdfs-rbf-default.xml |   9 +
 .../server/federation/FederationTestUtils.java  |   2 +-
 .../server/federation/StateStoreDFSCluster.java |  28 +++
 .../router/TestRouterClientRejectOverload.java  | 243 +++
 .../router/TestRouterRPCClientRetries.java  |  51 +---
 .../federation/router/TestRouterSafemode.java   |   3 +-
 14 files changed, 349 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
index 3e031fe..973c398 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
@@ -40,6 +40,8 @@ public interface FederationRPCMBean {
 
   long getProxyOpFailureStandby();
 
+  long getProxyOpFailureClientOverloaded();
+
   long getProxyOpNotImplemented();
 
   long getProxyOpRetries();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
index 94d3383..9ab4e5a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
@@ -54,6 +54,8 @@ public class FederationRPCMetrics implements 
FederationRPCMBean {
   private MutableCounterLong proxyOpFailureStandby;
   @Metric("Number of operations to hit a standby NN")
   private MutableCounterLong proxyOpFailureCommunicate;
+  @Metric("Number of operations to hit a client overloaded Router")
+  private MutableCounterLong proxyOpFailureClientOverloaded;
   @Metric("Number of operations not implemented")
   private MutableCounterLong proxyOpNotImplemented;
   @Metric("Number of operation retries")
@@ -118,6 +120,14 @@ public class FederationRPCMetrics implements 
FederationRPCMBean {
 return proxyOpFailureCommunicate.value();
   }
 
+  public void incrProxyOpFailureClientOverloaded() {
+proxyOpFailureClientOverloaded.incr();
+  }
+
+  @Override
+  public long getProxyOpFailureClientOverloaded() {
+return proxyOpFailureClientOverloaded.value();
+  }
 
   public void incrProxyOpNotImplemented() {
 proxyOpNotImplemented.incr();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37269261/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
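
The message is truncated at this point in the archive. A generic sketch of
the @Metric counter idiom the new proxyOpFailureClientOverloaded counter
follows: the class name here is illustrative, and the instance must be
registered with the metrics system (e.g. through DefaultMetricsSystem)
before the annotated field is populated.

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

@Metrics(about = "Illustrative router RPC metrics", context = "dfs")
public class DemoRpcMetrics {
  @Metric("Number of operations rejected because the Router was overloaded")
  private MutableCounterLong proxyOpFailureClientOverloaded;

  // Called on the failure path, mirroring incrProxyOpFailureClientOverloaded().
  public void incrProxyOpFailureClientOverloaded() {
    proxyOpFailureClientOverloaded.incr();
  }

  // Exposed through the MBean, mirroring getProxyOpFailureClientOverloaded().
  public long getProxyOpFailureClientOverloaded() {
    return proxyOpFailureClientOverloaded.value();
  }
}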
 

[37/50] [abbrv] hadoop git commit: HDFS-13481. TestRollingFileSystemSinkWithHdfs#testFlushThread: test failed intermittently (Contributed by Gabor Bota via Daniel Templeton)

2018-05-04 Thread xkrogen
HDFS-13481. TestRollingFileSystemSinkWithHdfs#testFlushThread: test failed 
intermittently
(Contributed by Gabor Bota via Daniel Templeton)

Change-Id: I9921981dfa69669fe7912dd2a31ae8b638283204


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87c23ef6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87c23ef6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87c23ef6

Branch: refs/heads/HDFS-12943
Commit: 87c23ef643393c39e8353ca9f495b0c8f97cdbd9
Parents: f4d280f
Author: Daniel Templeton 
Authored: Wed May 2 16:54:42 2018 -0700
Committer: Daniel Templeton 
Committed: Wed May 2 17:13:40 2018 -0700

--
 .../hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87c23ef6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
index da85b9b..0f90d82 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
@@ -182,7 +182,8 @@ public class RollingFileSystemSinkTestBase {
 .add(prefix + ".sink.mysink0.ignore-error", ignoreErrors)
 .add(prefix + ".sink.mysink0.allow-append", allowAppend)
 .add(prefix + ".sink.mysink0.roll-offset-interval-millis", 0)
-.add(prefix + ".sink.mysink0.roll-interval", "1h");
+.add(prefix + ".sink.mysink0.roll-interval", "1h")
+.add("*.queue.capacity", 2);
 
 if (useSecureParams) {
   builder.add(prefix + ".sink.mysink0.keytab-key", SINK_KEYTAB_FILE_KEY)





[08/50] [abbrv] hadoop git commit: HDFS-13508. RBF: Normalize paths (automatically) when adding, updating, removing or listing mount table entries. Contributed by Ekanth S.

2018-05-04 Thread xkrogen
HDFS-13508. RBF: Normalize paths (automatically) when adding, updating, 
removing or listing mount table entries. Contributed by Ekanth S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48444060
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48444060
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48444060

Branch: refs/heads/HDFS-12943
Commit: 484440602c5b69fbd8106010603c61ae051056dd
Parents: f469628
Author: Inigo Goiri 
Authored: Fri Apr 27 16:28:17 2018 -0700
Committer: Inigo Goiri 
Committed: Fri Apr 27 16:28:17 2018 -0700

--
 .../hdfs/tools/federation/RouterAdmin.java  |  16 +++
 .../federation/router/TestRouterAdminCLI.java   | 117 ++-
 2 files changed, 130 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48444060/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 17707dc..b0a2062 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -26,6 +26,7 @@ import java.util.Map;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -322,6 +323,7 @@ public class RouterAdmin extends Configured implements Tool 
{
   public boolean addMount(String mount, String[] nss, String dest,
   boolean readonly, DestinationOrder order, ACLEntity aclInfo)
   throws IOException {
+mount = normalizeFileSystemPath(mount);
 // Get the existing entry
 MountTableManager mountTable = client.getMountTableManager();
 GetMountTableEntriesRequest getRequest =
@@ -473,6 +475,7 @@ public class RouterAdmin extends Configured implements Tool 
{
   public boolean updateMount(String mount, String[] nss, String dest,
   boolean readonly, DestinationOrder order, ACLEntity aclInfo)
   throws IOException {
+mount = normalizeFileSystemPath(mount);
 MountTableManager mountTable = client.getMountTableManager();
 
 // Create a new entry
@@ -519,6 +522,7 @@ public class RouterAdmin extends Configured implements Tool 
{
* @throws IOException If it cannot be removed.
*/
   public boolean removeMount(String path) throws IOException {
+path = normalizeFileSystemPath(path);
 MountTableManager mountTable = client.getMountTableManager();
 RemoveMountTableEntryRequest request =
 RemoveMountTableEntryRequest.newInstance(path);
@@ -538,6 +542,7 @@ public class RouterAdmin extends Configured implements Tool 
{
* @throws IOException If it cannot be listed.
*/
   public void listMounts(String path) throws IOException {
+path = normalizeFileSystemPath(path);
 MountTableManager mountTable = client.getMountTableManager();
 GetMountTableEntriesRequest request =
 GetMountTableEntriesRequest.newInstance(path);
@@ -798,6 +803,17 @@ public class RouterAdmin extends Configured implements 
Tool {
   }
 
   /**
+   * Normalize a path for that filesystem.
+   *
+   * @param path Path to normalize.
+   * @return Normalized path.
+   */
+  private static String normalizeFileSystemPath(final String path) {
+Path normalizedPath = new Path(path);
+return normalizedPath.toString();
+  }
+
+  /**
* Inner class that stores ACL info of mount table.
*/
   static class ACLEntity {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48444060/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 4e84c33..2537c19 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ 
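
The test-file hunk is cut off above. A quick illustration of what
normalizeFileSystemPath buys: round-tripping through
org.apache.hadoop.fs.Path collapses duplicate slashes and drops a trailing
slash (it does not resolve "." or ".."). The class name below is
illustrative.

import org.apache.hadoop.fs.Path;

public class NormalizeDemo {
  public static void main(String[] args) {
    // Mirrors RouterAdmin#normalizeFileSystemPath.
    String normalized = new Path("/mount//data/").toString();
    System.out.println(normalized); // prints /mount/data
  }
}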

[10/50] [abbrv] hadoop git commit: HDFS-13509. Bug fix for breakHardlinks() of ReplicaInfo/LocalReplica, and fix TestFileAppend failures on Windows. Contributed by Xiao Liang.

2018-05-04 Thread xkrogen
HDFS-13509. Bug fix for breakHardlinks() of ReplicaInfo/LocalReplica, and fix 
TestFileAppend failures on Windows. Contributed by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb7fe1d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb7fe1d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb7fe1d5

Branch: refs/heads/HDFS-12943
Commit: eb7fe1d588de903be2ff6e20384c25c184881532
Parents: 2c95eb8
Author: Inigo Goiri 
Authored: Sat Apr 28 09:05:30 2018 -0700
Committer: Inigo Goiri 
Committed: Sat Apr 28 09:05:30 2018 -0700

--
 .../hdfs/server/datanode/LocalReplica.java  | 18 ++---
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 71 +---
 2 files changed, 55 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb7fe1d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
index 2c5af11..68126a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
@@ -186,16 +186,18 @@ abstract public class LocalReplica extends ReplicaInfo {
 final FileIoProvider fileIoProvider = getFileIoProvider();
 final File tmpFile = DatanodeUtil.createFileWithExistsCheck(
 getVolume(), b, DatanodeUtil.getUnlinkTmpFile(file), fileIoProvider);
-try (FileInputStream in = fileIoProvider.getFileInputStream(
-getVolume(), file)) {
-  try (FileOutputStream out = fileIoProvider.getFileOutputStream(
-  getVolume(), tmpFile)) {
-IOUtils.copyBytes(in, out, 16 * 1024);
+try {
+  try (FileInputStream in = fileIoProvider.getFileInputStream(
+  getVolume(), file)) {
+try (FileOutputStream out = fileIoProvider.getFileOutputStream(
+getVolume(), tmpFile)) {
+  IOUtils.copyBytes(in, out, 16 * 1024);
+}
   }
   if (file.length() != tmpFile.length()) {
-throw new IOException("Copy of file " + file + " size " + 
file.length()+
-  " into file " + tmpFile +
-  " resulted in a size of " + tmpFile.length());
+throw new IOException("Copy of file " + file + " size " + file.length()
++ " into file " + tmpFile + " resulted in a size of "
++ tmpFile.length());
   }
   fileIoProvider.replaceFile(getVolume(), tmpFile, file);
 } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb7fe1d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 20cec6a..aa8afb0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -55,6 +55,7 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
@@ -120,7 +121,9 @@ public class TestFileAppend{
   @Test
   public void testBreakHardlinksIfNeeded() throws IOException {
 Configuration conf = new HdfsConfiguration();
-MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+.build();
 FileSystem fs = cluster.getFileSystem();
 InetSocketAddress addr = new InetSocketAddress("localhost",
cluster.getNameNodePort());
@@ -186,7 +189,9 @@ public class TestFileAppend{
   public void testSimpleFlush() throws IOException {
 Configuration conf = new HdfsConfiguration();
 fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-   
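
The rest of this message is truncated. For context, the copy-verify-replace
idiom that the LocalReplica fix restructures, sketched with plain java.io
streams in place of the DataNode's FileIoProvider; this is a simplification,
not the actual replica code.

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.IOUtils;

public class BreakHardlinkSketch {
  // Copy 'file' to 'tmpFile', verify the size, then swap the copy in,
  // replacing the hardlinked inode with a private copy of the data.
  static void breakHardlink(File file, File tmpFile) throws IOException {
    try (FileInputStream in = new FileInputStream(file)) {
      try (FileOutputStream out = new FileOutputStream(tmpFile)) {
        IOUtils.copyBytes(in, out, 16 * 1024);
      }
    }
    if (file.length() != tmpFile.length()) {
      throw new IOException("Copy of file " + file + " size " + file.length()
          + " into file " + tmpFile + " resulted in a size of "
          + tmpFile.length());
    }
    // Stand-in for fileIoProvider.replaceFile(); a plain rename is not
    // atomic-on-Windows the way the real helper is.
    if (!tmpFile.renameTo(file)) {
      throw new IOException("Failed to replace " + file + " with " + tmpFile);
    }
  }
}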

[15/50] [abbrv] hadoop git commit: YARN-8228. Added hostname length check for docker container. Contributed by Shane Kumpf

2018-05-04 Thread xkrogen
YARN-8228.  Added hostname length check for docker container.
Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a966ec6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a966ec6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a966ec6e

Branch: refs/heads/HDFS-12943
Commit: a966ec6e23b3ac8e233b2cf9b9ddaa6628a8c996
Parents: 919865a
Author: Eric Yang 
Authored: Mon Apr 30 19:12:53 2018 -0400
Committer: Eric Yang 
Committed: Mon Apr 30 19:12:53 2018 -0400

--
 .../linux/runtime/DockerLinuxContainerRuntime.java |  6 ++
 .../linux/runtime/TestDockerContainerRuntime.java  | 13 +
 2 files changed, 19 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a966ec6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 999b343..9c05c59 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -199,6 +199,7 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   HOSTNAME_PATTERN);
   private static final Pattern USER_MOUNT_PATTERN = Pattern.compile(
   "(?<=^|,)([^:\\x00]+):([^:\\x00]+):([a-z]+)");
+  private static final int HOST_NAME_LENGTH = 64;
 
   @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_IMAGE =
@@ -541,6 +542,11 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 throw new ContainerExecutionException("Hostname '" + hostname
 + "' doesn't match docker hostname pattern");
   }
+  if (hostname.length() > HOST_NAME_LENGTH) {
+throw new ContainerExecutionException(
+"Hostname can not be greater than " + HOST_NAME_LENGTH
++ " characters: " + hostname);
+  }
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a966ec6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index a333bac..6ad35b2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -1539,6 +1539,19 @@ public class TestDockerContainerRuntime {
 }
   }
 
+  @Test
+  public void testValidDockerHostnameLength() throws Exception {
+String validLength = "example.test.site";
+DockerLinuxContainerRuntime.validateHostname(validLength);
+  }
+
+  @Test(expected = ContainerExecutionException.class)
+  public void testInvalidDockerHostnameLength() throws Exception {
+String invalidLength =
+"exampleexampleexampleexampleexampleexampleexampleexample.test.site";
+DockerLinuxContainerRuntime.validateHostname(invalidLength);
+  }
+
   @SuppressWarnings("unchecked")
   private void checkVolumeCreateCommand()
   throws PrivilegedOperationException, IOException {
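
A standalone sketch of the added length check: the 64-character cap and the
message text follow the diff, while the wrapper class and the exception type
are simplified for illustration.

public class HostnameCheckDemo {
  private static final int HOST_NAME_LENGTH = 64;

  static void validateHostnameLength(String hostname) {
    if (hostname != null && hostname.length() > HOST_NAME_LENGTH) {
      // The runtime throws ContainerExecutionException here.
      throw new IllegalArgumentException("Hostname can not be greater than "
          + HOST_NAME_LENGTH + " characters: " + hostname);
    }
  }

  public static void main(String[] args) {
    validateHostnameLength("example.test.site");            // passes (17 chars)
    validateHostnameLength("exampleexampleexampleexampleexample"
        + "exampleexampleexample.test.site");               // throws (66 chars)
  }
}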



[04/50] [abbrv] hadoop git commit: YARN-8221. RMWebServices also need to honor yarn.resourcemanager.display.per-user-apps. Contributed by Sunil G.

2018-05-04 Thread xkrogen
YARN-8221. RMWebServices also need to honor 
yarn.resourcemanager.display.per-user-apps. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef3ecc30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef3ecc30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef3ecc30

Branch: refs/heads/HDFS-12943
Commit: ef3ecc308dbea41c6a88bd4d16739c7bbc10cdda
Parents: bff3d7b
Author: Rohith Sharma K S 
Authored: Fri Apr 27 22:58:10 2018 +0530
Committer: Rohith Sharma K S 
Committed: Fri Apr 27 22:58:10 2018 +0530

--
 .../server/resourcemanager/webapp/RMWebServices.java   | 13 -
 1 file changed, 12 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef3ecc30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index d30764d..0564b67 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -228,6 +228,7 @@ public class RMWebServices extends WebServices implements 
RMWebServiceProtocol {
 
   @VisibleForTesting
   boolean isCentralizedNodeLabelConfiguration = true;
+  private boolean displayPerUserApps = false;
 
   public final static String DELEGATION_TOKEN_HEADER =
   "Hadoop-YARN-RM-Delegation-Token";
@@ -240,6 +241,9 @@ public class RMWebServices extends WebServices implements 
RMWebServiceProtocol {
 this.conf = conf;
 isCentralizedNodeLabelConfiguration =
 YarnConfiguration.isCentralizedNodeLabelConfiguration(conf);
+this.displayPerUserApps  = conf.getBoolean(
+YarnConfiguration.DISPLAY_APPS_FOR_LOGGED_IN_USER,
+YarnConfiguration.DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER);
   }
 
   RMWebServices(ResourceManager rm, Configuration conf,
@@ -608,7 +612,14 @@ public class RMWebServices extends WebServices implements 
RMWebServiceProtocol {
   DeSelectFields deSelectFields = new DeSelectFields();
   deSelectFields.initFields(unselectedFields);
 
-  AppInfo app = new AppInfo(rm, rmapp, hasAccess(rmapp, hsr),
+  boolean allowAccess = hasAccess(rmapp, hsr);
+  // Given RM is configured to display apps per user, skip apps to which
+  // this caller doesn't have access to view.
+  if (displayPerUserApps && !allowAccess) {
+continue;
+  }
+
+  AppInfo app = new AppInfo(rm, rmapp, allowAccess,
   WebAppUtils.getHttpSchemePrefix(conf), deSelectFields);
   allApps.add(app);
 }
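
A minimal sketch of reading the flag the way the constructor change above
does; the key string comes from the issue title, and reading it as a literal
rather than through the YarnConfiguration constants keeps the snippet
self-contained.

import org.apache.hadoop.conf.Configuration;

public class DisplayPerUserAppsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.setBoolean("yarn.resourcemanager.display.per-user-apps", true);

    // When true, RMWebServices now skips apps the caller cannot access
    // instead of returning them with redacted details.
    boolean displayPerUserApps =
        conf.getBoolean("yarn.resourcemanager.display.per-user-apps", false);
    System.out.println("filter per-user apps: " + displayPerUserApps);
  }
}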





[03/50] [abbrv] hadoop git commit: HADOOP-15382. Log kinit output in credential renewal thread. Contributed by Gabor Bota.

2018-05-04 Thread xkrogen
HADOOP-15382. Log kinit output in credential renewal thread. Contributed by 
Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bff3d7b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bff3d7b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bff3d7b0

Branch: refs/heads/HDFS-12943
Commit: bff3d7b0cf073ccc061db30af6d52fa4a9f21c05
Parents: 24a5ccb
Author: Wei-Chiu Chuang 
Authored: Fri Apr 27 10:05:55 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Apr 27 10:05:55 2018 -0700

--
 .../java/org/apache/hadoop/security/UserGroupInformation.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bff3d7b0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index a9f6cb6..cb132b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -866,9 +866,9 @@ public class UserGroupInformation {
 if (now < nextRefresh) {
   Thread.sleep(nextRefresh - now);
 }
-Shell.execCommand(cmd, "-R");
+String output = Shell.execCommand(cmd, "-R");
 if (LOG.isDebugEnabled()) {
-  LOG.debug("renewed ticket");
+  LOG.debug("Renewed ticket. kinit output: {}", output);
 }
 reloginFromTicketCache();
 tgt = getTGT();





[01/50] [abbrv] hadoop git commit: YARN-8204. Added a flag to disable YARN service upgrade. Contributed by Chandni Singh

2018-05-04 Thread xkrogen
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12943 f8ee2123d -> f7f27391e


YARN-8204.  Added a flag to disable YARN service upgrade.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14b47990
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14b47990
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14b47990

Branch: refs/heads/HDFS-12943
Commit: 14b47990af39de71b0a09d995208f45ea3b79c23
Parents: 9ab3f97
Author: Eric Yang 
Authored: Fri Apr 27 12:23:56 2018 -0400
Committer: Eric Yang 
Committed: Fri Apr 27 12:24:43 2018 -0400

--
 .../yarn/service/client/ServiceClient.java  |  7 +
 .../yarn/service/conf/YarnServiceConf.java  |  7 +
 .../yarn/service/exceptions/ErrorStrings.java   |  2 ++
 .../yarn/service/TestYarnNativeServices.java|  1 +
 .../yarn/service/client/TestServiceClient.java  | 28 +---
 5 files changed, 41 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b47990/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 52cd369..8dd5342 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -74,6 +74,7 @@ import 
org.apache.hadoop.yarn.service.containerlaunch.ClasspathConstructor;
 import org.apache.hadoop.yarn.service.containerlaunch.JavaCommandLineBuilder;
 import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException;
 import org.apache.hadoop.yarn.service.exceptions.BadConfigException;
+import org.apache.hadoop.yarn.service.exceptions.ErrorStrings;
 import org.apache.hadoop.yarn.service.exceptions.SliderException;
 import org.apache.hadoop.yarn.service.provider.AbstractClientProvider;
 import org.apache.hadoop.yarn.service.provider.ProviderUtils;
@@ -224,6 +225,12 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
 
   public int initiateUpgrade(Service service) throws YarnException,
   IOException {
+boolean upgradeEnabled = getConfig().getBoolean(
+YARN_SERVICE_UPGRADE_ENABLED,
+YARN_SERVICE_UPGRADE_ENABLED_DEFAULT);
+if (!upgradeEnabled) {
+  throw new YarnException(ErrorStrings.SERVICE_UPGRADE_DISABLED);
+}
 Service persistedService =
 ServiceApiUtil.loadService(fs, service.getName());
 if (!StringUtils.isEmpty(persistedService.getId())) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b47990/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
index 55a3d70..13ed1aa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
@@ -128,6 +128,13 @@ public class YarnServiceConf {
   YARN_SERVICE_PREFIX + "container-health-threshold.";
 
   /**
+   * Upgrade feature enabled for services.
+   */
+  public static final String YARN_SERVICE_UPGRADE_ENABLED =
+  "yarn.service.upgrade.enabled";
+  public static final boolean YARN_SERVICE_UPGRADE_ENABLED_DEFAULT = false;
+
+  /**
* The container health threshold percent when explicitly set for a specific
* component or globally for all 
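
For reference, a tiny sketch of opting into the new flag; the key and the
false default come from the YarnServiceConf hunk above, and the client
wiring is omitted.

import org.apache.hadoop.conf.Configuration;

public class UpgradeFlagDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Defaults to false (YARN_SERVICE_UPGRADE_ENABLED_DEFAULT), so service
    // upgrades must be opted into explicitly.
    conf.setBoolean("yarn.service.upgrade.enabled", true);
    System.out.println(
        conf.getBoolean("yarn.service.upgrade.enabled", false));
  }
}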

hadoop git commit: HADOOP-15444 ITestS3GuardToolDynamo should only run with -Ddynamo (Aaron Fabbri)

2018-05-04 Thread fabbri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8cdb032af -> 96c843f64


HADOOP-15444 ITestS3GuardToolDynamo should only run with -Ddynamo (Aaron Fabbri)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96c843f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96c843f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96c843f6

Branch: refs/heads/trunk
Commit: 96c843f64bb424cd7544be0ccda16a6755c086de
Parents: 8cdb032
Author: Aaron Fabbri 
Authored: Fri May 4 11:34:37 2018 -0700
Committer: Aaron Fabbri 
Committed: Fri May 4 11:34:45 2018 -0700

--
 .../hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96c843f6/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
index c7dffd2..821bba5 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
@@ -28,6 +28,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import com.amazonaws.services.dynamodbv2.document.DynamoDB;
 import com.amazonaws.services.dynamodbv2.document.Table;
 import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException;
+import org.junit.Assume;
 import org.junit.Test;
 
 import org.apache.hadoop.conf.Configuration;
@@ -51,6 +52,14 @@ public class ITestS3GuardToolDynamoDB extends 
AbstractS3GuardToolTestBase {
 return new DynamoDBMetadataStore();
   }
 
+  @Override
+  public void setup() throws Exception {
+super.setup();
+Assume.assumeTrue("Test only applies when DynamoDB is used for S3Guard",
+getConfiguration().get(Constants.S3_METADATA_STORE_IMPL).equals(
+Constants.S3GUARD_METASTORE_DYNAMO));
+  }
+
   // Check the existence of a given DynamoDB table.
   private static boolean exist(DynamoDB dynamoDB, String tableName) {
 assertNotNull(dynamoDB);
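
The JUnit 4 Assume idiom used in setup(), shown in isolation: a failed
assumption marks tests as skipped rather than failed. The test class and
the system-property guard below are illustrative, not part of the patch.

import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;

public class AssumeDemoTest {
  @Before
  public void setup() {
    // Skips (rather than fails) every test in this class unless the run
    // opted in, mirroring the DynamoDB-only guard above.
    Assume.assumeTrue("Test only applies when DynamoDB is used for S3Guard",
        Boolean.getBoolean("test.dynamo"));
  }

  @Test
  public void testSomething() {
    // Runs only when -Dtest.dynamo=true is set.
  }
}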





hadoop git commit: YARN-8079. Support static and archive unmodified local resources in service AM. Contributed by Suma Shivaprasad

2018-05-04 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 a984abc47 -> e933ed0ee


YARN-8079. Support static and archive unmodified local resources in service AM. 
Contributed by Suma Shivaprasad

(cherry picked from commit 6795f8072ffbe6138857e77d51af173f33e4e5c1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e933ed0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e933ed0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e933ed0e

Branch: refs/heads/branch-3.1
Commit: e933ed0ee372dec628f1fabc78590486355292bd
Parents: a984abc
Author: Billie Rinaldi 
Authored: Fri May 4 09:27:07 2018 -0700
Committer: Billie Rinaldi 
Committed: Fri May 4 09:47:06 2018 -0700

--
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |   2 +
 .../yarn/service/api/records/ConfigFile.java|   3 +-
 .../yarn/service/conf/YarnServiceConstants.java |   1 +
 .../provider/AbstractClientProvider.java|  23 ++-
 .../provider/AbstractProviderService.java   |   4 +
 .../yarn/service/provider/ProviderUtils.java|  91 --
 .../service/provider/TestProviderUtils.java | 164 +++
 .../providers/TestAbstractClientProvider.java   |  44 +
 .../markdown/yarn-service/YarnServiceAPI.md |   4 +-
 9 files changed, 321 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e933ed0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index 8c5ad65..cea8296 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -475,6 +475,8 @@ definitions:
   - YAML
   - TEMPLATE
   - HADOOP_XML
+  - STATIC
+  - ARCHIVE
   dest_file:
 type: string
 description: The path that this configuration file should be created 
as. If it is an absolute path, it will be mounted into the DOCKER container. 
Absolute paths are only allowed for DOCKER containers.  If it is a relative 
path, only the file name should be provided, and the file will be created in 
the container local working directory under a folder named conf.

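To make the new enum values concrete: a service definition can now declare an unmodified file (STATIC) or an archive to be unpacked (ARCHIVE) alongside the templated config types. A rough usage sketch against the generated `ConfigFile` record (the fluent setters are assumed from the records API and the HDFS paths are hypothetical, not taken from this commit):

```java
import org.apache.hadoop.yarn.service.api.records.ConfigFile;

public class ConfigFileTypesExample {
  public static void main(String[] args) {
    // STATIC: localized as-is, with no template or XML processing.
    ConfigFile binary = new ConfigFile()
        .type(ConfigFile.TypeEnum.STATIC)
        .srcFile("hdfs:///apps/myservice/tool.bin")      // hypothetical path
        .destFile("tool.bin");  // relative paths land under the conf folder

    // ARCHIVE: localized and expanded in the container.
    ConfigFile assets = new ConfigFile()
        .type(ConfigFile.TypeEnum.ARCHIVE)
        .srcFile("hdfs:///apps/myservice/assets.tar.gz") // hypothetical path
        .destFile("assets");

    System.out.println(binary.getType() + ", " + assets.getType());
  }
}
```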
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e933ed0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
index d3b18bc..623feed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
@@ -55,7 +55,8 @@ public class ConfigFile implements Serializable {
   @XmlEnum
   public enum TypeEnum {
 XML("XML"), PROPERTIES("PROPERTIES"), JSON("JSON"), YAML("YAML"), TEMPLATE(
-"TEMPLATE"), HADOOP_XML("HADOOP_XML");
+"TEMPLATE"), HADOOP_XML("HADOOP_XML"), STATIC("STATIC"), ARCHIVE(
+"ARCHIVE");
 
 private String value;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e933ed0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java
--
diff --git 

hadoop git commit: YARN-8223. Improved yarn auxiliary service to load jar file from HDFS. Contributed by Zian Chen

2018-05-04 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 04c3f11de -> a984abc47


YARN-8223.  Improved yarn auxiliary service to load jar file from HDFS.
Contributed by Zian Chen

(cherry picked from commit 8cdb032aff4237d8d3970057d82290e4e32c4040)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a984abc4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a984abc4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a984abc4

Branch: refs/heads/branch-3.1
Commit: a984abc4724fe3862e29560cfa59c5086d91c3de
Parents: 04c3f11
Author: Eric Yang 
Authored: Fri May 4 12:36:31 2018 -0400
Committer: Eric Yang 
Committed: Fri May 4 12:38:28 2018 -0400

--
 .../PluggableShuffleAndPluggableSort.md | 44 
 .../containermanager/AuxServices.java   | 19 -
 2 files changed, 61 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a984abc4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
index 5ea0567..9e24103 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
@@ -67,6 +67,50 @@ The collector class configuration may specify a 
comma-separated list of collecto
 |: |: |: |
 | `yarn.nodemanager.aux-services` | `...,mapreduce_shuffle` | The auxiliary 
service name |
 | `yarn.nodemanager.aux-services.mapreduce_shuffle.class` | 
`org.apache.hadoop.mapred.ShuffleHandler` | The auxiliary service class to use |
+| `yarn.nodemanager.aux-services.%s.classpath` | NONE | Local directory that contains the service's jar file and all of its dependency jars. Either a single jar file can be specified, or `/dep/*` can be used to load all jars under the dep directory. |
+| `yarn.nodemanager.aux-services.%s.remote-classpath` | NONE | The absolute or relative path to the jar file on a remote filesystem such as HDFS |
+
+#### Example of loading jar file from HDFS:
+
+```xml
+<configuration>
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle,AuxServiceFromHDFS</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.remote-classpath</name>
+    <value>/aux/test/aux-service-hdfs.jar</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.class</name>
+    <value>org.apache.auxtest.AuxServiceFromHDFS2</value>
+  </property>
+</configuration>
+```
+
+#### Example of loading jar file from local file system:
+
+```xml
+<configuration>
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle,AuxServiceFromHDFS</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.classpath</name>
+    <value>/aux/test/aux-service-hdfs.jar</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.class</name>
+    <value>org.apache.auxtest.AuxServiceFromHDFS2</value>
+  </property>
+</configuration>
+```
 
 **IMPORTANT:** If setting an auxiliary service in addition to the default
 `mapreduce_shuffle` service, then a new service key should be added to the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a984abc4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index c8b7a76..3fe3cfd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -230,15 +230,30 @@ public class AuxServices extends AbstractService
           }
         }
         if (reDownload) {
+          LocalResourceType srcType = null;
+          String lowerDst = StringUtils.toLowerCase(src.toString());

hadoop git commit: YARN-8223. Improved yarn auxiliary service to load jar file from HDFS. Contributed by Zian Chen

2018-05-04 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6795f8072 -> 8cdb032af


YARN-8223.  Improved yarn auxiliary service to load jar file from HDFS.
Contributed by Zian Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cdb032a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cdb032a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cdb032a

Branch: refs/heads/trunk
Commit: 8cdb032aff4237d8d3970057d82290e4e32c4040
Parents: 6795f80
Author: Eric Yang 
Authored: Fri May 4 12:36:31 2018 -0400
Committer: Eric Yang 
Committed: Fri May 4 12:36:31 2018 -0400

--
 .../PluggableShuffleAndPluggableSort.md | 44 
 .../containermanager/AuxServices.java   | 19 -
 2 files changed, 61 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cdb032a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
index 5ea0567..9e24103 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
@@ -67,6 +67,50 @@ The collector class configuration may specify a 
comma-separated list of collecto
 |: |: |: |
 | `yarn.nodemanager.aux-services` | `...,mapreduce_shuffle` | The auxiliary 
service name |
 | `yarn.nodemanager.aux-services.mapreduce_shuffle.class` | 
`org.apache.hadoop.mapred.ShuffleHandler` | The auxiliary service class to use |
+| `yarn.nodemanager.aux-services.%s.classpath` | NONE | Local directory that contains the service's jar file and all of its dependency jars. Either a single jar file can be specified, or `/dep/*` can be used to load all jars under the dep directory. |
+| `yarn.nodemanager.aux-services.%s.remote-classpath` | NONE | The absolute or relative path to the jar file on a remote filesystem such as HDFS |
+
+#### Example of loading jar file from HDFS:
+
+```xml
+<configuration>
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle,AuxServiceFromHDFS</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.remote-classpath</name>
+    <value>/aux/test/aux-service-hdfs.jar</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.class</name>
+    <value>org.apache.auxtest.AuxServiceFromHDFS2</value>
+  </property>
+</configuration>
+```
+
+#### Example of loading jar file from local file system:
+
+```xml
+<configuration>
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle,AuxServiceFromHDFS</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.classpath</name>
+    <value>/aux/test/aux-service-hdfs.jar</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.AuxServiceFromHDFS.class</name>
+    <value>org.apache.auxtest.AuxServiceFromHDFS2</value>
+  </property>
+</configuration>
+```
 
 **IMPORTANT:** If setting an auxiliary service in addition to the default
 `mapreduce_shuffle` service, then a new service key should be added to the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cdb032a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index c8b7a76..3fe3cfd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -230,15 +230,30 @@ public class AuxServices extends AbstractService
           }
         }
         if (reDownload) {
+          LocalResourceType srcType = null;
+          String lowerDst = StringUtils.toLowerCase(src.toString());
+          if (lowerDst.endsWith(".jar")) {
+            srcType = LocalResourceType.FILE;

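The hunk above (cut off by the archive) chooses a YARN `LocalResourceType` for the re-downloaded aux-service resource based on its file extension. Reconstructed as a standalone sketch so the logic is visible in one place (the exact extension list is an assumption inferred from the diff, not verbatim source):

```java
// Sketch of extension-based classification for a remote aux-service resource.
public class AuxResourceClassifier {
  enum Kind { FILE, ARCHIVE, UNKNOWN }

  static Kind classify(String path) {
    String lower = path.toLowerCase();
    if (lower.endsWith(".jar")) {
      return Kind.FILE;       // a single jar is localized as a plain file
    } else if (lower.endsWith(".zip") || lower.endsWith(".tar.gz")
        || lower.endsWith(".tgz") || lower.endsWith(".tar")) {
      return Kind.ARCHIVE;    // archives are localized and unpacked
    }
    return Kind.UNKNOWN;
  }

  public static void main(String[] args) {
    System.out.println(classify("/aux/test/aux-service-hdfs.jar")); // FILE
  }
}
```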
hadoop git commit: YARN-8079. Support static and archive unmodified local resources in service AM. Contributed by Suma Shivaprasad

2018-05-04 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/trunk 502914ca3 -> 6795f8072


YARN-8079. Support static and archive unmodified local resources in service AM. 
Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6795f807
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6795f807
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6795f807

Branch: refs/heads/trunk
Commit: 6795f8072ffbe6138857e77d51af173f33e4e5c1
Parents: 502914c
Author: Billie Rinaldi 
Authored: Fri May 4 09:27:07 2018 -0700
Committer: Billie Rinaldi 
Committed: Fri May 4 09:27:07 2018 -0700

--
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |   2 +
 .../yarn/service/api/records/ConfigFile.java|   3 +-
 .../yarn/service/conf/YarnServiceConstants.java |   1 +
 .../provider/AbstractClientProvider.java|  23 ++-
 .../provider/AbstractProviderService.java   |   4 +
 .../yarn/service/provider/ProviderUtils.java|  91 --
 .../service/provider/TestProviderUtils.java | 164 +++
 .../providers/TestAbstractClientProvider.java   |  44 +
 .../markdown/yarn-service/YarnServiceAPI.md |   4 +-
 9 files changed, 321 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index 8c5ad65..cea8296 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -475,6 +475,8 @@ definitions:
   - YAML
   - TEMPLATE
   - HADOOP_XML
+  - STATIC
+  - ARCHIVE
   dest_file:
 type: string
 description: The path that this configuration file should be created 
as. If it is an absolute path, it will be mounted into the DOCKER container. 
Absolute paths are only allowed for DOCKER containers.  If it is a relative 
path, only the file name should be provided, and the file will be created in 
the container local working directory under a folder named conf.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
index d3b18bc..623feed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
@@ -55,7 +55,8 @@ public class ConfigFile implements Serializable {
   @XmlEnum
   public enum TypeEnum {
 XML("XML"), PROPERTIES("PROPERTIES"), JSON("JSON"), YAML("YAML"), TEMPLATE(
-"TEMPLATE"), HADOOP_XML("HADOOP_XML");
+"TEMPLATE"), HADOOP_XML("HADOOP_XML"), STATIC("STATIC"), ARCHIVE(
+"ARCHIVE");
 
 private String value;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6795f807/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java
 

hadoop git commit: YARN-7818. Remove privileged operation warnings during container launch for the ContainerRuntimes. Contributed by Shane Kumpf

2018-05-04 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 524dde400 -> 04c3f11de


YARN-7818. Remove privileged operation warnings during container launch for the 
ContainerRuntimes. Contributed by Shane Kumpf

(cherry picked from commit 502914ca32ac02b19116fd681eb8301b92fccbb3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04c3f11d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04c3f11d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04c3f11d

Branch: refs/heads/branch-3.1
Commit: 04c3f11de41f361be839f4ca926135610594c9ff
Parents: 524dde4
Author: Billie Rinaldi 
Authored: Fri May 4 08:53:55 2018 -0700
Committer: Billie Rinaldi 
Committed: Fri May 4 08:55:30 2018 -0700

--
 .../linux/runtime/DefaultLinuxContainerRuntime.java   |  5 +++--
 .../linux/runtime/DockerLinuxContainerRuntime.java| 14 +++---
 2 files changed, 10 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04c3f11d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
index d8db6ad..b5c933a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
@@ -108,6 +108,9 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
       launchOp.appendArgs(tcCommandFile);
     }
 
+    // Some failures here are acceptable. Let the calling executor decide.
+    launchOp.disableFailureLogging();
+
     //List<String> -> stored as List -> fetched/converted to List<String>
     //we can't do better here thanks to type-erasure
     @SuppressWarnings("unchecked")
@@ -118,8 +121,6 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
       privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
           launchOp, null, null, false, false);
     } catch (PrivilegedOperationException e) {
-      LOG.warn("Launch container failed. Exception: ", e);
-
       throw new ContainerExecutionException("Launch container failed", e
           .getExitCode(), e.getOutput(), e.getErrorOutput());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04c3f11d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 33fdbd3..c04f02a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -909,13 +909,13 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     PrivilegedOperation launchOp = buildLaunchOp(ctx,
         commandFile, runCommand);
 
+    // Some failures here are acceptable. Let the calling executor decide.
+    launchOp.disableFailureLogging();
+
     try {
       privilegedOperationExecutor.executePrivilegedOperation(null,
           launchOp, null, null, false, false);
     } catch (PrivilegedOperationException e) {
-      LOG.warn("Launch container failed. Exception: ", e);

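Both hunks apply the same design choice: stop logging at the throw site and carry the full context (exit code, output) in the exception instead, so the calling executor is the one place that decides whether a failure is log-worthy. A generic, self-contained sketch of that error-handling style (all names hypothetical, not the Hadoop classes):

```java
// Sketch of "don't log where you throw; let the caller decide".
public class LaunchExample {
  static class LaunchException extends Exception {
    final int exitCode;
    LaunchException(String msg, int exitCode, Throwable cause) {
      super(msg, cause);
      this.exitCode = exitCode;
    }
  }

  // Low level: wraps the failure with context but does NOT log it.
  static void launch(boolean fail) throws LaunchException {
    try {
      if (fail) {
        throw new RuntimeException("exec failed");
      }
    } catch (RuntimeException e) {
      throw new LaunchException("Launch container failed", 29, e);
    }
  }

  // High level: the single place that decides what to report, and how.
  public static void main(String[] args) {
    try {
      launch(true);
    } catch (LaunchException e) {
      System.err.println("Launch failed, exit=" + e.exitCode
          + ", cause=" + e.getCause());
    }
  }
}
```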
hadoop git commit: YARN-7818. Remove privileged operation warnings during container launch for the ContainerRuntimes. Contributed by Shane Kumpf

2018-05-04 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/trunk a3b416f69 -> 502914ca3


YARN-7818. Remove privileged operation warnings during container launch for the 
ContainerRuntimes. Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/502914ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/502914ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/502914ca

Branch: refs/heads/trunk
Commit: 502914ca32ac02b19116fd681eb8301b92fccbb3
Parents: a3b416f
Author: Billie Rinaldi 
Authored: Fri May 4 08:53:55 2018 -0700
Committer: Billie Rinaldi 
Committed: Fri May 4 08:53:55 2018 -0700

--
 .../linux/runtime/DefaultLinuxContainerRuntime.java   |  5 +++--
 .../linux/runtime/DockerLinuxContainerRuntime.java| 14 +++---
 2 files changed, 10 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/502914ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
index d8db6ad..b5c933a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
@@ -108,6 +108,9 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
       launchOp.appendArgs(tcCommandFile);
     }
 
+    // Some failures here are acceptable. Let the calling executor decide.
+    launchOp.disableFailureLogging();
+
     //List<String> -> stored as List -> fetched/converted to List<String>
     //we can't do better here thanks to type-erasure
     @SuppressWarnings("unchecked")
@@ -118,8 +121,6 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
       privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
           launchOp, null, null, false, false);
     } catch (PrivilegedOperationException e) {
-      LOG.warn("Launch container failed. Exception: ", e);
-
       throw new ContainerExecutionException("Launch container failed", e
          .getExitCode(), e.getOutput(), e.getErrorOutput());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/502914ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index ec1d055..0bacd03 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -914,13 +914,13 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     PrivilegedOperation launchOp = buildLaunchOp(ctx,
         commandFile, runCommand);
 
+    // Some failures here are acceptable. Let the calling executor decide.
+    launchOp.disableFailureLogging();
+
     try {
       privilegedOperationExecutor.executePrivilegedOperation(null,
           launchOp, null, null, false, false);
     } catch (PrivilegedOperationException e) {
-      LOG.warn("Launch container failed. Exception: ", e);
-      LOG.info("Docker command used: " + runCommand);
-
   throw new