hadoop git commit: YARN-4092. Fixed UI redirection to print useful messages when both RMs are in standby mode. Contributed by Xuan Gong

2015-09-01 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 6c9e26609 -> 6b3b487d3


YARN-4092. Fixed UI redirection to print useful messages when both RMs are in 
standby mode. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b3b487d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b3b487d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b3b487d

Branch: refs/heads/branch-2.6
Commit: 6b3b487d3f4883a6e849c71886da52c4c4d9f0bf
Parents: 6c9e266
Author: Jian He 
Authored: Mon Aug 31 22:06:01 2015 -0700
Committer: Jian He 
Committed: Mon Aug 31 22:06:01 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../hadoop/yarn/client/TestRMFailover.java  | 25 ++
 .../hadoop/yarn/webapp/YarnWebParams.java   |  1 +
 .../resourcemanager/webapp/RMWebAppFilter.java  | 88 +++-
 4 files changed, 114 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b3b487d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 74d2441..de63e89 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.6.2 - UNRELEASED
 
   IMPROVEMENTS
 
+YARN-4092. Fixed UI redirection to print useful messages when both RMs are
+in standby mode. (Xuan Gong via jianhe)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b3b487d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index 0634cc3..c2089b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -285,6 +286,7 @@ public class TestRMFailover extends ClientBaseWithFixes {
    getAdminService(0).transitionToActive(req);
    String rm1Url = "http://0.0.0.0:18088";
    String rm2Url = "http://0.0.0.0:28088";
+
    String header = getHeader("Refresh", rm2Url);
    assertTrue(header.contains("; url=" + rm1Url));
 
@@ -323,6 +325,16 @@ public class TestRMFailover extends ClientBaseWithFixes {
 
    // Due to the limitation of MiniYARNCluster, where the dispatcher is a
    // singleton, we couldn't add the test case after explicitFailover();
+
+    // transition the active RM to standby so that both RMs are in standby mode
+    getAdminService(0).transitionToStandby(req);
+    // RM2 is expected to send the HTTP request to itself, and the
+    // "Refresh" header field is expected to be set.
+    String redirectURL = getRefreshURL(rm2Url);
+    assertTrue(redirectURL != null
+        && redirectURL.contains(YarnWebParams.NEXT_REFRESH_INTERVAL)
+        && redirectURL.contains(rm2Url));
   }
 
   static String getHeader(String field, String url) {
@@ -337,4 +349,17 @@ public class TestRMFailover extends ClientBaseWithFixes {
 return fieldHeader;
   }
 
+  static String getRefreshURL(String url) {
+    String redirectUrl = null;
+    try {
+      HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
+      // do not automatically follow the redirection
+      // otherwise we get too many redirections exception
+      conn.setInstanceFollowRedirects(false);
+      redirectUrl = conn.getHeaderField("Refresh");
+    } catch (Exception e) {
+      // throw new RuntimeException(e);
+    }
+    return redirectUrl;
+  }
 }
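The interesting case this test adds is the both-standby one: instead of bouncing the browser between two standby RMs forever, the patched filter answers with an HTTP "Refresh" header that points back at the standby RM itself and carries YarnWebParams.NEXT_REFRESH_INTERVAL, so the page re-polls until an RM becomes active. Below is a minimal sketch of that header-based retry pattern; the class name, interval value, and query-parameter spelling are illustrative assumptions, not code from the patch.

    import java.io.IOException;
    import javax.servlet.http.HttpServlet;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    // Hypothetical standby handler: ask the client to re-poll this same
    // URL after a delay instead of redirecting in a loop.
    public class StandbyRefreshSketch extends HttpServlet {
      private static final int NEXT_REFRESH_INTERVAL_SECS = 3;

      @Override
      protected void doGet(HttpServletRequest req, HttpServletResponse resp)
          throws IOException {
        String self = req.getRequestURL()
            + "?next.refresh.interval=" + NEXT_REFRESH_INTERVAL_SECS;
        // "Refresh: <seconds>; url=<target>" tells the browser to retry later.
        resp.setHeader("Refresh", NEXT_REFRESH_INTERVAL_SECS + "; url=" + self);
        resp.getWriter().println("This is standby RM; retrying " + self);
      }
    }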

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b3b487d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
 

hadoop git commit: HDFS-7009. Active NN and standby NN have different live nodes. Contributed by Ming Ma.

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 4fcf71c1e -> be4d4c9cb


HDFS-7009. Active NN and standby NN have different live nodes. Contributed by 
Ming Ma.

(cherry picked from commit 769507bd7a501929d9a2fd56c72c3f50673488a4)
(cherry picked from commit 657a6e389b3f6eae43efb11deb6253c3b1255a51)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java

(cherry picked from commit d5ddc3450f2f49ea411de590ff3de15b5ec4e17c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be4d4c9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be4d4c9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be4d4c9c

Branch: refs/heads/branch-2.6.1
Commit: be4d4c9cbf23f0b1de31f19e49a3c2944c6c8657
Parents: 4fcf71c
Author: cnauroth 
Authored: Mon Feb 23 15:12:27 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 11:23:26 2015 -0700

--
 .../main/java/org/apache/hadoop/ipc/Client.java |   3 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../TestDatanodeProtocolRetryPolicy.java| 231 +++
 3 files changed, 236 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be4d4c9c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 96da01c..8a98eb0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -25,6 +25,7 @@ import java.io.BufferedOutputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.EOFException;
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -279,7 +280,7 @@ public class Client {
   /** Check the rpc response header. */
   void checkResponse(RpcResponseHeaderProto header) throws IOException {
    if (header == null) {
-      throw new IOException("Response is null.");
+      throw new EOFException("Response is null.");
    }
    if (header.hasClientId()) {
      // check client IDs
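
The one-line change above matters because of how callers classify failures: a null response header means the server closed the connection mid-call, and EOFException lets connection-aware retry logic treat that as a transport failure to retry, whereas a plain IOException looked like a fatal protocol error (which is how the active and standby NN ended up with different live-node views). A hedged, generic sketch of that distinction, not Hadoop's actual RetryPolicies machinery:

    import java.io.EOFException;
    import java.io.IOException;

    // Illustrative only: retry connection-level failures (EOF on a closed
    // socket), but let every other IOException propagate immediately.
    public final class RetryOnEofSketch {
      interface Call<T> { T run() throws IOException; }

      static <T> T withRetries(Call<T> call, int maxRetries) throws IOException {
        for (int attempt = 0; ; attempt++) {
          try {
            return call.run();
          } catch (EOFException e) {
            // Connection dropped mid-call; safe to retry against the server.
            if (attempt >= maxRetries) {
              throw e;
            }
          }
        }
      }
    }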

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be4d4c9c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b6cb70b..3731fe9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -76,6 +76,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7788. Post-2.6 namenode may not start up with an image containing
 inodes created with an old release. (Rushabh Shah via kihwal)
 
+HDFS-7009. Active NN and standby NN have different live nodes.
+(Ming Ma via cnauroth)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be4d4c9c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
new file mode 100644
index 000..c7ed5b9
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
@@ -0,0 +1,231 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ 

hadoop git commit: HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be closed in finally block. Contributed by Kiran Kumar M R and Sanghyun Yun.

2015-09-01 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 bcb98ba08 -> 6078172fe


HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be closed 
in finally block. Contributed by Kiran Kumar M R and Sanghyun Yun.

(cherry picked from commit dd149adeace8727864371c5a1484c6534f8b450b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6078172f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6078172f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6078172f

Branch: refs/heads/branch-2.7
Commit: 6078172fee6b0843cf4aa0fce5090d728ab7e53a
Parents: bcb98ba
Author: Tsuyoshi Ozawa 
Authored: Wed Sep 2 02:01:51 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Wed Sep 2 02:02:28 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../src/main/java/org/apache/hadoop/fs/FileUtil.java  | 14 +++---
 2 files changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6078172f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index cdc7b66..010ba3c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -35,6 +35,9 @@ Release 2.7.2 - UNRELEASED
 HADOOP-12359. hadoop fs -getmerge doc is wrong.
 (Jagadesh Kiran N via aajisaka)
 
+HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be
+closed in finally block. (Kiran Kumar M R and Sanghyun Yun via ozawa)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6078172f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 91f00e1..23fb946 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -729,15 +729,15 @@ public class FileUtil {
 
    int count;
    byte data[] = new byte[2048];
-    BufferedOutputStream outputStream = new BufferedOutputStream(
-        new FileOutputStream(outputFile));
+    try (BufferedOutputStream outputStream = new BufferedOutputStream(
+        new FileOutputStream(outputFile));) {

-    while ((count = tis.read(data)) != -1) {
-      outputStream.write(data, 0, count);
-    }
+      while ((count = tis.read(data)) != -1) {
+        outputStream.write(data, 0, count);
+      }

-    outputStream.flush();
-    outputStream.close();
+      outputStream.flush();
+    }
   }
   
   /**
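The same change lands on three branches in this digest (branch-2.7, branch-2, trunk). The point of the rewrite is that try-with-resources closes the stream on every exit path, including when read() or write() throws, where the old code leaked the stream on exception. A self-contained illustration of the pattern (the names are mine, not FileUtil's):

    import java.io.BufferedOutputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public final class CopySketch {
      static void copyTo(InputStream in, String outputFile) throws IOException {
        byte[] data = new byte[2048];
        int count;
        try (BufferedOutputStream out =
            new BufferedOutputStream(new FileOutputStream(outputFile))) {
          while ((count = in.read(data)) != -1) {
            out.write(data, 0, count);
          }
          // The implicit close() flushes the buffer, so no explicit
          // flush() is strictly required before the block ends.
        }
      }
    }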



hadoop git commit: HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be closed in finally block. Contributed by Kiran Kumar M R and Sanghyun Yun.

2015-09-01 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 236c4ab51 -> b1499ab0f


HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be closed 
in finally block. Contributed by Kiran Kumar M R and Sanghyun Yun.

(cherry picked from commit dd149adeace8727864371c5a1484c6534f8b450b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1499ab0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1499ab0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1499ab0

Branch: refs/heads/branch-2
Commit: b1499ab0fe8384493404ae36c17006ea34c2e113
Parents: 236c4ab
Author: Tsuyoshi Ozawa 
Authored: Wed Sep 2 02:01:51 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Wed Sep 2 02:02:10 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../src/main/java/org/apache/hadoop/fs/FileUtil.java  | 14 +++---
 2 files changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1499ab0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7c12a47..75206af 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -626,6 +626,9 @@ Release 2.7.2 - UNRELEASED
 HADOOP-12359. hadoop fs -getmerge doc is wrong.
 (Jagadesh Kiran N via aajisaka)
 
+HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be
+closed in finally block. (Kiran Kumar M R and Sanghyun Yun via ozawa)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1499ab0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 543f843..aade933 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -738,15 +738,15 @@ public class FileUtil {
 
    int count;
    byte data[] = new byte[2048];
-    BufferedOutputStream outputStream = new BufferedOutputStream(
-        new FileOutputStream(outputFile));
+    try (BufferedOutputStream outputStream = new BufferedOutputStream(
+        new FileOutputStream(outputFile));) {

-    while ((count = tis.read(data)) != -1) {
-      outputStream.write(data, 0, count);
-    }
+      while ((count = tis.read(data)) != -1) {
+        outputStream.write(data, 0, count);
+      }

-    outputStream.flush();
-    outputStream.close();
+      outputStream.flush();
+    }
   }
   
   /**



hadoop git commit: HADOOP-11604. Prevent ConcurrentModificationException while closing domain sockets during shutdown of DomainSocketWatcher thread. Contributed by Chris Nauroth.

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 02e0b6e30 -> 342504f79


HADOOP-11604. Prevent ConcurrentModificationException while closing domain 
sockets during shutdown of DomainSocketWatcher thread. Contributed by Chris 
Nauroth.

(cherry picked from commit 3c5ff0759c4f4e10c97c6d9036add00edb8be2b5)
(cherry picked from commit 187e081d5a8afe1ddfe5d7b5e7de7a94512aa53e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/342504f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/342504f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/342504f7

Branch: refs/heads/branch-2.6.1
Commit: 342504f790e1ae221db82ddaca56164fc8ad0da6
Parents: 02e0b6e
Author: cnauroth 
Authored: Fri Feb 20 13:07:16 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 11:10:41 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../hadoop/net/unix/DomainSocketWatcher.java| 45 --
 .../net/unix/TestDomainSocketWatcher.java   | 65 ++--
 3 files changed, 105 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/342504f7/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5891feb..2dc1520 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -42,6 +42,9 @@ Release 2.6.1 - UNRELEASED
 HADOOP-11295. RPC Server Reader thread can't shutdown if RPCCallQueue is
 full. (Ming Ma via kihwal)
 
+HADOOP-11604. Prevent ConcurrentModificationException while closing domain
+sockets during shutdown of DomainSocketWatcher thread. (cnauroth)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/342504f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
index 0172f6b..8c617dc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
@@ -246,6 +246,13 @@ public final class DomainSocketWatcher implements Closeable {
    this.interruptCheckPeriodMs = interruptCheckPeriodMs;
    notificationSockets = DomainSocket.socketpair();
    watcherThread.setDaemon(true);
+    watcherThread.setUncaughtExceptionHandler(
+        new Thread.UncaughtExceptionHandler() {
+          @Override
+          public void uncaughtException(Thread thread, Throwable t) {
+            LOG.error(thread + " terminating on unexpected exception", t);
+          }
+        });
    watcherThread.start();
   }
 
@@ -372,7 +379,17 @@ public final class DomainSocketWatcher implements 
Closeable {
 }
   }
 
-  private void sendCallback(String caller, TreeMap<Integer, Entry> entries,
+  /**
+   * Send callback and return whether or not the domain socket was closed as a
+   * result of processing.
+   *
+   * @param caller reason for call
+   * @param entries mapping of file descriptor to entry
+   * @param fdSet set of file descriptors
+   * @param fd file descriptor
+   * @return true if the domain socket was closed as a result of processing
+   */
+  private boolean sendCallback(String caller, TreeMap<Integer, Entry> entries,
+      FdSet fdSet, int fd) {
 if (LOG.isTraceEnabled()) {
   LOG.trace(this + ": " + caller + " starting sendCallback for fd " + fd);
@@ -401,13 +418,30 @@ public final class DomainSocketWatcher implements Closeable {
            "still in the poll(2) loop.");
      }
      IOUtils.cleanup(LOG, sock);
-      entries.remove(fd);
      fdSet.remove(fd);
+      return true;
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace(this + ": " + caller + ": sendCallback not " +
            "closing fd " + fd);
      }
+      return false;
+    }
+  }
+
+  /**
+   * Send callback, and if the domain socket was closed as a result of
+   * processing, then also remove the entry for the file descriptor.
+   *
+   * @param caller reason for call
+   * @param entries mapping of file descriptor to entry
+   * @param fdSet set of file descriptors
+   * @param fd file descriptor
+   */
+  private void sendCallbackAndRemove(String caller,
+  TreeMap
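The shape of the fix: sendCallback() now only reports whether the socket was closed, and removal from the entries map is left to the caller (sendCallbackAndRemove(), or the iterating code itself), so the map is never structurally modified while the watcher thread is iterating it. A hedged, generic sketch of that rule, not the DomainSocketWatcher code:

    import java.util.Iterator;
    import java.util.Map;
    import java.util.TreeMap;

    // Never call map.remove(key) while another frame is iterating the same
    // map; either remove through the iterator, or have the callee report
    // what should be removed and let the iterating code do it.
    public final class SafeRemovalSketch {
      static void closeAll(TreeMap<Integer, String> entries) {
        Iterator<Map.Entry<Integer, String>> it = entries.entrySet().iterator();
        while (it.hasNext()) {
          Map.Entry<Integer, String> entry = it.next();
          boolean closed = sendCallback(entry.getValue());  // does NOT remove
          if (closed) {
            it.remove();  // safe removal during iteration
          }
        }
      }

      static boolean sendCallback(String socketName) {
        // Stand-in for the real callback; pretend every socket closes.
        return true;
      }
    }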

hadoop git commit: HADOOP-12369. Point hadoop-project/pom.xml java.security.krb5.conf within target folder.

2015-09-01 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b1499ab0f -> 7d833a305


HADOOP-12369. Point hadoop-project/pom.xml java.security.krb5.conf within 
target folder.

(cherry picked from commit 0eb9b1932590ba6c60ac996d03269dffb4e095cc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d833a30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d833a30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d833a30

Branch: refs/heads/branch-2
Commit: 7d833a305895e098affbf8800a5d6a936bd224ca
Parents: b1499ab
Author: Andrew Wang 
Authored: Tue Sep 1 10:57:32 2015 -0700
Committer: Andrew Wang 
Committed: Tue Sep 1 10:59:39 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 5 -
 hadoop-project/pom.xml   | 2 +-
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml | 2 +-
 3 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d833a30/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 75206af..77a6f78 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -181,6 +181,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12367. Move TestFileUtil's test resources to resources folder.
 (wang via yliu)
 
+HADOOP-12369. Point hadoop-project/pom.xml java.security.krb5.conf
+within target folder. (wang)
+
   BUG FIXES
 
 HADOOP-12124. Add HTrace support for FsShell (cmccabe)
@@ -252,7 +255,7 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12325. RPC Metrics : Add the ability track and log slow RPCs.
 (Anu Engineer via xyao)
 
- OPTIMIZATIONS
+  OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp
 buildListing (Zoran Dimitrijevic via Colin P. McCabe)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d833a30/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index d4dbeee..1a01d72 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1179,7 +1179,7 @@
 ${test.build.classes}
 
 true
-
${basedir}/src/test/resources/krb5.conf
+
${project.build.directory}/test-classes/krb5.conf
 ${java.security.egd}
 
${require.test.libhadoop}
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d833a30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
index bc40195..04f8640 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
@@ -198,7 +198,7 @@
   ${test.build.classes}
 
   true
-  
${basedir}/src/test/resources/krb5.conf
+  
${project.build.directory}/test-classes/krb5.conf
   ${java.security.egd}
   
${require.test.libhadoop}
 



hadoop git commit: HADOOP-12369. Point hadoop-project/pom.xml java.security.krb5.conf within target folder.

2015-09-01 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk dd149adea -> 0eb9b1932


HADOOP-12369. Point hadoop-project/pom.xml java.security.krb5.conf within 
target folder.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0eb9b193
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0eb9b193
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0eb9b193

Branch: refs/heads/trunk
Commit: 0eb9b1932590ba6c60ac996d03269dffb4e095cc
Parents: dd149ad
Author: Andrew Wang 
Authored: Tue Sep 1 10:57:32 2015 -0700
Committer: Andrew Wang 
Committed: Tue Sep 1 10:57:32 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 hadoop-project/pom.xml   | 2 +-
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eb9b193/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 70252d6..e915290 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -762,6 +762,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12367. Move TestFileUtil's test resources to resources folder.
 (wang via yliu)
 
+HADOOP-12369. Point hadoop-project/pom.xml java.security.krb5.conf
+within target folder. (wang)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eb9b193/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 86102c6..9863475 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1132,7 +1132,7 @@
 ${test.build.classes}
 
 true
-
${basedir}/src/test/resources/krb5.conf
+
${project.build.directory}/test-classes/krb5.conf
 ${java.security.egd}
 
${require.test.libhadoop}
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eb9b193/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
index 52994e7..635f693 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
@@ -198,7 +198,7 @@
   ${test.build.classes}
 
   true
-  
${basedir}/src/test/resources/krb5.conf
+  
${project.build.directory}/test-classes/krb5.conf
   ${java.security.egd}
   
${require.test.libhadoop}
 



hadoop git commit: YARN-3238. Connection timeouts to nodemanagers are retried at multiple levels. Contributed by Jason Lowe

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 342504f79 -> 4fcf71c1e


YARN-3238. Connection timeouts to nodemanagers are retried at multiple
levels. Contributed by Jason Lowe

(cherry picked from commit 92d67ace3248930c0c0335070cc71a480c566a36)
(cherry picked from commit fefeba4ac8bed44ce2dd0d3c4f0a99953ff8d4df)
(cherry picked from commit d8f02e1c5c3bcc230d942554b2f4cfbc3ed21526)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4fcf71c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4fcf71c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4fcf71c1

Branch: refs/heads/branch-2.6.1
Commit: 4fcf71c1e715dbd4f5933e8114c8bab2050e9d31
Parents: 342504f
Author: Xuan 
Authored: Sat Feb 21 16:06:12 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 11:19:37 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java  | 1 -
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fcf71c1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5dc5b54..d96a0d9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -82,6 +82,9 @@ Release 2.6.1 - UNRELEASED
 YARN-3207. Secondary filter matches entites which do not have the key being
 filtered for. (Zhijie Shen via xgong)
 
+YARN-3238. Connection timeouts to nodemanagers are retried at
+multiple levels (Jason Lowe via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fcf71c1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
index b6fea62..6024560 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
@@ -72,7 +72,6 @@ public class ServerProxy {
    exceptionToPolicyMap.put(ConnectException.class, retryPolicy);
    exceptionToPolicyMap.put(NoRouteToHostException.class, retryPolicy);
    exceptionToPolicyMap.put(UnknownHostException.class, retryPolicy);
-    exceptionToPolicyMap.put(ConnectTimeoutException.class, retryPolicy);
    exceptionToPolicyMap.put(RetriableException.class, retryPolicy);
    exceptionToPolicyMap.put(SocketException.class, retryPolicy);
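
Context for the deletion: ServerProxy builds a map from exception class to retry policy, and the RPC layer underneath already retries connection timeouts on its own, so also listing ConnectTimeoutException here retried the same failure at two levels. A simplified sketch of the exception-to-policy pattern using Hadoop's RetryPolicies (not the full ServerProxy code):

    import java.net.ConnectException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    public final class RetryMapSketch {
      static RetryPolicy buildPolicy(long retryIntervalMs, int maxRetries) {
        RetryPolicy retryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            maxRetries, retryIntervalMs, TimeUnit.MILLISECONDS);
        // Only exception types listed here are retried; everything else
        // falls through to the default policy and fails fast.
        Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
            new HashMap<Class<? extends Exception>, RetryPolicy>();
        exceptionToPolicyMap.put(ConnectException.class, retryPolicy);
        return RetryPolicies.retryByException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
      }
    }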
 



hadoop git commit: HDFS-7763. fix zkfc hung issue due to not catching exception in a corner case. Contributed by Liang Xie.

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 be4d4c9cb -> 406ec495b


HDFS-7763. fix zkfc hung issue due to not catching exception in a corner case. 
Contributed by Liang Xie.

(cherry picked from commit 7105ebaa9f370db04962a1e19a67073dc080433b)
(cherry picked from commit efb7e287f45c6502f293456034a37d9209a917be)
(cherry picked from commit fd70e4db105e140fc3d60042abb3f598c9afd13f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/406ec495
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/406ec495
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/406ec495

Branch: refs/heads/branch-2.6.1
Commit: 406ec495bb5af0dd838f28589716d13a776eadd8
Parents: be4d4c9
Author: Andrew Wang 
Authored: Tue Feb 24 15:31:13 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 11:30:04 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../apache/hadoop/hdfs/tools/DFSZKFailoverController.java   | 9 +++--
 2 files changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/406ec495/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3731fe9..09122f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -79,6 +79,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7009. Active NN and standby NN have different live nodes.
 (Ming Ma via cnauroth)
 
+HDFS-7763. fix zkfc hung issue due to not catching exception in a corner
+case. (Liang Xie via wang)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/406ec495/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
index a42b1e3..85f77f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
@@ -176,8 +176,13 @@ public class DFSZKFailoverController extends 
ZKFailoverController {
        new HdfsConfiguration(), args);
    DFSZKFailoverController zkfc = DFSZKFailoverController.create(
        parser.getConfiguration());
-
-    System.exit(zkfc.run(parser.getRemainingArgs()));
+    int retCode = 0;
+    try {
+      retCode = zkfc.run(parser.getRemainingArgs());
+    } catch (Throwable t) {
+      LOG.fatal("Got a fatal error, exiting now", t);
+    }
+    System.exit(retCode);
   }
 
   @Override
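The pattern being applied: if main() dies from an uncaught Throwable while non-daemon threads are still running, the JVM never exits and the ZKFC looks hung. Catching Throwable and always reaching System.exit() guarantees the process terminates. A generic, self-contained sketch (run() is a stand-in for zkfc.run()):

    public final class MainExitSketch {
      public static void main(String[] args) {
        int retCode = 0;
        try {
          retCode = run(args);
        } catch (Throwable t) {
          System.err.println("Got a fatal error, exiting now: " + t);
        }
        // System.exit() tears down any lingering non-daemon threads.
        System.exit(retCode);
      }

      private static int run(String[] args) {
        return 0;  // stand-in for the real daemon loop
      }
    }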



hadoop git commit: HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be closed in finally block. Contributed by Kiran Kumar M R and Sanghyun Yun.

2015-09-01 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2e251a767 -> dd149adea


HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be closed 
in finally block. Contributed by Kiran Kumar M R and Sanghyun Yun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd149ade
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd149ade
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd149ade

Branch: refs/heads/trunk
Commit: dd149adeace8727864371c5a1484c6534f8b450b
Parents: 2e251a7
Author: Tsuyoshi Ozawa 
Authored: Wed Sep 2 02:01:51 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Wed Sep 2 02:01:51 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../src/main/java/org/apache/hadoop/fs/FileUtil.java  | 14 +++---
 2 files changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd149ade/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4eef964..70252d6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1123,6 +1123,9 @@ Release 2.7.2 - UNRELEASED
 HADOOP-12359. hadoop fs -getmerge doc is wrong.
 (Jagadesh Kiran N via aajisaka)
 
+HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be
+closed in finally block. (Kiran Kumar M R and Sanghyun Yun via ozawa)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd149ade/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 8abb4eb..3c0e90d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -742,15 +742,15 @@ public class FileUtil {
 
    int count;
    byte data[] = new byte[2048];
-    BufferedOutputStream outputStream = new BufferedOutputStream(
-        new FileOutputStream(outputFile));
+    try (BufferedOutputStream outputStream = new BufferedOutputStream(
+        new FileOutputStream(outputFile));) {

-    while ((count = tis.read(data)) != -1) {
-      outputStream.write(data, 0, count);
-    }
+      while ((count = tis.read(data)) != -1) {
+        outputStream.write(data, 0, count);
+      }

-    outputStream.flush();
-    outputStream.close();
+      outputStream.flush();
+    }
   }
 
   /**



hadoop git commit: YARN-3369. Missing NullPointer check in AppSchedulingInfo causes RM to die. (Brahma Reddy Battula via wangda)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 553efd719 -> 005d86549


YARN-3369. Missing NullPointer check in AppSchedulingInfo causes RM to die. 
(Brahma Reddy Battula via wangda)

(cherry picked from commit 6bc7710ec7f2592c4c87dd940fbe5827ef81fe72)
(cherry picked from commit 8e142d27cbddfa1a1c83c5f8752bd14ac0a13612)
(cherry picked from commit 4d43be3c01b1bc0deb31a9081fca5395d0eb4e0d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/005d8654
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/005d8654
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/005d8654

Branch: refs/heads/branch-2.6.1
Commit: 005d865494d59fa9b15e7c76a51022d852e70f8c
Parents: 553efd7
Author: Wangda Tan 
Authored: Fri Mar 20 10:42:05 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 17:10:42 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../resourcemanager/scheduler/AppSchedulingInfo.java  | 10 ++
 2 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/005d8654/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 786d919..5762671 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -114,6 +114,9 @@ Release 2.6.1 - UNRELEASED
 YARN-3267. Timelineserver applies the ACL rules after applying the limit on
 the number of records (Chang Li via jeagles)
 
+YARN-3369. Missing NullPointer check in AppSchedulingInfo causes RM to die.
+(Brahma Reddy Battula via wangda)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/005d8654/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index 3ade7f7..331cee9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -236,7 +236,7 @@ public class AppSchedulingInfo {
 
  public synchronized Resource getResource(Priority priority) {
    ResourceRequest request = getResourceRequest(priority, ResourceRequest.ANY);
-    return request.getCapability();
+    return (request == null) ? null : request.getCapability();
  }
 
   public synchronized boolean isBlacklisted(String resourceName) {
@@ -380,9 +380,11 @@ public class AppSchedulingInfo {
    boolean deactivate = true;
    for (Priority priority : getPriorities()) {
      ResourceRequest request = getResourceRequest(priority, ResourceRequest.ANY);
-      if (request.getNumContainers() > 0) {
-        deactivate = false;
-        break;
+      if (request != null) {
+        if (request.getNumContainers() > 0) {
+          deactivate = false;
+          break;
+        }
      }
    }
    if (deactivate) {



hadoop git commit: HDFS-7587. Edit log corruption can happen if append fails with a quota violation. Contributed by Jing Zhao.

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 a9bb641d5 -> 49ef26b5a


HDFS-7587. Edit log corruption can happen if append fails with a quota 
violation. Contributed by Jing Zhao.

Committed Ming Ma's 2.6 patch.

(cherry picked from commit 7f0bb5d3fe0db2e6b9354c8d8a1b603f2390184f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49ef26b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49ef26b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49ef26b5

Branch: refs/heads/branch-2.6.1
Commit: 49ef26b5ac299d0f17afce4e6e7e30afdfef4d18
Parents: a9bb641
Author: Jing Zhao 
Authored: Wed Mar 18 18:51:14 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 16:33:30 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/namenode/FSDirectory.java   |  8 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  2 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 86 +++-
 .../hdfs/server/namenode/INodesInPath.java  |  4 +
 .../namenode/TestDiskspaceQuotaUpdate.java  | 42 ++
 6 files changed, 122 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49ef26b5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ed845f5..44a7139 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -94,6 +94,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7830. DataNode does not release the volume lock when adding a volume
 fails. (Lei Xu via Colin P. Mccabe)
 
+HDFS-7587. Edit log corruption can happen if append fails with a quota
+violation. (jing9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49ef26b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 9ca50c4..95877ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -267,6 +267,10 @@ public class FSDirectory implements Closeable {
 }
   }
 
+  boolean shouldSkipQuotaChecks() {
+return skipQuotaCheck;
+  }
+
   /** Enable quota verification */
   void enableQuotaChecks() {
 skipQuotaCheck = false;
@@ -1738,7 +1742,7 @@ public class FSDirectory implements Closeable {
* update quota of each inode and check to see if quota is exceeded. 
* See {@link #updateCount(INodesInPath, long, long, boolean)}
*/ 
-  private void updateCountNoQuotaCheck(INodesInPath inodesInPath,
+  void updateCountNoQuotaCheck(INodesInPath inodesInPath,
   int numOfINodes, long nsDelta, long dsDelta) {
 assert hasWriteLock();
 try {
@@ -1877,7 +1881,7 @@ public class FSDirectory implements Closeable {
*  Pass null if a node is not being moved.
* @throws QuotaExceededException if quota limit is exceeded.
*/
-  private static void verifyQuota(INode[] inodes, int pos, long nsDelta,
+  static void verifyQuota(INode[] inodes, int pos, long nsDelta,
   long dsDelta, INode commonAncestor) throws QuotaExceededException {
 if (nsDelta <= 0 && dsDelta <= 0) {
   // if quota is being freed or not being consumed

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49ef26b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 7dfe688..cb5afbb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -387,7 +387,7 @@ public class FSEditLogLoader {
 "for append");
   }
   LocatedBlock lb = fsNamesys.prepareFileForWrite(path,
-  oldFile, addCloseOp.clientName, addCloseOp.clientMachine, false, 

hadoop git commit: HDFS-7884. Fix NullPointerException in BlockSender when the generation stamp provided by the client is larger than the one stored in the datanode. Contributed by Brahma Reddy Battul

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 914cc8f4a -> 9b105b78d


HDFS-7884. Fix NullPointerException in BlockSender when the generation stamp 
provided by the client is larger than the one stored in the datanode.  
Contributed by Brahma Reddy Battula

(cherry picked from commit fe693b72dec703ecbf4ab3919d61d06ea8735a9e)
(cherry picked from commit 4c648779d69a668f5147b183af2d40d45d1227d2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b105b78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b105b78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b105b78

Branch: refs/heads/branch-2.6.1
Commit: 9b105b78d403b18111959ff2d59b32a78744179b
Parents: 914cc8f
Author: Tsz-Wo Nicholas Sze 
Authored: Tue Mar 24 13:49:17 2015 +0900
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 18:25:02 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 4 
 .../org/apache/hadoop/hdfs/server/datanode/BlockSender.java   | 7 +++
 2 files changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b105b78/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 904d02d..5ffe005 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -102,6 +102,10 @@ Release 2.6.1 - UNRELEASED
 
    HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)

+    HDFS-7884. Fix NullPointerException in BlockSender when the generation
+    stamp provided by the client is larger than the one stored in the datanode.
+    (Brahma Reddy Battula via szetszwo)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b105b78/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index c8855d7..fdfdbfa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -240,6 +240,13 @@ class BlockSender implements java.io.Closeable {
      if (replica.getGenerationStamp() < block.getGenerationStamp()) {
        throw new IOException("Replica gen stamp < block genstamp, block="
            + block + ", replica=" + replica);
+      } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
+        if (DataNode.LOG.isDebugEnabled()) {
+          DataNode.LOG.debug("Bumping up the client provided"
+              + " block's genstamp to latest " + replica.getGenerationStamp()
+              + " for block " + block);
+        }
+        block.setGenerationStamp(replica.getGenerationStamp());
      }
      if (replicaVisibleLength < 0) {
        throw new IOException("Replica is not readable, block="
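HDFS-7884 here, and HDFS-7885 later in this digest, apply the same generation-stamp reconciliation rule in two different DataNode paths. A hedged distillation of the rule, separate from the DataNode classes:

    import java.io.IOException;

    // Distilled rule (my phrasing, not DataNode code): a replica OLDER than
    // the requested generation stamp is stale and must be rejected; a
    // replica NEWER than the request means the stamp was bumped (e.g. by
    // pipeline recovery) after the client cached it, so the request is
    // updated to the replica's stamp and served.
    public final class GenStampRuleSketch {
      static long reconcile(long replicaGenStamp, long requestedGenStamp)
          throws IOException {
        if (replicaGenStamp < requestedGenStamp) {
          throw new IOException("Replica gen stamp < requested gen stamp");
        }
        return replicaGenStamp;  // serve at the replica's authoritative stamp
      }
    }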



hadoop git commit: YARN-3239. WebAppProxy does not support a final tracking url which has query fragments and params. Contributed by Jian He (cherry picked from commit 1a68fc43464d3948418f453bb2f80df7

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 406ec495b -> a4b8897b3


YARN-3239. WebAppProxy does not support a final tracking url which has query 
fragments and params. Contributed by Jian He
(cherry picked from commit 1a68fc43464d3948418f453bb2f80df7ce773097)

(cherry picked from commit 257087417e424e628f090b6b648ccb3b9c880250)
(cherry picked from commit 49468108c203bf093acdc93c1798d90c480c3a17)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4b8897b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4b8897b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4b8897b

Branch: refs/heads/branch-2.6.1
Commit: a4b8897b306103c20b97d9fbc20cc93e968620ae
Parents: 406ec49
Author: Jason Lowe 
Authored: Wed Feb 25 16:14:34 2015 +
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 13:32:21 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../server/webproxy/WebAppProxyServlet.java | 23 ++--
 .../server/webproxy/TestWebAppProxyServlet.java | 23 +++-
 3 files changed, 41 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4b8897b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d96a0d9..78d7093 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -85,6 +85,9 @@ Release 2.6.1 - UNRELEASED
 YARN-3238. Connection timeouts to nodemanagers are retried at
 multiple levels (Jason Lowe via xgong)
 
+YARN-3239. WebAppProxy does not support a final tracking url which has
+query fragments and params (Jian He via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4b8897b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
index 19ae9dc..d41374a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
@@ -36,6 +36,7 @@ import javax.servlet.http.Cookie;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.UriBuilder;
 
 import org.apache.commons.httpclient.Header;
 import org.apache.commons.httpclient.HostConfiguration;
@@ -58,6 +59,8 @@ import org.apache.hadoop.yarn.util.TrackingUriPlugin;
 import org.apache.hadoop.yarn.webapp.MimeType;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.apache.http.NameValuePair;
+import org.apache.http.client.utils.URLEncodedUtils;
 
 public class WebAppProxyServlet extends HttpServlet {
   private static final long serialVersionUID = 1L;
@@ -322,13 +325,19 @@ public class WebAppProxyServlet extends HttpServlet {
 req.getQueryString(), true), runningUser, id);
 return;
   }
-      URI toFetch = new URI(trackingUri.getScheme(),
-          trackingUri.getAuthority(),
-          StringHelper.ujoin(trackingUri.getPath(), rest), req.getQueryString(),
-          null);
-
-      LOG.info(req.getRemoteUser()+" is accessing unchecked "+toFetch+
-          " which is the app master GUI of "+appId+" owned by "+runningUser);
+
+      // Append the user-provided path and query parameter to the original
+      // tracking url.
+      List<NameValuePair> queryPairs =
+          URLEncodedUtils.parse(req.getQueryString(), null);
+      UriBuilder builder = UriBuilder.fromUri(trackingUri);
+      for (NameValuePair pair : queryPairs) {
+        builder.queryParam(pair.getName(), pair.getValue());
+      }
+      URI toFetch = builder.path(rest).build();
+
+      LOG.info(remoteUser+" is accessing unchecked "+toFetch+
+          " which is the app master GUI of "+appId+" owned by "+runningUser);
 
   switch(applicationReport.getYarnApplicationState()) {
   case KILLED:
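
The old new URI(scheme, authority, path, query, fragment) constructor rebuilt the URI from parts and dropped whatever query string or fragment the tracking URL already carried; UriBuilder starts from the complete URI and only appends to it. A small sketch of the difference (it needs a JAX-RS implementation on the classpath; the host, paths, and parameter names are illustrative):

    import java.net.URI;
    import javax.ws.rs.core.UriBuilder;

    public final class TrackingUrlSketch {
      public static void main(String[] args) {
        URI trackingUri = URI.create("http://am-host:8080/app?view=summary#jobs");

        URI toFetch = UriBuilder.fromUri(trackingUri)
            .queryParam("user", "alice")   // client-supplied query parameter
            .path("tasks")                 // extra path segments from the request
            .build();

        // e.g. http://am-host:8080/app/tasks?view=summary&user=alice#jobs
        System.out.println(toFetch);
      }
    }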


hadoop git commit: HDFS-7871. NameNodeEditLogRoller can keep printing 'Swallowing exception' message. Contributed by Jing Zhao.

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 a4b8897b3 -> 6e090bc53


HDFS-7871. NameNodeEditLogRoller can keep printing 'Swallowing exception' 
message. Contributed by Jing Zhao.

(cherry picked from commit b442aeec95abfa1c6f835a116dfe6e186b0d841d)
(cherry picked from commit 6090f51725e2b44d794433ed72a1901fae2ba7e3)
(cherry picked from commit e1af1ac4e91d36b21df18ce5627e1f69f27f0776)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e090bc5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e090bc5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e090bc5

Branch: refs/heads/branch-2.6.1
Commit: 6e090bc53df39eb4fb6745fa2d8b71092c74ce86
Parents: a4b8897
Author: Jing Zhao 
Authored: Mon Mar 2 20:22:04 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 13:36:21 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 8 +---
 2 files changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e090bc5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 09122f9..1811337 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -82,6 +82,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7763. fix zkfc hung issue due to not catching exception in a corner
 case. (Liang Xie via wang)
 
+HDFS-7871. NameNodeEditLogRoller can keep printing "Swallowing exception"
+message. (jing9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e090bc5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 8e5a2db..5541637 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5203,14 +5203,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
                + rollThreshold);
            rollEditLog();
          }
+        } catch (Exception e) {
+          FSNamesystem.LOG.error("Swallowing exception in "
+              + NameNodeEditLogRoller.class.getSimpleName() + ":", e);
+        }
+        try {
          Thread.sleep(sleepIntervalMs);
        } catch (InterruptedException e) {
          FSNamesystem.LOG.info(NameNodeEditLogRoller.class.getSimpleName()
              + " was interrupted, exiting");
          break;
-        } catch (Exception e) {
-          FSNamesystem.LOG.error("Swallowing exception in "
-              + NameNodeEditLogRoller.class.getSimpleName() + ":", e);
        }
      }
    }
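
The bug was loop structure: with one try block around both rollEditLog() and Thread.sleep(), any exception from rollEditLog() jumped straight past the sleep, so the loop spun and printed "Swallowing exception" continuously. Splitting the try blocks keeps the swallow-and-continue behavior for work errors but guarantees the sleep, and still exits cleanly on interrupt. A generic sketch of the corrected shape:

    public final class RollerLoopSketch implements Runnable {
      private final long sleepIntervalMs;

      public RollerLoopSketch(long sleepIntervalMs) {
        this.sleepIntervalMs = sleepIntervalMs;
      }

      @Override
      public void run() {
        while (true) {
          try {
            doWork();  // stand-in for rollEditLog()
          } catch (Exception e) {
            // Logged and swallowed; the sleep below still happens.
            System.err.println("Swallowing exception: " + e);
          }
          try {
            Thread.sleep(sleepIntervalMs);
          } catch (InterruptedException e) {
            break;  // interrupted: exit the loop instead of swallowing
          }
        }
      }

      private void doWork() {}
    }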



hadoop git commit: YARN-3222. Added the missing CHANGES.txt entry.

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 6078172fe -> 015696fb8


YARN-3222. Added the missing CHANGES.txt entry.

(cherry picked from commit 4620767156ecc43424bc6c7c4d50519e2563cc69)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/015696fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/015696fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/015696fb

Branch: refs/heads/branch-2.7
Commit: 015696fb81912d545ce602b245c456c2741a3922
Parents: 6078172
Author: Vinod Kumar Vavilapalli 
Authored: Tue Sep 1 13:43:10 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 13:44:51 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/015696fb/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ee9b66a..427e7a3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -916,6 +916,9 @@ Release 2.7.0 - 2015-04-20
    YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a queue's
    available resource-limit from the parent queue. (Wangda Tan via vinodkv)

+    YARN-3222. Fixed RMNode to send scheduler events in sequential order when a
+    node reconnects. (Rohith Sharma K S via jianhe)
+
    YARN-3131. YarnClientImpl should check FAILED and KILLED state in
    submitApplication (Chang Li via jlowe)
 



[4/4] hadoop git commit: HDFS-7885. Datanode should not trust the generation stamp provided by client. Contributed by Tsz Wo Nicholas Sze.

2015-09-01 Thread vinodkv
HDFS-7885. Datanode should not trust the generation stamp provided by client. 
Contributed by Tsz Wo Nicholas Sze.

(cherry picked from commit 24db0812be64e83a48ade01fc1eaaeaedad4dec0)
(cherry picked from commit 994dadb9ba0a3b87b6548e6e0801eadd26554d55)
(cherry picked from commit 0bc5c6495a7feb4365af0ce5fe48fc87b7e1749f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b25491dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b25491dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b25491dc

Branch: refs/heads/branch-2.6.1
Commit: b25491dc45be92cc3ec157d998a2632387c3952f
Parents: 7ffdf7d
Author: Jing Zhao 
Authored: Fri Mar 6 10:55:56 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 15:17:57 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 15 +
 .../hadoop/hdfs/TestBlockReaderLocalLegacy.java | 63 
 3 files changed, 81 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b25491dc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1811337..d7ff237 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -85,6 +85,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7871. NameNodeEditLogRoller can keep printing "Swallowing exception"
 message. (jing9)
 
+HDFS-7885. Datanode should not trust the generation stamp provided by
+client. (Tsz Wo Nicholas Sze via jing9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b25491dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 0d9f096..0c2337e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2276,6 +2276,21 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   @Override // FsDatasetSpi
   public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
   throws IOException {
+synchronized(this) {
+  final Replica replica = volumeMap.get(block.getBlockPoolId(),
+  block.getBlockId());
+  if (replica == null) {
+throw new ReplicaNotFoundException(block);
+  }
+  if (replica.getGenerationStamp() < block.getGenerationStamp()) {
+throw new IOException(
+"Replica generation stamp < block generation stamp, block="
++ block + ", replica=" + replica);
+  } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
+block.setGenerationStamp(replica.getGenerationStamp());
+  }
+}
+
 File datafile = getBlockFile(block);
 File metafile = FsDatasetUtil.getMetaFile(datafile, block.getGenerationStamp());
 BlockLocalPathInfo info = new BlockLocalPathInfo(block,
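
The validation added in this hunk follows a simple rule: never trust the client-supplied generation stamp. A replica whose stamp is older than the requested one means the client claims data the datanode never saw, which is an error; a replica with a newer stamp silently upgrades the request. A standalone sketch of that rule, using simplified stand-ins for the real Block and Replica types:

    import java.io.IOException;

    class GenStampCheckSketch {
      static class Block { long genStamp; Block(long g) { genStamp = g; } }
      static class Replica { final long genStamp; Replica(long g) { genStamp = g; } }

      // Reject requests claiming a newer stamp than the stored replica,
      // and normalize requests for an older stamp up to the stored one.
      static void validate(Block requested, Replica stored) throws IOException {
        if (stored == null) {
          throw new IOException("replica not found");
        }
        if (stored.genStamp < requested.genStamp) {
          throw new IOException("replica generation stamp < block generation stamp");
        } else if (stored.genStamp > requested.genStamp) {
          requested.genStamp = stored.genStamp; // the datanode's stamp wins
        }
      }

      public static void main(String[] args) throws IOException {
        Block b = new Block(5);
        validate(b, new Replica(7));
        System.out.println(b.genStamp); // prints 7: normalized upward
      }
    }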

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b25491dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
index cb50539..1c4134f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
@@ -30,11 +30,16 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.io.IOUtils;
 import 

[3/4] hadoop git commit: YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong (cherry picked from commit 95bfd087dc89e57a93340604cc8b96042fa1a05a)

2015-09-01 Thread vinodkv
YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie 
Shen and Xuan Gong
(cherry picked from commit 95bfd087dc89e57a93340604cc8b96042fa1a05a)

(cherry picked from commit a5f3fb4dc14503bf7c454a48cf954fb0d6710de2)
(cherry picked from commit 27a2f0acb84202cc082090eef7eea57f6e42f9bb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ffdf7d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ffdf7d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ffdf7d1

Branch: refs/heads/branch-2.6.1
Commit: 7ffdf7d105fd2ce7a484cb96a96f414670bec141
Parents: 81417f7
Author: Jian He 
Authored: Thu Mar 5 21:14:41 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 15:12:53 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/api/ApplicationBaseProtocol.java   | 355 +++
 .../yarn/api/ApplicationClientProtocol.java | 290 +--
 .../yarn/api/ApplicationHistoryProtocol.java| 303 +---
 .../apache/hadoop/yarn/webapp/ResponseInfo.java |   6 +-
 .../hadoop/yarn/webapp/YarnWebParams.java   |   4 +
 .../hadoop/yarn/webapp/view/HtmlBlock.java  |   2 +
 .../ApplicationHistoryClientService.java| 176 +
 .../ApplicationHistoryManager.java  | 126 ++-
 .../ApplicationHistoryServer.java   |   2 +-
 .../webapp/AHSView.java |  28 +-
 .../webapp/AHSWebApp.java   |  16 +-
 .../webapp/AHSWebServices.java  |   6 +-
 .../webapp/AppAttemptPage.java  |  15 +-
 .../webapp/AppPage.java |  21 +-
 .../TestApplicationHistoryClientService.java|  12 +-
 .../webapp/TestAHSWebApp.java   |  27 +-
 .../webapp/TestAHSWebServices.java  |  26 +-
 .../yarn/server/api/ApplicationContext.java | 122 ---
 .../yarn/server/webapp/AppAttemptBlock.java | 119 ---
 .../hadoop/yarn/server/webapp/AppBlock.java | 197 --
 .../hadoop/yarn/server/webapp/AppsBlock.java|  53 ++-
 .../yarn/server/webapp/ContainerBlock.java  |  29 +-
 .../hadoop/yarn/server/webapp/WebPageUtils.java |  86 +
 .../hadoop/yarn/server/webapp/WebServices.java  |  68 +++-
 .../hadoop/yarn/server/webapp/dao/AppInfo.java  |  11 +-
 .../resourcemanager/webapp/AppAttemptPage.java  |  55 +++
 .../server/resourcemanager/webapp/AppBlock.java | 265 --
 .../server/resourcemanager/webapp/AppPage.java  |  27 +-
 .../resourcemanager/webapp/AppsBlock.java   | 131 ---
 .../webapp/AppsBlockWithMetrics.java|   1 +
 .../webapp/CapacitySchedulerPage.java   |   1 +
 .../resourcemanager/webapp/ContainerPage.java   |  44 +++
 .../webapp/DefaultSchedulerPage.java|   1 +
 .../webapp/FairSchedulerPage.java   |  21 +-
 .../server/resourcemanager/webapp/RMWebApp.java |   7 +-
 .../resourcemanager/webapp/RmController.java|   8 +
 .../server/resourcemanager/webapp/RmView.java   |  31 +-
 .../resourcemanager/webapp/TestAppPage.java |   8 +-
 .../resourcemanager/webapp/TestRMWebApp.java|  48 ++-
 .../webapp/TestRMWebAppFairScheduler.java   |  14 +-
 41 files changed, 1301 insertions(+), 1464 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ffdf7d1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5317b59..b8c15f9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -13,6 +13,9 @@ Release 2.6.1 - UNRELEASED
 
 YARN-3230. Clarify application states on the web UI. (Jian He via wangda)
 
+YARN-1809. Synchronize RM and TimeLineServer Web-UIs. (Zhijie Shen and
+Xuan Gong via jianhe)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ffdf7d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java
new file mode 100644
index 000..2a8a283
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java
@@ -0,0 +1,355 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for 

[2/4] hadoop git commit: YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong (cherry picked from commit 95bfd087dc89e57a93340604cc8b96042fa1a05a)

2015-09-01 Thread vinodkv
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ffdf7d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
index 7bac6f2..2cd7580 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
@@ -20,15 +20,16 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
 
 import static org.apache.hadoop.yarn.webapp.Params.TITLE;
 import static org.mockito.Mockito.mock;
-import org.junit.Assert;
 
+import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryClientService;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManagerImpl;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
@@ -68,8 +69,8 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   @Test
   public void testView() throws Exception {
 Injector injector =
-WebAppTests.createMockInjector(ApplicationContext.class,
-  mockApplicationHistoryManager(5, 1, 1));
+WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+  mockApplicationHistoryClientService(5, 1, 1));
 AHSView ahsViewInstance = injector.getInstance(AHSView.class);
 
 ahsViewInstance.render();
@@ -89,8 +90,8 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   @Test
   public void testAppPage() throws Exception {
 Injector injector =
-WebAppTests.createMockInjector(ApplicationContext.class,
-  mockApplicationHistoryManager(1, 5, 1));
+WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+  mockApplicationHistoryClientService(1, 5, 1));
 AppPage appPageInstance = injector.getInstance(AppPage.class);
 
 appPageInstance.render();
@@ -105,8 +106,8 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   @Test
   public void testAppAttemptPage() throws Exception {
 Injector injector =
-WebAppTests.createMockInjector(ApplicationContext.class,
-  mockApplicationHistoryManager(1, 1, 5));
+WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+  mockApplicationHistoryClientService(1, 1, 5));
 AppAttemptPage appAttemptPageInstance =
 injector.getInstance(AppAttemptPage.class);
 
@@ -123,8 +124,8 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   @Test
   public void testContainerPage() throws Exception {
 Injector injector =
-WebAppTests.createMockInjector(ApplicationContext.class,
-  mockApplicationHistoryManager(1, 1, 1));
+WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+  mockApplicationHistoryClientService(1, 1, 1));
 ContainerPage containerPageInstance =
 injector.getInstance(ContainerPage.class);
 
@@ -141,10 +142,12 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
 WebAppTests.flushOutput(injector);
   }
 
-  ApplicationHistoryManager mockApplicationHistoryManager(int numApps,
+  ApplicationHistoryClientService mockApplicationHistoryClientService(int numApps,
   int numAppAttempts, int numContainers) throws Exception {
 ApplicationHistoryManager ahManager =
 new MockApplicationHistoryManagerImpl(store);
+ApplicationHistoryClientService historyClientService =
+new ApplicationHistoryClientService(ahManager);
 for (int i = 1; i <= numApps; ++i) {
   ApplicationId appId = 

[1/4] hadoop git commit: YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong (cherry picked from commit 95bfd087dc89e57a93340604cc8b96042fa1a05a)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 81417f757 -> b25491dc4


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ffdf7d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 89b4a78..ffead48 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedule
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerLeafQueueInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
+import org.apache.hadoop.yarn.server.webapp.AppsBlock;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ffdf7d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
new file mode 100644
index 000..b8cd1ad
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+
+import org.apache.hadoop.yarn.server.webapp.ContainerBlock;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+
+public class ContainerPage extends RmView {
+
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+commonPreHead(html);
+
+String containerId = $(YarnWebParams.CONTAINER_ID);
+set(TITLE, containerId.isEmpty() ? "Bad request: missing container ID"
+: join("Container ", $(YarnWebParams.CONTAINER_ID)));
+  }
+
+  @Override
+  protected Class<? extends SubView> content() {
+return ContainerBlock.class;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ffdf7d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
index e05987b..1c8828c 100644
--- 

hadoop git commit: YARN-3267. Timelineserver applies the ACL rules after applying the limit on the number of records (Chang Li via jeagles)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 2230754f2 -> a9bb641d5


YARN-3267. Timelineserver applies the ACL rules after applying the limit on the 
number of records (Chang Li via jeagles)

(cherry picked from commit 8180e676abb2bb500a48b3a0c0809d2a807ab235)
(cherry picked from commit 44aedad5ddc8069a6dba3eaf66ed54d612b21208)
(cherry picked from commit f4bbf2c8f97d3601132504453f61e472950a433e)
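
The bug here is an ordering one: the store truncated results to the requested limit first and applied ACL checks afterwards, so callers could receive fewer entities than the limit even when more readable entities existed. The fix filters while scanning and stops only once enough readable entities are collected. A self-contained sketch of both shapes -- Entity and the owner check are simplified stand-ins for the timeline types:

    import java.util.ArrayList;
    import java.util.List;

    class AclLimitSketch {
      record Entity(String id, String owner) {}

      // Buggy shape: truncate first, filter second -- can come up short.
      static List<Entity> limitThenFilter(List<Entity> all, String user, int limit) {
        List<Entity> page =
            new ArrayList<>(all.subList(0, Math.min(limit, all.size())));
        page.removeIf(e -> !e.owner().equals(user));
        return page;
      }

      // Fixed shape: filter while scanning, stop at 'limit' readable hits.
      static List<Entity> filterThenLimit(List<Entity> all, String user, int limit) {
        List<Entity> visible = new ArrayList<>();
        for (Entity e : all) {
          if (visible.size() >= limit) break;
          if (e.owner().equals(user)) visible.add(e);
        }
        return visible;
      }

      public static void main(String[] args) {
        List<Entity> store = List.of(
            new Entity("a1", "alice"), new Entity("b1", "bob"),
            new Entity("a2", "alice"), new Entity("a3", "alice"));
        System.out.println(limitThenFilter(store, "alice", 2)); // only a1 survives
        System.out.println(filterThenLimit(store, "alice", 2)); // a1 and a2
      }
    }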


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9bb641d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9bb641d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9bb641d

Branch: refs/heads/branch-2.6.1
Commit: a9bb641d51f808e8d549b42e29da634dfb194c67
Parents: 2230754
Author: Jonathan Eagles 
Authored: Fri Mar 13 12:04:30 2015 -0500
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 16:04:19 2015 -0700

--
 .../jobhistory/TestJobHistoryEventHandler.java  | 14 +++---
 .../mapred/TestMRTimelineEventHandling.java | 12 ++---
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../distributedshell/TestDistributedShell.java  |  4 +-
 .../server/timeline/LeveldbTimelineStore.java   | 23 ++---
 .../server/timeline/MemoryTimelineStore.java| 12 -
 .../server/timeline/TimelineDataManager.java| 50 +++-
 .../yarn/server/timeline/TimelineReader.java|  3 +-
 .../timeline/TestLeveldbTimelineStore.java  | 16 +++
 .../timeline/TestTimelineDataManager.java   | 26 +-
 .../server/timeline/TimelineStoreTestUtils.java | 33 +
 11 files changed, 129 insertions(+), 67 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9bb641d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index a64f1d6..de260c9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -464,7 +464,7 @@ public class TestJobHistoryEventHandler {
   t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000),
   currentTime - 10));
   TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null,
-  null, null, null, null, null, null);
+  null, null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   TimelineEntity tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -480,7 +480,7 @@ public class TestJobHistoryEventHandler {
   new HashMap(), "default"),
   currentTime + 10));
   entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
-  null, null, null, null, null);
+  null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -498,7 +498,7 @@ public class TestJobHistoryEventHandler {
   new JobQueueChangeEvent(TypeConverter.fromYarn(t.jobId), "q2"),
   currentTime - 20));
   entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
-  null, null, null, null, null);
+  null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
@@ -520,7 +520,7 @@ public class TestJobHistoryEventHandler {
   new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0,
   0, new Counters(), new Counters(), new Counters()), 
currentTime));
   entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
-  null, null, null, null, null);
+  null, null, null, null, null, null);
   Assert.assertEquals(1, entities.getEntities().size());
   tEntity = entities.getEntities().get(0);
   Assert.assertEquals(t.jobId.toString(), 

hadoop git commit: HDFS-8995. Flaw in registration bookkeeping can make DN die on reconnect. (Kihwal Lee via yliu)

2015-09-01 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 acb300018 -> 0fb6b6810


HDFS-8995. Flaw in registration bookkeeping can make DN die on reconnect. (Kihwal Lee via yliu)
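
The flaw and fix, in miniature: register() used to write the in-progress registration straight into the shared bpRegistration field, so a reconnect that failed partway could leave a stale or half-updated registration behind. The patch builds the new registration in a local variable and publishes it only after the namenode accepts it. A simplified, self-contained sketch -- types and names are illustrative, not the real datanode classes:

    import java.io.EOFException;
    import java.io.IOException;

    class RegistrationSketch {
      static class Registration { String namespaceInfo; }

      private volatile Registration bpRegistration; // last accepted registration

      void register(String nsInfo) throws IOException, InterruptedException {
        Registration candidate = new Registration(); // stays local until accepted
        while (true) {
          try {
            candidate = registerWithNamenode(candidate); // may throw and retry
            candidate.namespaceInfo = nsInfo;
            bpRegistration = candidate; // publish only the accepted registration
            break;
          } catch (EOFException e) {
            Thread.sleep(1000); // namenode may be restarting; retry
          }
        }
      }

      // Stand-in for the RPC that returns a registration with updated fields.
      private Registration registerWithNamenode(Registration r) throws IOException {
        return r;
      }

      public static void main(String[] args) throws Exception {
        RegistrationSketch s = new RegistrationSketch();
        s.register("ns-1");
        System.out.println(s.bpRegistration.namespaceInfo); // ns-1
      }
    }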


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fb6b681
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fb6b681
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fb6b681

Branch: refs/heads/branch-2
Commit: 0fb6b68101001221e14c2e86f4a63028074cced0
Parents: acb3000
Author: yliu 
Authored: Wed Sep 2 09:01:31 2015 +0800
Committer: yliu 
Committed: Wed Sep 2 09:01:31 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/datanode/BPOfferService.java| 3 +--
 .../apache/hadoop/hdfs/server/datanode/BPServiceActor.java| 7 ---
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 5 +
 4 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb6b681/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6cf0d4f..73a93b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -958,6 +958,9 @@ Release 2.7.2 - UNRELEASED
 
 HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
 
+HDFS-8995. Flaw in registration bookeeping can make DN die on reconnect.
+(Kihwal Lee via yliu)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb6b681/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 5097e4a..b3d363f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -351,9 +351,8 @@ class BPOfferService {
 reg.getStorageInfo().getNamespaceID(), "namespace ID");
 checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
 reg.getStorageInfo().getClusterID(), "cluster ID");
-  } else {
-bpRegistration = reg;
   }
+  bpRegistration = reg;
 
   dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId());
   // Add the initial block token secret keys to the DN's secret manager.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb6b681/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 1817427..85ea6ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -767,15 +767,16 @@ class BPServiceActor implements Runnable {
   void register(NamespaceInfo nsInfo) throws IOException {
 // The handshake() phase loaded the block pool storage
 // off disk - so update the bpRegistration object from that info
-bpRegistration = bpos.createRegistration();
+DatanodeRegistration newBpRegistration = bpos.createRegistration();
 
 LOG.info(this + " beginning handshake with NN");
 
 while (shouldRun()) {
   try {
 // Use returned registration from namenode with updated fields
-bpRegistration = bpNamenode.registerDatanode(bpRegistration);
-bpRegistration.setNamespaceInfo(nsInfo);
+newBpRegistration = bpNamenode.registerDatanode(newBpRegistration);
+newBpRegistration.setNamespaceInfo(nsInfo);
+bpRegistration = newBpRegistration;
 break;
   } catch(EOFException e) {  // namenode might have just restarted
 LOG.info("Problem connecting to server: " + nnAddr + " :"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb6b681/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

hadoop git commit: YARN-3393. Getting application(s) goes wrong when app finishes before starting the attempt. Contributed by Zhijie Shen

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 005d86549 -> 914cc8f4a


YARN-3393. Getting application(s) goes wrong when app finishes before
starting the attempt. Contributed by Zhijie Shen

(cherry picked from commit 9fae455e26e0230107e1c6db58a49a5b6b296cf4)
(cherry picked from commit cbdcdfad6de81e17fb586bc2a53b37da43defd79)
(cherry picked from commit 61aafdcfa589cbae8363976c745ea528b03f152d)
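
The shape of the fix: fetching the current attempt can now fail for two distinct reasons -- the caller lacks access, or the app finished before any attempt was created -- and both get the same graceful fallback of reporting the app without attempt details instead of failing the whole call. A simplified sketch; the exception and report types below are stand-ins, not the real YARN classes:

    class AppReportSketch {
      static class Report {
        String host, diagnostics = "diag";
        String currentAttemptId = "appattempt_1";
      }
      static class AttemptNotFoundException extends Exception {}
      static class AuthorizationException extends Exception {}

      static void fillAttemptDetails(Report r) {
        try {
          r.host = lookupAttemptHost(r.currentAttemptId); // may throw
        } catch (AuthorizationException e) {
          r.diagnostics = null;      // caller may not see these
          r.currentAttemptId = null;
        } catch (AttemptNotFoundException e) {
          // App finished before its first attempt existed: same fallback.
          r.diagnostics = null;
          r.currentAttemptId = null;
        }
      }

      // Stand-in for the timeline-store lookup of the attempt entity.
      static String lookupAttemptHost(String attemptId)
          throws AuthorizationException, AttemptNotFoundException {
        throw new AttemptNotFoundException();
      }

      public static void main(String[] args) {
        Report r = new Report();
        fillAttemptDetails(r);
        System.out.println(r.currentAttemptId); // null: graceful fallback
      }
    }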


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/914cc8f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/914cc8f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/914cc8f4

Branch: refs/heads/branch-2.6.1
Commit: 914cc8f4a43ce7d5026fe55afb22697d1ca03e54
Parents: 005d865
Author: Xuan 
Authored: Mon Mar 23 20:33:16 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 18:14:51 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 ...pplicationHistoryManagerOnTimelineStore.java | 14 ---
 ...pplicationHistoryManagerOnTimelineStore.java | 39 +---
 3 files changed, 44 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/914cc8f4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5762671..d7e6622 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -117,6 +117,9 @@ Release 2.6.1 - UNRELEASED
 YARN-3369. Missing NullPointer check in AppSchedulingInfo causes RM to die.
 (Brahma Reddy Battula via wangda)
 
+YARN-3393. Getting application(s) goes wrong when app finishes before
+starting the attempt. (Zhijie Shen via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/914cc8f4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index cd429d0..ca7521f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -498,17 +498,19 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
   if (app.appReport.getCurrentApplicationAttemptId() != null) {
 ApplicationAttemptReport appAttempt =
 getApplicationAttempt(app.appReport.getCurrentApplicationAttemptId());
-if (appAttempt != null) {
-  app.appReport.setHost(appAttempt.getHost());
-  app.appReport.setRpcPort(appAttempt.getRpcPort());
-  app.appReport.setTrackingUrl(appAttempt.getTrackingUrl());
-  app.appReport.setOriginalTrackingUrl(appAttempt.getOriginalTrackingUrl());
-}
+app.appReport.setHost(appAttempt.getHost());
+app.appReport.setRpcPort(appAttempt.getRpcPort());
+app.appReport.setTrackingUrl(appAttempt.getTrackingUrl());
+app.appReport.setOriginalTrackingUrl(appAttempt.getOriginalTrackingUrl());
   }
 } catch (AuthorizationException e) {
   // AuthorizationException is thrown because the user doesn't have access
   app.appReport.setDiagnostics(null);
   app.appReport.setCurrentApplicationAttemptId(null);
+} catch (ApplicationAttemptNotFoundException e) {
+  // It's possible that the app is finished before the first attempt is created.
+  app.appReport.setDiagnostics(null);
+  app.appReport.setCurrentApplicationAttemptId(null);
 }
 if (app.appReport.getCurrentApplicationAttemptId() == null) {
   app.appReport.setCurrentApplicationAttemptId(


hadoop git commit: Revert "MAPREDUCE-6363. [NNBench] Lease mismatch error when running with multiple mappers. Contributed by Brahma Reddy Battula."

2015-09-01 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5652131d2 -> 00804e245


Revert "MAPREDUCE-6363. [NNBench] Lease mismatch error when running with 
multiple mappers. Contributed by Brahma Reddy Battula."

This reverts commit 75a2560e51387ea31ef4609ef434475bbbc628f7.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00804e24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00804e24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00804e24

Branch: refs/heads/trunk
Commit: 00804e24579083d03a67b86c2c57e70b910dda8d
Parents: 5652131
Author: Akira Ajisaka 
Authored: Wed Sep 2 12:22:16 2015 +0900
Committer: Akira Ajisaka 
Committed: Wed Sep 2 12:22:16 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt  |  3 ---
 .../src/test/java/org/apache/hadoop/hdfs/NNBench.java | 14 --
 2 files changed, 4 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00804e24/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 27af9f9..88cca41 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -509,9 +509,6 @@ Release 2.8.0 - UNRELEASED
MAPREDUCE-6389. Fix BaileyBorweinPlouffe CLI usage message. (Brahma Reddy Battula via devaraj)
 
-MAPREDUCE-6363. [NNBench] Lease mismatch error when running with multiple
-mappers. (Brahma Reddy Battula via aajisaka)
-
 MAPREDUCE-6373. The logger reports total input paths but it is referring
 to input files. (Bibin A Chundatt via devaraj)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00804e24/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
index 8a1f1ba..b6c0104 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
@@ -57,8 +57,6 @@ import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reducer;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 /**
  * This program executes a specified operation that applies load to 
@@ -689,9 +687,6 @@ public class NNBench {
   dataDirName = conf.get("test.nnbench.datadir.name");
   op = conf.get("test.nnbench.operation");
   readFile = conf.getBoolean("test.nnbench.readFileAfterOpen", false);
-  int taskId =
-  TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID))
-  .getTaskID().getId();
   
   long totalTimeTPmS = 0l;
   long startTimeTPmS = 0l;
@@ -704,19 +699,18 @@ public class NNBench {
   successfulFileOps = 0l;
   
   if (barrier()) {
-String filePrefix = "file_" + taskId + "_";
 if (op.equals(OP_CREATE_WRITE)) {
   startTimeTPmS = System.currentTimeMillis();
-  doCreateWriteOp(filePrefix, reporter);
+  doCreateWriteOp("file_" + hostName + "_", reporter);
 } else if (op.equals(OP_OPEN_READ)) {
   startTimeTPmS = System.currentTimeMillis();
-  doOpenReadOp(filePrefix, reporter);
+  doOpenReadOp("file_" + hostName + "_", reporter);
 } else if (op.equals(OP_RENAME)) {
   startTimeTPmS = System.currentTimeMillis();
-  doRenameOp(filePrefix, reporter);
+  doRenameOp("file_" + hostName + "_", reporter);
 } else if (op.equals(OP_DELETE)) {
   startTimeTPmS = System.currentTimeMillis();
-  doDeleteOp(filePrefix, reporter);
+  doDeleteOp("file_" + hostName + "_", reporter);
 }
 
 endTimeTPms = System.currentTimeMillis();



hadoop git commit: Revert "MAPREDUCE-6363. [NNBench] Lease mismatch error when running with multiple mappers. Contributed by Brahma Reddy Battula."

2015-09-01 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0fb6b6810 -> a61c0f5ca


Revert "MAPREDUCE-6363. [NNBench] Lease mismatch error when running with 
multiple mappers. Contributed by Brahma Reddy Battula."

This reverts commit 98446ee695f922ab079654bbc6d613ab334e371f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a61c0f5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a61c0f5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a61c0f5c

Branch: refs/heads/branch-2
Commit: a61c0f5ca101cff6f1907d029262da5544069897
Parents: 0fb6b68
Author: Akira Ajisaka 
Authored: Wed Sep 2 12:26:25 2015 +0900
Committer: Akira Ajisaka 
Committed: Wed Sep 2 12:26:25 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt  |  3 ---
 .../src/test/java/org/apache/hadoop/hdfs/NNBench.java | 14 --
 2 files changed, 4 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a61c0f5c/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 449b03b..3daa740 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -240,9 +240,6 @@ Release 2.8.0 - UNRELEASED
MAPREDUCE-6389. Fix BaileyBorweinPlouffe CLI usage message. (Brahma Reddy Battula via devaraj)
 
-MAPREDUCE-6363. [NNBench] Lease mismatch error when running with multiple
-mappers. (Brahma Reddy Battula via aajisaka)
-
 MAPREDUCE-6373. The logger reports total input paths but it is referring
 to input files. (Bibin A Chundatt via devaraj)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a61c0f5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
index 8a1f1ba..b6c0104 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
@@ -57,8 +57,6 @@ import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reducer;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 /**
  * This program executes a specified operation that applies load to 
@@ -689,9 +687,6 @@ public class NNBench {
   dataDirName = conf.get("test.nnbench.datadir.name");
   op = conf.get("test.nnbench.operation");
   readFile = conf.getBoolean("test.nnbench.readFileAfterOpen", false);
-  int taskId =
-  TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID))
-  .getTaskID().getId();
   
   long totalTimeTPmS = 0l;
   long startTimeTPmS = 0l;
@@ -704,19 +699,18 @@ public class NNBench {
   successfulFileOps = 0l;
   
   if (barrier()) {
-String filePrefix = "file_" + taskId + "_";
 if (op.equals(OP_CREATE_WRITE)) {
   startTimeTPmS = System.currentTimeMillis();
-  doCreateWriteOp(filePrefix, reporter);
+  doCreateWriteOp("file_" + hostName + "_", reporter);
 } else if (op.equals(OP_OPEN_READ)) {
   startTimeTPmS = System.currentTimeMillis();
-  doOpenReadOp(filePrefix, reporter);
+  doOpenReadOp("file_" + hostName + "_", reporter);
 } else if (op.equals(OP_RENAME)) {
   startTimeTPmS = System.currentTimeMillis();
-  doRenameOp(filePrefix, reporter);
+  doRenameOp("file_" + hostName + "_", reporter);
 } else if (op.equals(OP_DELETE)) {
   startTimeTPmS = System.currentTimeMillis();
-  doDeleteOp(filePrefix, reporter);
+  doDeleteOp("file_" + hostName + "_", reporter);
 }
 
 endTimeTPms = System.currentTimeMillis();



hadoop git commit: HDFS-8995. Flaw in registration bookkeeping can make DN die on reconnect. (Kihwal Lee via yliu)

2015-09-01 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 015696fb8 -> 3ab43acca


HDFS-8995. Flaw in registration bookkeeping can make DN die on reconnect. (Kihwal Lee via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ab43acc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ab43acc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ab43acc

Branch: refs/heads/branch-2.7
Commit: 3ab43accaf4226fcb2152fa005a75de592fd1f0e
Parents: 015696f
Author: yliu 
Authored: Wed Sep 2 09:00:22 2015 +0800
Committer: yliu 
Committed: Wed Sep 2 09:00:22 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/datanode/BPOfferService.java| 3 +--
 .../apache/hadoop/hdfs/server/datanode/BPServiceActor.java| 7 ---
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 5 +
 4 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ab43acc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b5bb7b0..2a41a9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -35,6 +35,9 @@ Release 2.7.2 - UNRELEASED
HDFS-8879. Quota by storage type usage incorrectly initialized upon namenode
restart. (xyao)
 
+HDFS-8995. Flaw in registration bookeeping can make DN die on reconnect.
+(Kihwal Lee via yliu)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ab43acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 36a868e..88b8312 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -350,9 +350,8 @@ class BPOfferService {
 reg.getStorageInfo().getNamespaceID(), "namespace ID");
 checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
 reg.getStorageInfo().getClusterID(), "cluster ID");
-  } else {
-bpRegistration = reg;
   }
+  bpRegistration = reg;
 
   dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId());
   // Add the initial block token secret keys to the DN's secret manager.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ab43acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 49a1991..ca6cc03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -744,15 +744,16 @@ class BPServiceActor implements Runnable {
   void register(NamespaceInfo nsInfo) throws IOException {
 // The handshake() phase loaded the block pool storage
 // off disk - so update the bpRegistration object from that info
-bpRegistration = bpos.createRegistration();
+DatanodeRegistration newBpRegistration = bpos.createRegistration();
 
 LOG.info(this + " beginning handshake with NN");
 
 while (shouldRun()) {
   try {
 // Use returned registration from namenode with updated fields
-bpRegistration = bpNamenode.registerDatanode(bpRegistration);
-bpRegistration.setNamespaceInfo(nsInfo);
+newBpRegistration = bpNamenode.registerDatanode(newBpRegistration);
+newBpRegistration.setNamespaceInfo(nsInfo);
+bpRegistration = newBpRegistration;
 break;
   } catch(EOFException e) {  // namenode might have just restarted
 LOG.info("Problem connecting to server: " + nnAddr + " :"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ab43acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

hadoop git commit: HDFS-8995. Flaw in registration bookkeeping can make DN die on reconnect. (Kihwal Lee via yliu)

2015-09-01 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 462076715 -> 5652131d2


HDFS-8995. Flaw in registration bookkeeping can make DN die on reconnect. (Kihwal Lee via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5652131d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5652131d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5652131d

Branch: refs/heads/trunk
Commit: 5652131d2ea68c408dd3cd8bee31723642a8cdde
Parents: 4620767
Author: yliu 
Authored: Wed Sep 2 08:58:51 2015 +0800
Committer: yliu 
Committed: Wed Sep 2 08:58:51 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/datanode/BPOfferService.java| 3 +--
 .../apache/hadoop/hdfs/server/datanode/BPServiceActor.java| 7 ---
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 5 +
 4 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5652131d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 57ddcb2..ea398f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1302,6 +1302,9 @@ Release 2.7.2 - UNRELEASED
 
 HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
 
+HDFS-8995. Flaw in registration bookeeping can make DN die on reconnect.
+(Kihwal Lee via yliu)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5652131d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 92323f1..7aab4f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -351,9 +351,8 @@ class BPOfferService {
 reg.getStorageInfo().getNamespaceID(), "namespace ID");
 checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
 reg.getStorageInfo().getClusterID(), "cluster ID");
-  } else {
-bpRegistration = reg;
   }
+  bpRegistration = reg;
 
   dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId());
   // Add the initial block token secret keys to the DN's secret manager.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5652131d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 1817427..85ea6ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -767,15 +767,16 @@ class BPServiceActor implements Runnable {
   void register(NamespaceInfo nsInfo) throws IOException {
 // The handshake() phase loaded the block pool storage
 // off disk - so update the bpRegistration object from that info
-bpRegistration = bpos.createRegistration();
+DatanodeRegistration newBpRegistration = bpos.createRegistration();
 
 LOG.info(this + " beginning handshake with NN");
 
 while (shouldRun()) {
   try {
 // Use returned registration from namenode with updated fields
-bpRegistration = bpNamenode.registerDatanode(bpRegistration);
-bpRegistration.setNamespaceInfo(nsInfo);
+newBpRegistration = bpNamenode.registerDatanode(newBpRegistration);
+newBpRegistration.setNamespaceInfo(nsInfo);
+bpRegistration = newBpRegistration;
 break;
   } catch(EOFException e) {  // namenode might have just restarted
 LOG.info("Problem connecting to server: " + nnAddr + " :"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5652131d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

hadoop git commit: HDFS-7742. Favoring decommissioning node for replication can cause a block to stay underreplicated for long periods. Contributed by Nathan Roberts. (cherry picked from commit 04ee18

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 0b1e66f01 -> bc8728cd2


HDFS-7742. Favoring decommissioning node for replication can cause a block to stay
underreplicated for long periods. Contributed by Nathan Roberts.
(cherry picked from commit 04ee18ed48ceef34598f954ff40940abc9fde1d2)

(cherry picked from commit c4cedfc1d601127430c70ca8ca4d4e2ee2d1003d)
(cherry picked from commit c6b68a82adea8de488b255594d35db8e01f5fc8f)
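
What changed, in outline: a node in DECOMMISSION_IN_PROGRESS remains exempt from the max-replication-streams cap (it has no other work to do), but it no longer wins outright over every other candidate; the source is instead picked randomly among all eligible nodes, so one decommissioning node cannot monopolize, and thereby bottleneck, re-replication. A simplified sketch of that selection, using uniform reservoir sampling in place of the coin-flip switch the real BlockManager keeps; the node fields are stand-ins:

    import java.util.List;
    import java.util.Random;

    class SrcNodeSketch {
      static class Node {
        boolean corrupt, decommissioned, decommissionInProgress;
        int replicationStreams;
      }

      static Node chooseSource(List<Node> nodes, int maxStreams, Random r) {
        Node src = null;
        int eligibleSeen = 0;
        for (Node n : nodes) {
          if (n.corrupt || n.decommissioned) continue; // never usable
          // Decommissioning nodes bypass the streams cap but get no
          // special preference beyond that.
          if (!n.decommissionInProgress && n.replicationStreams >= maxStreams) {
            continue;
          }
          eligibleSeen++;
          // Reservoir sampling: every eligible node is equally likely.
          if (src == null || r.nextInt(eligibleSeen) == 0) {
            src = n;
          }
        }
        return src;
      }

      public static void main(String[] args) {
        Node busy = new Node(); busy.replicationStreams = 10;
        Node decomm = new Node();
        decomm.decommissionInProgress = true;
        decomm.replicationStreams = 10;
        Node idle = new Node();
        // true: decomm (cap-exempt) and idle are both eligible sources.
        System.out.println(
            chooseSource(List.of(busy, decomm, idle), 2, new Random()) != null);
      }
    }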


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc8728cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc8728cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc8728cd

Branch: refs/heads/branch-2.6.1
Commit: bc8728cd27870e048fd90d1e07ea92e8c9ed310d
Parents: 0b1e66f
Author: Kihwal Lee 
Authored: Mon Mar 30 10:11:25 2015 -0500
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 19:04:52 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/BlockManager.java| 10 ++---
 .../blockmanagement/TestBlockManager.java   | 42 
 3 files changed, 50 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc8728cd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1a62757..67fd7ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -109,6 +109,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7960. The full block report should prune zombie storages even if
 they're not empty. (cmccabe and Eddy Xu via wang)
 
+HDFS-7742. Favoring decommissioning node for replication can cause a block
+to stay underreplicated for long periods (Nathan Roberts via kihwal)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc8728cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 69f3e46..e5d97d1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1652,7 +1652,8 @@ public class BlockManager {
   // If so, do not select the node as src node
   if ((nodesCorrupt != null) && nodesCorrupt.contains(node))
 continue;
-  if(priority != UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY
+  if(priority != UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY 
+  && !node.isDecommissionInProgress() 
   && node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams)
   {
 continue; // already reached replication limit
@@ -1667,13 +1668,12 @@ public class BlockManager {
   // never use already decommissioned nodes
   if(node.isDecommissioned())
 continue;
-  // we prefer nodes that are in DECOMMISSION_INPROGRESS state
-  if(node.isDecommissionInProgress() || srcNode == null) {
+
+  // We got this far, current node is a reasonable choice
+  if (srcNode == null) {
 srcNode = node;
 continue;
   }
-  if(srcNode.isDecommissionInProgress())
-continue;
   // switch to a different node randomly
   // this to prevent from deterministically selecting the same node even
   // if the node failed to replicate the block on previous iterations

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc8728cd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index ddb6143..7eec52d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -536,6 +536,48 @@ public class TestBlockManager {
   }
 
   @Test
+  public void 

hadoop git commit: HDFS-7929. inotify unable to fetch pre-upgrade edit log segments once upgrade starts (Zhe Zhang via Colin P. McCabe)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 49ef26b5a -> 81a445edf


HDFS-7929. inotify unable to fetch pre-upgrade edit log segments once upgrade starts (Zhe Zhang via Colin P. McCabe)

(cherry picked from commit 43b41f22411439c5e23629197fb2fde45dcf0f0f)
(cherry picked from commit 219eb22c1571f76df32967a930049d983cbf5024)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java

(cherry picked from commit 03798416bfe27383c52e4d9f632fe9fa168c6e95)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81a445ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81a445ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81a445ed

Branch: refs/heads/branch-2.6.1
Commit: 81a445edf81f42c90a05d764dfebfadfafad622b
Parents: 49ef26b
Author: Colin Patrick Mccabe 
Authored: Wed Mar 18 18:48:54 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 16:55:50 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/server/namenode/FSImage.java|  2 +-
 .../server/namenode/FileJournalManager.java |  2 +-
 .../hdfs/server/namenode/NNUpgradeUtil.java | 44 --
 .../org/apache/hadoop/hdfs/TestDFSUpgrade.java  | 48 +++-
 5 files changed, 93 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81a445ed/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 44a7139..ff335ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -97,6 +97,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7587. Edit log corruption can happen if append fails with a quota
 violation. (jing9)
 
+HDFS-7929. inotify unable fetch pre-upgrade edit log segments once upgrade
+starts (Zhe Zhang via Colin P. McCabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81a445ed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 9b72421..51efb51 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -393,7 +393,7 @@ public class FSImage implements Closeable {
 for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
   StorageDirectory sd = it.next();
   try {
-NNUpgradeUtil.doPreUpgrade(sd);
+NNUpgradeUtil.doPreUpgrade(conf, sd);
   } catch (Exception e) {
 LOG.error("Failed to move aside pre-upgrade storage " +
 "in image directory " + sd.getRoot(), e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81a445ed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index 101c42c..2df052b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -585,7 +585,7 @@ public class FileJournalManager implements JournalManager {
   public void doPreUpgrade() throws IOException {
 LOG.info("Starting upgrade of edits directory " + sd.getRoot());
 try {
- NNUpgradeUtil.doPreUpgrade(sd);
+ NNUpgradeUtil.doPreUpgrade(conf, sd);
 } catch (IOException ioe) {
  LOG.error("Failed to move aside pre-upgrade storage " +
  "in image directory " + sd.getRoot(), ioe);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81a445ed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
--
diff --git 
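
The diff is cut off by the archive here, but the visible signature change (NNUpgradeUtil.doPreUpgrade(conf, sd)) is the heart of the fix. A minimal sketch of the idea, assuming the new Configuration-aware doPreUpgrade() hard-links the finalized edit segments from previous.tmp back into a fresh current/ directory so inotify readers can keep fetching pre-upgrade transactions during the upgrade window (names below are illustrative, not the committed API):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

// previous.tmp already holds the renamed contents of current/ at this point
static void relinkPreUpgradeEdits(File prevTmp, File curDir) throws IOException {
  if (!curDir.mkdir()) {
    throw new IOException("Cannot create directory " + curDir);
  }
  File[] segments = prevTmp.listFiles((dir, name) -> name.startsWith("edits_"));
  if (segments == null) {
    return;
  }
  for (File segment : segments) {
    // a hard link keeps the old segment readable from current/ without copying
    Files.createLink(new File(curDir, segment.getName()).toPath(), segment.toPath());
  }
}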

hadoop git commit: HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 81a445edf -> 553efd719


HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)

(cherry picked from commit 90164ffd84f6ef56e9f8f99dcc7424a8d115dbae)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java

(cherry picked from commit 2c9a7461ec2ceba5885e95bc79f8dcbfd198df60)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/553efd71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/553efd71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/553efd71

Branch: refs/heads/branch-2.6.1
Commit: 553efd719061e8fee98f91ed1b766d2e77c78c9c
Parents: 81a445e
Author: yliu 
Authored: Thu Mar 19 23:24:55 2015 +0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 17:06:37 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../server/blockmanagement/BlockManager.java| 41 
 .../hdfs/server/namenode/FSNamesystem.java  |  8 +++-
 3 files changed, 49 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/553efd71/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ff335ab..904d02d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -100,6 +100,8 @@ Release 2.6.1 - UNRELEASED
 HDFS-7929. inotify unable fetch pre-upgrade edit log segments once upgrade
 starts (Zhe Zhang via Colin P. McCabe)
 
+HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/553efd71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index d26cc52..5a38351 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1931,6 +1931,47 @@ public class BlockManager {
   }
 
   /**
+   * Mark block replicas as corrupt except those on the storages in 
+   * newStorages list.
+   */
+  public void markBlockReplicasAsCorrupt(BlockInfo block, 
+  long oldGenerationStamp, long oldNumBytes, 
+  DatanodeStorageInfo[] newStorages) throws IOException {
+assert namesystem.hasWriteLock();
+BlockToMarkCorrupt b = null;
+if (block.getGenerationStamp() != oldGenerationStamp) {
+  b = new BlockToMarkCorrupt(block, oldGenerationStamp,
+  "genstamp does not match " + oldGenerationStamp
+  + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
+} else if (block.getNumBytes() != oldNumBytes) {
+  b = new BlockToMarkCorrupt(block,
+  "length does not match " + oldNumBytes
+  + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH);
+} else {
+  return;
+}
+
+for (DatanodeStorageInfo storage : getStorages(block)) {
+  boolean isCorrupt = true;
+  if (newStorages != null) {
+for (DatanodeStorageInfo newStorage : newStorages) {
+  if (newStorage!= null && storage.equals(newStorage)) {
+isCorrupt = false;
+break;
+  }
+}
+  }
+  if (isCorrupt) {
+blockLog.info("BLOCK* markBlockReplicasAsCorrupt: mark block replica" +
+b + " on " + storage.getDatanodeDescriptor() +
+" as corrupt because the dn is not in the new committed " +
+"storage list.");
+markBlockAsCorrupt(b, storage, storage.getDatanodeDescriptor());
+  }
+}
+  }
+
+  /**
* processFirstBlockReport is intended only for processing "initial" block
* reports, the first block report received from a DN after it registers.
* It just adds all the valid replicas to the datanode, without calculating 
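
A minimal usage sketch for the new method (variable names are illustrative, not the committed FSNamesystem code): commitBlockSynchronization() first updates the stored block to its post-recovery generation stamp and length, then passes in the old values plus the committed storage list, so any replica that still carries the stale genstamp or length, or that lives on a datanode outside the committed list, is marked corrupt and eventually removed:

blockManager.markBlockReplicasAsCorrupt(
    storedBlock,        // BlockInfo already updated to the recovered genstamp/length
    oldGenerationStamp, // generation stamp before recovery
    oldNumBytes,        // block length before recovery
    trimmedStorages);   // DatanodeStorageInfo[] of the committed replicas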


hadoop git commit: HDFS-7960. The full block report should prune zombie storages even if they're not empty. Contributed by Colin McCabe and Eddy Xu.

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 9b105b78d -> 0b1e66f01


HDFS-7960. The full block report should prune zombie storages even if they're 
not empty. Contributed by Colin McCabe and Eddy Xu.

(cherry picked from commit 50ee8f4e67a66aa77c5359182f61f3e951844db6)
(cherry picked from commit 2f46ee50bd4efc82ba3d30bd36f7637ea9d9714e)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java

(cherry picked from commit 03d4af39e794dc03d764122077b434d658b6405e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b1e66f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b1e66f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b1e66f0

Branch: refs/heads/branch-2.6.1
Commit: 0b1e66f01dae1c5558e897e35b1cbe533d9c4542
Parents: 9b105b7
Author: Andrew Wang 
Authored: Mon Mar 23 22:00:34 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 18:30:18 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../DatanodeProtocolClientSideTranslatorPB.java |   5 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  15 +++
 .../server/blockmanagement/BlockManager.java|  55 +++-
 .../blockmanagement/DatanodeDescriptor.java |  51 ++-
 .../blockmanagement/DatanodeStorageInfo.java|  13 +-
 .../hdfs/server/datanode/BPServiceActor.java|  34 +++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  12 +-
 .../server/protocol/BlockReportContext.java |  52 +++
 .../hdfs/server/protocol/DatanodeProtocol.java  |  10 +-
 .../src/main/proto/DatanodeProtocol.proto   |  14 ++
 .../blockmanagement/TestBlockManager.java   |   8 +-
 .../TestNameNodePrunesMissingStorages.java  | 135 ++-
 .../server/datanode/BlockReportTestBase.java|   4 +-
 .../server/datanode/TestBPOfferService.java |  10 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java   |   4 +-
 .../datanode/TestDataNodeVolumeFailure.java |   3 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 ...TestDnRespectsBlockReportSplitThreshold.java |   7 +-
 .../TestNNHandlesBlockReportPerStorage.java |   7 +-
 .../TestNNHandlesCombinedBlockReport.java   |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   9 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   4 +-
 .../hdfs/server/namenode/ha/TestDNFencing.java  |   4 +-
 25 files changed, 425 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b1e66f0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5ffe005..1a62757 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -106,6 +106,9 @@ Release 2.6.1 - UNRELEASED
 provided by the client is larger than the one stored in the datanode.
 (Brahma Reddy Battula via szetszwo)
 
+HDFS-7960. The full block report should prune zombie storages even if
+they're not empty. (cmccabe and Eddy Xu via wang)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b1e66f0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index 46023ec..e169d0e 100644
--- 

hadoop git commit: MAPREDUCE-6303. Read timeout when retrying a fetch error can be fatal to a reducer. Contributed by Jason Lowe. (cherry picked from commit eccb7d46efbf07abcc6a01bd5e7d682f6815b824)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 bc8728cd2 -> 8a9665a58


MAPREDUCE-6303. Read timeout when retrying a fetch error can be fatal to a 
reducer. Contributed by Jason Lowe.
(cherry picked from commit eccb7d46efbf07abcc6a01bd5e7d682f6815b824)

(cherry picked from commit cacadea632f7ab6fe4fdb1432e1a2c48e8ebd55f)
(cherry picked from commit 2abd4f61075739514fb3e63b118448895be02a30)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a9665a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a9665a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a9665a5

Branch: refs/heads/branch-2.6.1
Commit: 8a9665a586624cfe7f11ad9e21976465e0bb0e21
Parents: bc8728c
Author: Junping Du 
Authored: Thu Apr 2 12:13:03 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 20:10:20 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../hadoop/mapreduce/task/reduce/Fetcher.java   | 73 ++--
 .../mapreduce/task/reduce/TestFetcher.java  | 33 +
 3 files changed, 74 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a9665a5/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 8a046c1..ff1e597 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -21,6 +21,9 @@ Release 2.6.1 - UNRELEASED
 MAPREDUCE-6230. Fixed RMContainerAllocator to update the new AMRMToken
 service name properly. (Jason Lowe via jianhe)
 
+MAPREDUCE-6303. Read timeout when retrying a fetch error can be fatal 
+to a reducer. (Jason Lowe via junping_du)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a9665a5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
index 3f40853..d867e4b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
@@ -258,6 +258,39 @@ class Fetcher<K,V> extends Thread {
 closeConnection();
   }
 
+  private DataInputStream openShuffleUrl(MapHost host,
+  Set<TaskAttemptID> remaining, URL url) {
+DataInputStream input = null;
+
+try {
+  setupConnectionsWithRetry(host, remaining, url);
+  if (stopped) {
+abortConnect(host, remaining);
+  } else {
+input = new DataInputStream(connection.getInputStream());
+  }
+} catch (IOException ie) {
+  boolean connectExcpt = ie instanceof ConnectException;
+  ioErrs.increment(1);
+  LOG.warn("Failed to connect to " + host + " with " + remaining.size() +
+   " map outputs", ie);
+
+  // If connect did not succeed, just mark all the maps as failed,
+  // indirectly penalizing the host
+  scheduler.hostFailed(host.getHostName());
+  for(TaskAttemptID left: remaining) {
+scheduler.copyFailed(left, host, false, connectExcpt);
+  }
+
+  // Add back all the remaining maps, WITHOUT marking them as failed
+  for(TaskAttemptID left: remaining) {
+scheduler.putBackKnownMapOutput(host, left);
+  }
+}
+
+return input;
+  }
+
   /**
* The crux of the matter...
* 
@@ -286,38 +319,12 @@ class Fetcher<K,V> extends Thread {
 Set<TaskAttemptID> remaining = new HashSet<TaskAttemptID>(maps);
 
 // Construct the url and connect
-DataInputStream input = null;
 URL url = getMapOutputURL(host, maps);
-try {
-  setupConnectionsWithRetry(host, remaining, url);
-  
-  if (stopped) {
-abortConnect(host, remaining);
-return;
-  }
-} catch (IOException ie) {
-  boolean connectExcpt = ie instanceof ConnectException;
-  ioErrs.increment(1);
-  LOG.warn("Failed to connect to " + host + " with " + remaining.size() + 
-   " map outputs", ie);
-
-  // If connect did not succeed, just mark all the maps as failed,
-  // indirectly penalizing the host
-  scheduler.hostFailed(host.getHostName());
-   
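
Extracting openShuffleUrl() is what makes the fix possible: the connect-and-open logic can now be re-run in the middle of a fetch. A minimal sketch of the calling pattern in copyFromHost(), following the names in the diff above (simplified, not the committed body):

DataInputStream input = openShuffleUrl(host, remaining, url);
if (input == null) {
  // connect failed; the remaining maps were already put back or marked failed
  return;
}
// If copying a map output later hits a read timeout, the fetcher can close the
// connection and call openShuffleUrl() again for the maps still in remaining,
// instead of letting a retried fetch die fatally on the timeout.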

hadoop git commit: YARN-3249. Add a 'kill application' button to Resource Manager's Web UI. Contributed by Ryu Kobayashi.

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 8b5bdac98 -> 81417f757


YARN-3249. Add a 'kill application' button to Resource Manager's Web UI. 
Contributed by Ryu Kobayashi.

(cherry picked from commit 1b672096121fef775572b517d4f5721997abbac6)
(cherry picked from commit 6660c2f83b855535217582326746dc76d53fdf61)
(cherry picked from commit 6ea859e435e7cd6bc342f67e1551ccb86fbd976f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81417f75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81417f75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81417f75

Branch: refs/heads/branch-2.6.1
Commit: 81417f757298ae74a290635cdeac935e13ed5628
Parents: 8b5bdac
Author: Tsuyoshi Ozawa 
Authored: Thu Mar 5 19:55:56 2015 +0900
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 14:37:21 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../hadoop/yarn/conf/YarnConfiguration.java |  6 
 .../server/resourcemanager/webapp/AppBlock.java | 35 
 3 files changed, 44 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81417f75/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cd70bcb..5317b59 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -6,6 +6,9 @@ Release 2.6.1 - UNRELEASED
 
   NEW FEATURES
 
+YARN-3249. Add a 'kill application' button to Resource Manager's Web UI.
+(Ryu Kobayashi via ozawa)
+
   IMPROVEMENTS
 
 YARN-3230. Clarify application states on the web UI. (Jian He via wangda)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81417f75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 83cbfc5..6d9c0da 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -185,6 +185,12 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME = 
   false;
 
+  /** Enable Resource Manager webapp ui actions */
+  public static final String RM_WEBAPP_UI_ACTIONS_ENABLED =
+RM_PREFIX + "webapp.ui-actions.enabled";
+  public static final boolean DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED =
+true;
+
   /** Whether the RM should enable Reservation System */
   public static final String RM_RESERVATION_SYSTEM_ENABLE = RM_PREFIX
   + "reservation-system.enable";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81417f75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
index c2b376e..bae4947 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
@@ -36,6 +36,7 @@ import 
org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -60,12 +61,16 @@ public class AppBlock extends HtmlBlock {
 
   private final Configuration conf;
   private final ResourceManager rm;
+  private final boolean rmWebAppUIActions;
 
  

hadoop git commit: YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving events for old client. (Zhihai Xu via kasha)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 dbc5bab9f -> 5a6755cc0


YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving 
events for old client. (Zhihai Xu via kasha)

(cherry picked from commit 8d88691d162f87f95c9ed7e0a569ef08e8385d4f)
(cherry picked from commit 0d62e948877e5d50f1b6fbe735a94ac6da5ff472)
(cherry picked from commit 4a5b0e708d42fbff571229a43d1762d1767e2db5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a6755cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a6755cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a6755cc

Branch: refs/heads/branch-2.6.1
Commit: 5a6755cc0fccb96d6cded15dec4b426c7f047e54
Parents: dbc5bab
Author: Karthik Kambatla 
Authored: Wed Mar 4 19:47:02 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 14:06:34 2015 -0700

--
 .../apache/hadoop/ha/ClientBaseWithFixes.java   | 11 +++-
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../recovery/ZKRMStateStore.java| 53 
 .../TestZKRMStateStoreZKClientConnections.java  | 33 +---
 4 files changed, 70 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a6755cc/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
index 11d4657..f063863 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
@@ -90,6 +90,14 @@ public abstract class ClientBaseWithFixes extends ZKTestCase 
{
 // XXX this doesn't need to be volatile! (Should probably be final)
 volatile CountDownLatch clientConnected;
 volatile boolean connected;
+protected ZooKeeper client;
+
+public void initializeWatchedClient(ZooKeeper zk) {
+if (client != null) {
+throw new RuntimeException("Watched Client was already set");
+}
+client = zk;
+}
 
 public CountdownWatcher() {
 reset();
@@ -191,8 +199,7 @@ public abstract class ClientBaseWithFixes extends 
ZKTestCase {
 zk.close();
 }
 }
-
-
+watcher.initializeWatchedClient(zk);
 return zk;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a6755cc/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 334a672..1d0518e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -94,6 +94,9 @@ Release 2.6.1 - UNRELEASED
 YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with 
pending 
 jobs. (Siqi Li via kasha)
 
+YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher 
receiving 
+events for old client. (Zhihai Xu via kasha)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a6755cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
index 1774b39..8abc64e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
@@ -153,7 +153,13 @@ public class ZKRMStateStore extends RMStateStore {
 
   @VisibleForTesting
   protected ZooKeeper zkClient;
-  private ZooKeeper oldZkClient;
+
+  /* activeZkClient is not used to do actual operations,
+   * it is only used to verify client session for watched events and
+   * it gets activated into zkClient on 
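
The comment above is cut off, but the mechanism is visible: the store keeps a handle to the client that owns the current session. A minimal sketch of the event-side check this enables (approximate shape, not the committed method):

public synchronized void processWatchEvent(ZooKeeper zk, WatchedEvent event)
    throws Exception {
  if (activeZkClient != zk) {
    // ZooKeeper#close() completes asynchronously, so a watcher tied to an
    // already-replaced session can still fire; ignore events from stale clients
    LOG.info("Ignoring watcher event " + event.getType()
        + " from an old ZooKeeper session");
    return;
  }
  // handle Expired / SyncConnected / Disconnected for the live session as before
}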

hadoop git commit: YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending jobs. (Siqi Li via kasha)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 2692401df -> dbc5bab9f


YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending 
jobs. (Siqi Li via kasha)

(cherry picked from commit 22426a1c9f4bd616558089b6862fd34ab42d19a7)
(cherry picked from commit 721d7b574126c4070322f70ec5b49a7b8558a4c7)
(cherry picked from commit 5dfa25f22a989222e8b3d1013117b3350a48b2c5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dbc5bab9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dbc5bab9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dbc5bab9

Branch: refs/heads/branch-2.6.1
Commit: dbc5bab9fd19fe76713174f257457b4359784127
Parents: 2692401
Author: Karthik Kambatla 
Authored: Wed Mar 4 18:06:36 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 13:54:04 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/fair/FairScheduler.java   |   1 +
 .../scheduler/fair/MaxRunningAppsEnforcer.java  |  40 ++-
 .../scheduler/fair/TestFairScheduler.java   | 310 ++-
 4 files changed, 348 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbc5bab9/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 78c191f..334a672 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -91,6 +91,9 @@ Release 2.6.1 - UNRELEASED
 YARN-3222. Fixed RMNode to send scheduler events in sequential order when a
 node reconnects. (Rohith Sharma K S via jianhe)
 
+YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with 
pending 
+jobs. (Siqi Li via kasha)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbc5bab9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index d633981..6c50e6f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -1388,6 +1388,7 @@ public class FairScheduler extends
 allocConf = queueInfo;
 allocConf.getDefaultSchedulingPolicy().initialize(clusterResource);
 queueMgr.updateAllocationConfiguration(allocConf);
+maxRunningEnforcer.updateRunnabilityOnReload();
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbc5bab9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
index feeda1e..45a8907 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
@@ -105,6 +105,26 @@ public class MaxRunningAppsEnforcer {
   }
 
   /**
+   * This is called after reloading the allocation configuration when the
+   * scheduler is reinitialized
+   *
+   * Checks to see whether any non-runnable applications become runnable
+   * now that the max running apps of given queue has been changed
+   *
+   * Runs in O(n) where n is the 
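
The javadoc is truncated, but the behavior it describes is simple: when a queue's maxRunningApps is raised on reload, the earliest-submitted waiting apps become runnable until the new cap is reached. A self-contained illustration of that rule (generic sketch, not the committed method):

import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

static <T> List<T> promoteOnReload(Deque<T> waiting, int running, int newMax) {
  List<T> promoted = new ArrayList<T>();
  // FIFO: the apps that have waited longest become runnable first
  while (running + promoted.size() < newMax && !waiting.isEmpty()) {
    promoted.add(waiting.pollFirst());
  }
  return promoted;
}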

hadoop git commit: YARN-3287. Made TimelineClient put methods do as the correct login context. Contributed by Daryn Sharp and Jonathan Eagles.

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 a57ada6c1 -> 9005b141a


YARN-3287. Made TimelineClient put methods do as the correct login context. 
Contributed by Daryn Sharp and Jonathan Eagles.

(cherry picked from commit d6e05c5ee26feefc17267b7c9db1e2a3dbdef117)
(cherry picked from commit a94d23762e2cf4211fe84661eb67504c7072db49)
(cherry picked from commit 68e07eb50b872ec8a78923df8f5f640f08a72aa2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9005b141
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9005b141
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9005b141

Branch: refs/heads/branch-2.6.1
Commit: 9005b141a5188b2509b115348dea50816766b7f1
Parents: a57ada6
Author: Zhijie Shen 
Authored: Mon Mar 9 13:54:36 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 15:24:36 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../client/api/impl/TimelineClientImpl.java |  82 +++
 .../TestTimelineAuthenticationFilter.java   | 221 +--
 3 files changed, 135 insertions(+), 171 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9005b141/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1b1d4c0..ef76d45 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -108,6 +108,9 @@ Release 2.6.1 - UNRELEASED
 YARN-3227. Timeline renew delegation token fails when RM user's TGT is 
expired
 (Zhijie Shen via xgong)
 
+YARN-3287. Made TimelineClient put methods do as the correct login context.
+(Daryn Sharp and Jonathan Eagles via zjshen)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9005b141/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index ad5345c..f5c85c1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -45,6 +45,7 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
+import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
@@ -105,6 +106,8 @@ public class TimelineClientImpl extends TimelineClient {
   private DelegationTokenAuthenticator authenticator;
   private DelegationTokenAuthenticatedURL.Token token;
   private URI resURI;
+  private UserGroupInformation authUgi;
+  private String doAsUser;
 
   @Private
   @VisibleForTesting
@@ -246,6 +249,15 @@ public class TimelineClientImpl extends TimelineClient {
   }
 
   protected void serviceInit(Configuration conf) throws Exception {
+UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+UserGroupInformation realUgi = ugi.getRealUser();
+if (realUgi != null) {
+  authUgi = realUgi;
+  doAsUser = ugi.getShortUserName();
+} else {
+  authUgi = ugi;
+  doAsUser = null;
+}
 ClientConfig cc = new DefaultClientConfig();
 cc.getClasses().add(YarnJacksonJaxbJsonProvider.class);
 connConfigurator = newConnConfigurator(conf);
@@ -295,16 +307,20 @@ public class TimelineClientImpl extends TimelineClient {
 doPosting(domain, "domain");
   }
 
-  private ClientResponse doPosting(Object obj, String path) throws 
IOException, YarnException {
+  private ClientResponse doPosting(final Object obj, final String path)
+  throws IOException, YarnException {
 ClientResponse resp;
 try {
-  resp = doPostingObject(obj, path);
-} catch (RuntimeException re) {
-  // runtime exception is expected if the client cannot connect the server
-  String msg =
-  "Failed to get the response from the timeline server.";
-  LOG.error(msg, re);
-  throw re;
+  
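
The message is truncated here, but the shape of the fix follows from the new authUgi and doAsUser fields above: every put now runs inside the login user's context. A minimal sketch (approximate, not the committed body of doPosting()):

import java.security.PrivilegedExceptionAction;

resp = authUgi.doAs(new PrivilegedExceptionAction<ClientResponse>() {
  @Override
  public ClientResponse run() throws Exception {
    // doAsUser, when set, is forwarded so the server sees the proxied user
    return doPostingObject(obj, path);
  }
});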

hadoop git commit: HDFS-7830. DataNode does not release the volume lock when adding a volume fails. (Lei Xu via Colin P. McCabe)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 5dbb0325d -> 9642a861e


HDFS-7830. DataNode does not release the volume lock when adding a volume 
fails. (Lei Xu via Colin P. McCabe)

(cherry picked from commit 5c1036d598051cf6af595740f1ab82092b0b6554)
(cherry picked from commit eefca23e8c5e474de1e25bf2ec8a5b266bbe8cfe)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java

(cherry picked from commit c723f3b1bd9eab261ab5edca33c4dae5ce3d0d30)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9642a861
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9642a861
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9642a861

Branch: refs/heads/branch-2.6.1
Commit: 9642a861e152fe4472b4d0ca30ef69adc48c0cb4
Parents: 5dbb032
Author: Colin Patrick Mccabe 
Authored: Tue Mar 10 18:20:25 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 15:35:08 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/server/common/Storage.java  |  2 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 16 ++-
 .../datanode/TestDataNodeHotSwapVolumes.java| 34 ++
 .../fsdataset/impl/FsDatasetTestUtil.java   | 49 
 .../fsdataset/impl/TestFsDatasetImpl.java   | 41 
 6 files changed, 112 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9642a861/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9796321..ed845f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -91,6 +91,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via
 Colin P. McCabe)
 
+HDFS-7830. DataNode does not release the volume lock when adding a volume
+fails. (Lei Xu via Colin P. Mccabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9642a861/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 14b52ce..8d0129a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -672,7 +672,7 @@ public abstract class Storage extends StorageInfo {
  */
 public void lock() throws IOException {
   if (isShared()) {
-LOG.info("Locking is disabled");
+LOG.info("Locking is disabled for " + this.root);
 return;
   }
   FileLock newLock = tryLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9642a861/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index cbcf6b8..f24d644 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -46,6 +46,7 @@ import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
@@ -322,6 +323,12 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
   }
 
+  @VisibleForTesting
+  public FsVolumeImpl 
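
The diff is truncated here, but the crux named in the title is small: if building the volume fails after the storage directory has been locked, the lock must be released before the error propagates. A minimal sketch of that shape (simplified; setupVolume() is a hypothetical stand-in for the add steps, not the committed code):

try {
  // instantiate the FsVolumeImpl and scan its block pools
  setupVolume(sd);
} catch (IOException e) {
  // the crux of HDFS-7830: release in_use.lock so the directory can be
  // retried or removed later instead of staying wedged
  sd.unlock();
  throw e;
}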

hadoop git commit: HADOOP-11710. Make CryptoOutputStream behave like DFSOutputStream wrt synchronization. (Sean Busbey via yliu)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 9642a861e -> 2230754f2


HADOOP-11710. Make CryptoOutputStream behave like DFSOutputStream wrt 
synchronization. (Sean Busbey via yliu)

(cherry picked from commit 813c93cb250d6d556604fe98845b979970bd5e18)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2230754f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2230754f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2230754f

Branch: refs/heads/branch-2.6.1
Commit: 2230754f2afaf5a5ce2c9a823ab80bc49e0bb150
Parents: 9642a86
Author: yliu 
Authored: Fri Mar 13 02:26:16 2015 +0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 15:42:37 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../apache/hadoop/crypto/CryptoOutputStream.java | 19 ---
 2 files changed, 15 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2230754f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a518100..2141078 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -48,6 +48,9 @@ Release 2.6.1 - UNRELEASED
 HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
 should be non static. (Sean Busbey via yliu)
 
+HADOOP-11710. Make CryptoOutputStream behave like DFSOutputStream wrt
+synchronization. (Sean Busbey via yliu)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2230754f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
index f1b2737..9e79fbf 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -40,6 +40,9 @@ import com.google.common.base.Preconditions;
  * padding = pos%(algorithm blocksize); 
  * 
  * The underlying stream offset is maintained as state.
+ *
+ * Note that while some of this class' methods are synchronized, this is just 
to
+ * match the threadsafety behavior of DFSOutputStream. See HADOOP-11710.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -125,7 +128,7 @@ public class CryptoOutputStream extends FilterOutputStream 
implements
* @throws IOException
*/
   @Override
-  public void write(byte[] b, int off, int len) throws IOException {
+  public synchronized void write(byte[] b, int off, int len) throws 
IOException {
 checkStream();
 if (b == null) {
   throw new NullPointerException();
@@ -212,14 +215,16 @@ public class CryptoOutputStream extends 
FilterOutputStream implements
   }
   
   @Override
-  public void close() throws IOException {
+  public synchronized void close() throws IOException {
 if (closed) {
   return;
 }
-
-super.close();
-freeBuffers();
-closed = true;
+try {
+  super.close();
+  freeBuffers();
+} finally {
+  closed = true;
+}
   }
   
   /**
@@ -227,7 +232,7 @@ public class CryptoOutputStream extends FilterOutputStream 
implements
* underlying stream, then do the flush.
*/
   @Override
-  public void flush() throws IOException {
+  public synchronized void flush() throws IOException {
 checkStream();
 encrypt();
 super.flush();
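
The synchronization matters because Syncable consumers (an HBase WAL on encrypted HDFS, for example) flush from a different thread than the writer. A self-contained illustration of the pattern that is now safe in the same way it is on DFSOutputStream (a plain OutputStream is used so the snippet stands alone):

import java.io.IOException;
import java.io.OutputStream;
import java.io.UncheckedIOException;

static void writeAndFlushConcurrently(final OutputStream out, final byte[] data)
    throws InterruptedException {
  Thread writer = new Thread(() -> {
    try { out.write(data, 0, data.length); } catch (IOException e) { throw new UncheckedIOException(e); }
  });
  Thread flusher = new Thread(() -> {
    try { out.flush(); } catch (IOException e) { throw new UncheckedIOException(e); }
  });
  writer.start(); flusher.start();
  writer.join(); flusher.join();
}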



[1/2] hadoop git commit: YARN-3222. Fixed NPE on RMNodeImpl#ReconnectNodeTransition when a node is reconnected with a different port. Contributed by Rohith Sharmaks (cherry picked from commit b2f1ec31

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 6e090bc53 -> 2692401df


YARN-3222. Fixed NPE on RMNodeImpl#ReconnectNodeTransition when a node is 
reconnected with a different port. Contributed by Rohith Sharmaks
(cherry picked from commit b2f1ec312ee431aef762cfb49cb29cd6f4661e86)

(cherry picked from commit 888a44563819ba910dc3cc10d10ee0fb8f05db61)
(cherry picked from commit b78f87825bd593e30b2f2ea76f37c7a4fd673ab2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db92b09e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db92b09e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db92b09e

Branch: refs/heads/branch-2.6.1
Commit: db92b09e0396d4f0e5b227c40a1c0a8ca62eb048
Parents: 6e090bc
Author: Jian He 
Authored: Tue Mar 3 16:25:57 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 13:39:35 2015 -0700

--
 .../resourcemanager/rmnode/RMNodeImpl.java  | 34 +++-
 .../yarn/server/resourcemanager/MockNM.java |  6 +++-
 .../TestResourceTrackerService.java | 17 +-
 3 files changed, 39 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db92b09e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 1774eb5..b92f399 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -564,12 +564,12 @@ public class RMNodeImpl implements RMNode, 
EventHandler<RMNodeEvent> {
 rmNode.nodeUpdateQueue.clear();
 rmNode.context.getDispatcher().getEventHandler().handle(
 new NodeRemovedSchedulerEvent(rmNode));
-
+
 if (rmNode.getHttpPort() == newNode.getHttpPort()) {
   // Reset heartbeat ID since node just restarted.
   rmNode.getLastNodeHeartBeatResponse().setResponseId(0);
-  if (rmNode.getState() != NodeState.UNHEALTHY) {
-// Only add new node if old state is not UNHEALTHY
+  if (rmNode.getState().equals(NodeState.RUNNING)) {
+// Only add new node if old state is RUNNING
 rmNode.context.getDispatcher().getEventHandler().handle(
 new NodeAddedSchedulerEvent(newNode));
   }
@@ -590,28 +590,30 @@ public class RMNodeImpl implements RMNode, 
EventHandler<RMNodeEvent> {
   } else {
 rmNode.httpPort = newNode.getHttpPort();
 rmNode.httpAddress = newNode.getHttpAddress();
-rmNode.totalCapability = newNode.getTotalCapability();
+boolean isCapabilityChanged = false;
+if (rmNode.getTotalCapability() != newNode.getTotalCapability()) {
+  rmNode.totalCapability = newNode.getTotalCapability();
+  isCapabilityChanged = true;
+}
   
 // Reset heartbeat ID since node just restarted.
 rmNode.getLastNodeHeartBeatResponse().setResponseId(0);
-  }
 
-  if (null != reconnectEvent.getRunningApplications()) {
 for (ApplicationId appId : reconnectEvent.getRunningApplications()) {
   handleRunningAppOnNode(rmNode, rmNode.context, appId, rmNode.nodeId);
 }
-  }
 
-  rmNode.context.getDispatcher().getEventHandler().handle(
-  new NodesListManagerEvent(
-  NodesListManagerEventType.NODE_USABLE, rmNode));
-  if (rmNode.getState().equals(NodeState.RUNNING)) {
-// Update scheduler node's capacity for reconnect node.
-rmNode.context.getDispatcher().getEventHandler().handle(
-new NodeResourceUpdateSchedulerEvent(rmNode, 
-ResourceOption.newInstance(newNode.getTotalCapability(), -1)));
+if (isCapabilityChanged
+&& rmNode.getState().equals(NodeState.RUNNING)) {
+  // Update scheduler node's capacity for reconnect node.
+  rmNode.context
+  .getDispatcher()
+  .getEventHandler()
+  .handle(
+  new NodeResourceUpdateSchedulerEvent(rmNode, ResourceOption
+  

[2/2] hadoop git commit: YARN-3222. Added the missing CHANGES.txt entry.

2015-09-01 Thread vinodkv
YARN-3222. Added the missing CHANGES.txt entry.

(cherry picked from commit 4620767156ecc43424bc6c7c4d50519e2563cc69)
(cherry picked from commit 015696fb81912d545ce602b245c456c2741a3922)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2692401d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2692401d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2692401d

Branch: refs/heads/branch-2.6.1
Commit: 2692401dfbb49782cae8338e5f6c623a4541d96b
Parents: db92b09
Author: Vinod Kumar Vavilapalli 
Authored: Tue Sep 1 13:43:10 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 13:49:04 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2692401d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 78d7093..78c191f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -88,6 +88,9 @@ Release 2.6.1 - UNRELEASED
 YARN-3239. WebAppProxy does not support a final tracking url which has
 query fragments and params (Jian He via jlowe)
 
+YARN-3222. Fixed RMNode to send scheduler events in sequential order when a
+node reconnects. (Rohith Sharma K S via jianhe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



hadoop git commit: HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream should be non static. (Sean Busbey via yliu)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 5a6755cc0 -> bb1b87f3b


HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream should be 
non static. (Sean Busbey via yliu)

(cherry picked from commit b569c3ab1cb7e328dde822f6b2405d24b9560e3a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb1b87f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb1b87f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb1b87f3

Branch: refs/heads/branch-2.6.1
Commit: bb1b87f3b8e1da99e56aa690fd5c4011ad4eb532
Parents: 5a6755c
Author: yliu 
Authored: Thu Mar 5 06:39:58 2015 +0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 14:28:19 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java | 2 +-
 .../main/java/org/apache/hadoop/crypto/CryptoOutputStream.java| 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb1b87f3/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2dc1520..a518100 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -45,6 +45,9 @@ Release 2.6.1 - UNRELEASED
 HADOOP-11604. Prevent ConcurrentModificationException while closing domain
 sockets during shutdown of DomainSocketWatcher thread. (cnauroth)
 
+HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
+should be non static. (Sean Busbey via yliu)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb1b87f3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
index 4b53563..5445d8e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
@@ -60,7 +60,7 @@ public class CryptoInputStream extends FilterInputStream 
implements
 Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, 
 CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess, 
 ReadableByteChannel {
-  private static final byte[] oneByteBuf = new byte[1];
+  private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Decryptor decryptor;
   private final int bufferSize;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb1b87f3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
index fc07923..f1b2737 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -45,7 +45,7 @@ import com.google.common.base.Preconditions;
 @InterfaceStability.Evolving
 public class CryptoOutputStream extends FilterOutputStream implements 
 Syncable, CanSetDropBehind {
-  private static final byte[] oneByteBuf = new byte[1];
+  private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Encryptor encryptor;
   private final int bufferSize;
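
The one-line change matters because every single-byte read and write is funneled through this scratch buffer. With a static field, two streams on different threads share one byte[1] and can observe each other's plaintext; making it an instance field gives each stream its own byte. An approximate rendering of the path involved (the committed read() may differ in detail):

@Override
public int read() throws IOException {
  // were oneByteBuf static, another stream could overwrite this byte between
  // the bulk read below and the return
  return (read(oneByteBuf, 0, 1) == -1) ? -1 : (oneByteBuf[0] & 0xff);
}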



hadoop git commit: HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via Colin P. McCabe)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 9005b141a -> 5dbb0325d


HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via Colin 
P. McCabe)

(cherry picked from commit a17584936cc5141e3f5612ac3ecf35e27968e439)
(cherry picked from commit 7779f38e68ca4e0f7ac08eb7e5f4801b89979d02)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java

(cherry picked from commit 65ae3e2ff16ce1114a0115ff916837b0173b77f1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5dbb0325
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5dbb0325
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5dbb0325

Branch: refs/heads/branch-2.6.1
Commit: 5dbb0325df4f95e5f2ab48fc8c627d1b6807eb42
Parents: 9005b14
Author: Colin Patrick Mccabe 
Authored: Tue Jan 20 20:11:09 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 15:29:59 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 16 +
 .../datanode/fsdataset/impl/FsVolumeList.java   |  8 +++--
 .../fsdataset/impl/TestFsDatasetImpl.java   | 37 ++--
 4 files changed, 52 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5dbb0325/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d7ff237..9796321 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -88,6 +88,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7885. Datanode should not trust the generation stamp provided by
 client. (Tsz Wo Nicholas Sze via jing9)
 
+HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via
+Colin P. McCabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5dbb0325/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 0c2337e..cbcf6b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -336,7 +336,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
 StorageType storageType = location.getStorageType();
 final FsVolumeImpl fsVolume = new FsVolumeImpl(
-this, sd.getStorageUuid(), dir, this.conf, storageType);
+this, sd.getStorageUuid(), sd.getCurrentDir(), this.conf, storageType);
 final ReplicaMap tempVolumeMap = new ReplicaMap(fsVolume);
 ArrayList<IOException> exceptions = Lists.newArrayList();
 
@@ -379,19 +379,19 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> 
{
*/
   @Override
  public synchronized void removeVolumes(Collection<StorageLocation> volumes) {
-Set<File> volumeSet = new HashSet<File>();
+Set<String> volumeSet = new HashSet<String>();
 for (StorageLocation sl : volumes) {
-  volumeSet.add(sl.getFile());
+  volumeSet.add(sl.getFile().getAbsolutePath());
 }
 for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
   Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-  if (volumeSet.contains(sd.getRoot())) {
-String volume = sd.getRoot().toString();
+  String volume = sd.getRoot().getAbsolutePath();
+  if (volumeSet.contains(volume)) {
 LOG.info("Removing " + volume + " from FsDataset.");
 
 // Disable the volume from the service.
 asyncDiskService.removeVolume(sd.getCurrentDir());
-this.volumes.removeVolume(volume);
+this.volumes.removeVolume(sd.getRoot());
 
 // Removed all replica information for the blocks on the volume. Unlike
 // updating the volumeMap in addVolume(), this operation does not scan
@@ -401,7 +401,9 @@ class FsDatasetImpl implements FsDatasetSpi {
   for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
   it.hasNext(); ) {
 ReplicaInfo block = it.next();
-if 

hadoop git commit: YARN-3222. Added the missing CHANGES.txt entry.

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7d833a305 -> acb300018


YARN-3222. Added the missing CHANGES.txt entry.

(cherry picked from commit 4620767156ecc43424bc6c7c4d50519e2563cc69)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/acb30001
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/acb30001
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/acb30001

Branch: refs/heads/branch-2
Commit: acb3000187bcd8eef51a65e30a473e620af2e39e
Parents: 7d833a3
Author: Vinod Kumar Vavilapalli 
Authored: Tue Sep 1 13:43:10 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 13:44:10 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/acb30001/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f8c6c96..6b81b5d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1656,6 +1656,9 @@ Release 2.7.0 - 2015-04-20
 YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a 
queue's
 available resource-limit from the parent queue. (Wangda Tan via vinodkv)
 
+YARN-3222. Fixed RMNode to send scheduler events in sequential order when a
+node reconnects. (Rohith Sharma K S via jianhe)
+
 YARN-3131. YarnClientImpl should check FAILED and KILLED state in
 submitApplication (Chang Li via jlowe)
 



hadoop git commit: YARN-3222. Added the missing CHANGES.txt entry.

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0eb9b1932 -> 462076715


YARN-3222. Added the missing CHANGES.txt entry.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46207671
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46207671
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46207671

Branch: refs/heads/trunk
Commit: 4620767156ecc43424bc6c7c4d50519e2563cc69
Parents: 0eb9b19
Author: Vinod Kumar Vavilapalli 
Authored: Tue Sep 1 13:43:10 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 13:43:10 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46207671/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e2b1307..b33111c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1701,6 +1701,9 @@ Release 2.7.0 - 2015-04-20
 YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a 
queue's
 available resource-limit from the parent queue. (Wangda Tan via vinodkv)
 
+YARN-3222. Fixed RMNode to send scheduler events in sequential order when a
+node reconnects. (Rohith Sharma K S via jianhe)
+
 YARN-3131. YarnClientImpl should check FAILED and KILLED state in
 submitApplication (Chang Li via jlowe)
 



hadoop git commit: YARN-3230. Clarify application states on the web UI. (Jian He via wangda)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 bb1b87f3b -> 8b5bdac98


YARN-3230. Clarify application states on the web UI. (Jian He via wangda)

(cherry picked from commit ce5bf927c3d9f212798de1bf8706e5e9def235a1)
(cherry picked from commit a1963968d2a9589fcefaab0d63feeb68c07f4d06)
(cherry picked from commit 591e261ccf1fb5dd25e87665c8d5c0341ff6fb24)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b5bdac9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b5bdac9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b5bdac9

Branch: refs/heads/branch-2.6.1
Commit: 8b5bdac98eddad516f7faa74596b6293d50a17a7
Parents: bb1b87f
Author: Wangda Tan 
Authored: Fri Feb 20 10:39:28 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 14:34:07 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  2 ++
 .../server/resourcemanager/webapp/AppBlock.java | 33 ++--
 .../resourcemanager/webapp/AppsBlock.java   |  6 ++--
 .../resourcemanager/webapp/dao/AppInfo.java |  8 ++---
 4 files changed, 41 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b5bdac9/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1d0518e..cd70bcb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -8,6 +8,8 @@ Release 2.6.1 - UNRELEASED
 
   IMPROVEMENTS
 
+YARN-3230. Clarify application states on the web UI. (Jian He via wangda)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b5bdac9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
index 1856d75..c2b376e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
@@ -32,8 +32,10 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -131,8 +133,9 @@ public class AppBlock extends HtmlBlock {
 ._("Name:", app.getName())
 ._("Application Type:", app.getApplicationType())
 ._("Application Tags:", app.getApplicationTags())
-._("State:", app.getState())
-._("FinalStatus:", app.getFinalStatus())
+._("YarnApplicationState:", clarifyAppState(app.getState()))
+._("FinalStatus reported by AM:",
+  clairfyAppFinalStatus(app.getFinalStatus()))
 ._("Started:", Times.format(app.getStartTime()))
 ._("Elapsed:",
 StringUtils.formatTime(Times.elapsed(app.getStartTime(),
@@ -198,4 +201,30 @@ public class AppBlock extends HtmlBlock {
 table._();
 div._();
   }
+
+  private String clarifyAppState(YarnApplicationState state) {
+String ret = state.toString();
+switch (state) {
+case NEW:
+  return ret + ": waiting for application to be initialized";
+case NEW_SAVING:
+  return ret + ": waiting for application to be persisted in state-store.";
+case SUBMITTED:
+  return ret + ": waiting for application to be accepted by scheduler.";
+case ACCEPTED:
+  return ret + ": waiting for AM container to be allocated, launched and"
+  + " register with RM.";
+case RUNNING:
+  return ret + ": AM has registered with RM and started running.";
+default:
+  return ret;
+}
+  }
+
+ 
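The diff is cut off before the companion helper referenced above. A plausible reconstruction of `clairfyAppFinalStatus` (the misspelled name is taken from the lines shown earlier in this diff; the body below is an assumption, not the verbatim commit) annotates the one ambiguous value the same way:

```java
private String clairfyAppFinalStatus(FinalApplicationStatus status) {
  // UNDEFINED only means the AM has not reported a final status yet;
  // spelling that out avoids confusing users reading the web UI.
  if (status == FinalApplicationStatus.UNDEFINED) {
    return "Application has not completed yet.";
  }
  return status.toString();
}
```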

hadoop git commit: YARN-3227. Timeline renew delegation token fails when RM user's TGT is expired. Contributed by Zhijie Shen

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 b25491dc4 -> a57ada6c1


YARN-3227. Timeline renew delegation token fails when RM user's TGT is
expired. Contributed by Zhijie Shen

(cherry picked from commit d1abc5d4fc00bb1b226066684556ba16ace71744)
(cherry picked from commit 56c2050ab7c04e9741bcba9504b71e5a54d09eea)
(cherry picked from commit 780a9b1a98827a692e0ea9fbc92f9d1ab979e3e0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a57ada6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a57ada6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a57ada6c

Branch: refs/heads/branch-2.6.1
Commit: a57ada6c1f0c921df910c8b69cddbddd55a4f1b0
Parents: b25491d
Author: Xuan 
Authored: Fri Mar 6 13:32:05 2015 -0800
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 15:21:46 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java| 2 ++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a57ada6c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b8c15f9..1b1d4c0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -105,6 +105,9 @@ Release 2.6.1 - UNRELEASED
 YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher 
receiving 
 events for old client. (Zhihai Xu via kasha)
 
+YARN-3227. Timeline renew delegation token fails when RM user's TGT is 
expired
+(Zhijie Shen via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a57ada6c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index 78901c3..ad5345c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -425,6 +425,7 @@ public class TimelineClientImpl extends TimelineClient {
 UserGroupInformation callerUGI = isProxyAccess ?
 UserGroupInformation.getCurrentUser().getRealUser()
 : UserGroupInformation.getCurrentUser();
+callerUGI.checkTGTAndReloginFromKeytab();
 try {
   return callerUGI.doAs(action);
 } catch (UndeclaredThrowableException e) {
@@ -474,6 +475,7 @@ public class TimelineClientImpl extends TimelineClient {
   : UserGroupInformation.getCurrentUser();
   final String doAsUser = isProxyAccess ?
   UserGroupInformation.getCurrentUser().getShortUserName() : null;
+  callerUGI.checkTGTAndReloginFromKeytab();
   try {
 return callerUGI.doAs(new 
PrivilegedExceptionAction() {
   @Override

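The hunk above is truncated, but the two inserted `checkTGTAndReloginFromKeytab()` calls are the entire fix: refresh the Kerberos TGT from the keytab right before each privileged action, so a long-lived daemon does not start failing once its original ticket expires. A minimal sketch of the pattern (assumes a keytab login; not the Timeline client source):

```java
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
// No-op unless this UGI was logged in from a keytab; otherwise it re-logins
// when the TGT is close to expiry.
ugi.checkTGTAndReloginFromKeytab();
ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
  // issue the delegation-token RPC here with fresh credentials
  return null;
});
```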


hadoop git commit: HADOOP-12367. Move TestFileUtil's test resources to resources folder. (wang via yliu)

2015-09-01 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8dbe2a12b -> 2345627ad


HADOOP-12367. Move TestFileUtil's test resources to resources folder. (wang via 
yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2345627a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2345627a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2345627a

Branch: refs/heads/branch-2
Commit: 2345627ad3f81c5854cc6021ea102d1f62633515
Parents: 8dbe2a1
Author: yliu 
Authored: Tue Sep 1 16:25:11 2015 +0800
Committer: yliu 
Committed: Tue Sep 1 16:25:11 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   9 ++---
 hadoop-common-project/hadoop-common/pom.xml |  19 +--
 .../java/org/apache/hadoop/fs/test-untar.tar| Bin 20480 -> 0 bytes
 .../java/org/apache/hadoop/fs/test-untar.tgz| Bin 2024 -> 0 bytes
 .../src/test/resources/test-untar.tar   | Bin 0 -> 20480 bytes
 .../src/test/resources/test-untar.tgz   | Bin 0 -> 2024 bytes
 6 files changed, 7 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2345627a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d813bed..bb269c8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -175,6 +175,12 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12050. Enable MaxInactiveInterval for hadoop http auth token
 (hzlu via benoyantony)
 
+HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.
+(wang)
+
+HADOOP-12367. Move TestFileUtil's test resources to resources folder.
+(wang via yliu)
+
   BUG FIXES
 
 HADOOP-12124. Add HTrace support for FsShell (cmccabe)
@@ -246,9 +252,6 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12325. RPC Metrics : Add the ability track and log slow RPCs.
 (Anu Engineer via xyao)
 
-HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.
-(wang)
-
  OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2345627a/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 6deadd8..a064a57 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -448,23 +448,6 @@
 
   
   
-copy-test-tarballs
-process-test-resources
-
-  run
-
-
-  
-
-  
-
-
-  
-
-  
-
-  
-  
 pre-site
 
   run
@@ -500,7 +483,7 @@
 
src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h
 
src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c
 
src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h
-
src/test/java/org/apache/hadoop/fs/test-untar.tgz
+src/test/resources/test-untar.tgz
 src/test/resources/test.har/_SUCCESS
 src/test/resources/test.har/_index
 src/test/resources/test.har/_masterindex

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2345627a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
deleted file mode 100644
index 949e985..000
Binary files 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2345627a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
deleted file mode 100644
index 9e9ef40..000
Binary files 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
 and /dev/null differ

hadoop git commit: YARN-2801. Add documentation for node labels feature. Contributed by Wangda Tan and Naganarasimha G R.

2015-09-01 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 f44ed4f4b -> 1b5bf5800


YARN-2801. Add documentation for node labels feature. Contributed by Wangda Tan 
and Naganarasimha G R.

(cherry picked from commit faa38e1aa49907254bf981662a8aeb5dc52e75e3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b5bf580
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b5bf580
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b5bf580

Branch: refs/heads/branch-2.7
Commit: 1b5bf58001d68e83534d4fc93959cc2c42540629
Parents: f44ed4f
Author: Tsuyoshi Ozawa 
Authored: Tue Sep 1 17:54:14 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Tue Sep 1 17:56:08 2015 +0900

--
 hadoop-project/src/site/site.xml|   1 +
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../src/site/markdown/NodeLabel.md  | 140 +++
 3 files changed, 144 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5bf580/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 9fa7e4d..df93945 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -120,6 +120,7 @@
   
   
   
+  
   
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5bf580/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e07c8ae..ee9b66a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -20,6 +20,9 @@ Release 2.7.2 - UNRELEASED
 YARN-4092. Fixed UI redirection to print useful messages when both RMs are
 in standby mode. (Xuan Gong via jianhe)
 
+YARN-2801. Add documentation for node labels feature. (Wangda Tan and 
Naganarasimha 
+G R  via ozawa)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5bf580/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
new file mode 100644
index 000..87019cd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
@@ -0,0 +1,140 @@
+
+
+YARN Node Labels
+===
+
+# Overview
+A node label is a way to group nodes with similar characteristics; applications
+can specify where they should run.
+
+Currently only node partitions are supported, which means:
+
+* One node can have at most one node partition, so node partitions split a
+cluster into several disjoint sub-clusters. By default, nodes belong to the
+DEFAULT partition (partition="").
+* Users need to configure how much of each partition's resources can be used
+by different queues. For more detail, please refer to the next section.
+* There are two kinds of node partitions:
+* Exclusive: containers are allocated only to nodes whose node partition
+exactly matches the request (e.g. a request for partition="x" is allocated to
+a node with partition="x"; a request for the DEFAULT partition is allocated to
+DEFAULT-partition nodes).
+* Non-exclusive: a non-exclusive partition shares its idle resources with
+containers requesting the DEFAULT partition.
+
+Users can specify the set of node labels each queue can access; an application
+can only use a subset of the node labels accessible to the queue that contains
+it.
+
+# Features
+```Node Labels``` currently supports the following features:
+
+* Partition cluster - each node can be assigned one label, so the cluster is
+divided into several smaller disjoint partitions.
+* ACL of node-labels on queues - users can set accessible node labels on each
+queue, so that some nodes can only be accessed by specific queues.
+* Specify the percentage of a partition's resources that a queue can access -
+users can set a percentage like: queue A can access 30% of the resources on
+nodes with label=hbase. Such percentage settings are consistent with the
+existing resource manager.
+* Specify a required node label in a resource request; it will only be
+allocated when a node has the same label. If no node label requirement is
+specified, such a resource request will only be allocated on nodes belonging
+to the DEFAULT partition.
+* Operability
+* Node labels and node label mappings can be recovered across RM restarts
+* Update node labels - admins can update labels on nodes and labels on queues
+  when RM is running

[1/2] hadoop git commit: YARN-4082. Container shouldn't be killed when node's label updated. Contributed by Wangda Tan.

2015-09-01 Thread vvasudev
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2345627ad -> 855e0f8b0
  refs/heads/trunk f4d96be6c -> bf669b6d9


YARN-4082. Container shouldn't be killed when node's label updated. Contributed 
by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf669b6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf669b6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf669b6d

Branch: refs/heads/trunk
Commit: bf669b6d9f8ba165e30b8823218d625a49958925
Parents: f4d96be
Author: Varun Vasudev 
Authored: Tue Sep 1 14:19:11 2015 +0530
Committer: Varun Vasudev 
Committed: Tue Sep 1 14:19:11 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/capacity/AbstractCSQueue.java |  27 ++
 .../scheduler/capacity/CSQueue.java |  26 ++
 .../scheduler/capacity/CapacityScheduler.java   |  40 +--
 .../scheduler/capacity/LeafQueue.java   |  16 ++
 .../scheduler/common/fica/FiCaSchedulerApp.java |   9 +
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 249 ---
 7 files changed, 314 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf669b6d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 80cf793..999654d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -804,6 +804,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3896. RMNode transitioned from RUNNING to REBOOTED because its 
response id 
 has not been reset synchronously. (Jun Gong via rohithsharmaks)
 
+YARN-4082. Container shouldn't be killed when node's label updated.
+(Wangda Tan via vvasudev)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf669b6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 792c25c..0ae4d1a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsMana
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -543,6 +544,32 @@ public abstract class AbstractCSQueue implements CSQueue {
 }
   }
   
+  @Override
+  public void incUsedResource(String nodeLabel, Resource resourceToInc,
+  SchedulerApplicationAttempt application) {
+if (nodeLabel == null) {
+  nodeLabel = RMNodeLabelsManager.NO_LABEL;
+}
+// ResourceUsage has its own lock; no additional lock is needed here.
+queueUsage.incUsed(nodeLabel, resourceToInc);
+if (null != parent) {
+  parent.incUsedResource(nodeLabel, resourceToInc, null);
+}
+  }
+
+  @Override
+  public void decUsedResource(String nodeLabel, Resource resourceToDec,
+  SchedulerApplicationAttempt application) {
+if (nodeLabel == null) {
+  nodeLabel = RMNodeLabelsManager.NO_LABEL;
+}
+// ResourceUsage has its own lock; no additional lock is needed here.
+queueUsage.decUsed(nodeLabel, resourceToDec);
+if (null != parent) {
+  parent.decUsedResource(nodeLabel, resourceToDec, null);
+}
+  }
+
   /**
* Return if the queue has pending resource on given nodePartition and
* schedulingMode. 
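The new `incUsedResource`/`decUsedResource` pair simply walks from the queue up through its ancestors, adjusting the per-label usage bucket at every level. A toy illustration of that propagation pattern (simplified types, not the scheduler API):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class ToyQueue {
  private final ToyQueue parent;
  private final Map<String, Long> usedByLabel = new ConcurrentHashMap<>();

  ToyQueue(ToyQueue parent) { this.parent = parent; }

  void incUsed(String label, long memMb) {
    usedByLabel.merge(label, memMb, Long::sum);
    if (parent != null) {
      parent.incUsed(label, memMb);  // keep every ancestor's totals in sync
    }
  }

  void decUsed(String label, long memMb) {
    usedByLabel.merge(label, -memMb, Long::sum);
    if (parent != null) {
      parent.decUsed(label, memMb);
    }
  }
}
```

Because each level's map handles its own synchronization (as `ResourceUsage` does in the patch), the upward walk needs no additional lock.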


[2/2] hadoop git commit: YARN-4082. Container shouldn't be killed when node's label updated. Contributed by Wangda Tan.

2015-09-01 Thread vvasudev
YARN-4082. Container shouldn't be killed when node's label updated. Contributed 
by Wangda Tan.

(cherry picked from commit bf669b6d9f8ba165e30b8823218d625a49958925)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/855e0f8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/855e0f8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/855e0f8b

Branch: refs/heads/branch-2
Commit: 855e0f8b0064db0fa90d616216230e523c84d39a
Parents: 2345627
Author: Varun Vasudev 
Authored: Tue Sep 1 14:19:11 2015 +0530
Committer: Varun Vasudev 
Committed: Tue Sep 1 14:20:54 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/capacity/AbstractCSQueue.java |  27 ++
 .../scheduler/capacity/CSQueue.java |  26 ++
 .../scheduler/capacity/CapacityScheduler.java   |  40 +--
 .../scheduler/capacity/LeafQueue.java   |  16 ++
 .../scheduler/common/fica/FiCaSchedulerApp.java |   9 +
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 249 ---
 7 files changed, 314 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/855e0f8b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 837d3cb..c19bd7b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -752,6 +752,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3896. RMNode transitioned from RUNNING to REBOOTED because its 
response id 
 has not been reset synchronously. (Jun Gong via rohithsharmaks)
 
+YARN-4082. Container shouldn't be killed when node's label updated.
+(Wangda Tan via vvasudev)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/855e0f8b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 792c25c..0ae4d1a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsMana
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -543,6 +544,32 @@ public abstract class AbstractCSQueue implements CSQueue {
 }
   }
   
+  @Override
+  public void incUsedResource(String nodeLabel, Resource resourceToInc,
+  SchedulerApplicationAttempt application) {
+if (nodeLabel == null) {
+  nodeLabel = RMNodeLabelsManager.NO_LABEL;
+}
+// ResourceUsage has its own lock; no additional lock is needed here.
+queueUsage.incUsed(nodeLabel, resourceToInc);
+if (null != parent) {
+  parent.incUsedResource(nodeLabel, resourceToInc, null);
+}
+  }
+
+  @Override
+  public void decUsedResource(String nodeLabel, Resource resourceToDec,
+  SchedulerApplicationAttempt application) {
+if (nodeLabel == null) {
+  nodeLabel = RMNodeLabelsManager.NO_LABEL;
+}
+// ResourceUsage has its own lock; no additional lock is needed here.
+queueUsage.decUsed(nodeLabel, resourceToDec);
+if (null != parent) {
+  parent.decUsedResource(nodeLabel, resourceToDec, null);
+}
+  }
+
   /**
* Return if the queue has pending resource on given nodePartition and
* schedulingMode. 


hadoop git commit: YARN-2801. Add documentation for node labels feature. Contributed by Wangda Tan and Naganarasimha G R.

2015-09-01 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk bf669b6d9 -> faa38e1aa


YARN-2801. Add documentation for node labels feature. Contributed by Wangda Tan 
and Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/faa38e1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/faa38e1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/faa38e1a

Branch: refs/heads/trunk
Commit: faa38e1aa49907254bf981662a8aeb5dc52e75e3
Parents: bf669b6
Author: Tsuyoshi Ozawa 
Authored: Tue Sep 1 17:54:14 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Tue Sep 1 17:54:49 2015 +0900

--
 hadoop-project/src/site/site.xml|   1 +
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../src/site/markdown/NodeLabel.md  | 140 +++
 3 files changed, 144 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/faa38e1a/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index ee0dfcd..272ea7b 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -121,6 +121,7 @@
   
   
   
+  
   
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/faa38e1a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 999654d..e2b1307 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -827,6 +827,9 @@ Release 2.7.2 - UNRELEASED
 YARN-4092. Fixed UI redirection to print useful messages when both RMs are
 in standby mode. (Xuan Gong via jianhe)
 
+YARN-2801. Add documentation for node labels feature. (Wangda Tan and 
Naganarasimha 
+G R  via ozawa)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/faa38e1a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
new file mode 100644
index 000..87019cd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
@@ -0,0 +1,140 @@
+
+
+YARN Node Labels
+===
+
+# Overview
+A node label is a way to group nodes with similar characteristics; applications
+can specify where they should run.
+
+Currently only node partitions are supported, which means:
+
+* One node can have at most one node partition, so node partitions split a
+cluster into several disjoint sub-clusters. By default, nodes belong to the
+DEFAULT partition (partition="").
+* Users need to configure how much of each partition's resources can be used
+by different queues. For more detail, please refer to the next section.
+* There are two kinds of node partitions:
+* Exclusive: containers are allocated only to nodes whose node partition
+exactly matches the request (e.g. a request for partition="x" is allocated to
+a node with partition="x"; a request for the DEFAULT partition is allocated to
+DEFAULT-partition nodes).
+* Non-exclusive: a non-exclusive partition shares its idle resources with
+containers requesting the DEFAULT partition.
+
+Users can specify the set of node labels each queue can access; an application
+can only use a subset of the node labels accessible to the queue that contains
+it.
+
+# Features
+```Node Labels``` currently supports the following features:
+
+* Partition cluster - each node can be assigned one label, so the cluster is
+divided into several smaller disjoint partitions.
+* ACL of node-labels on queues - users can set accessible node labels on each
+queue, so that some nodes can only be accessed by specific queues.
+* Specify the percentage of a partition's resources that a queue can access -
+users can set a percentage like: queue A can access 30% of the resources on
+nodes with label=hbase. Such percentage settings are consistent with the
+existing resource manager.
+* Specify a required node label in a resource request; it will only be
+allocated when a node has the same label. If no node label requirement is
+specified, such a resource request will only be allocated on nodes belonging
+to the DEFAULT partition.
+* Operability
+* Node labels and node label mappings can be recovered across RM restarts
+* Update node labels - admins can update labels on nodes and labels on queues
+  when RM is running
+
+# Configuration
+## Setting up ```ResourceManager``` to enable ```Node Labels```:
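The configuration section is truncated in this message. For orientation, a minimal node-labels setup — a hedged sketch following the released documentation rather than this exact diff (the property names and rmadmin flags are real; the host names, paths, and label names are placeholders) — looks like:

```bash
# yarn-site.xml (assumed minimal settings):
#   yarn.node-labels.enabled = true
#   yarn.node-labels.fs-store.root-dir = hdfs://namenode:8020/yarn/node-labels

# Then define labels and attach one to a node:
yarn rmadmin -addToClusterNodeLabels "x,y"
yarn rmadmin -replaceLabelsOnNode "host1.example.com=x"
```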

hadoop git commit: YARN-2801. Add documentation for node labels feature. Contributed by Wangda Tan and Naganarasimha G R.

2015-09-01 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 855e0f8b0 -> d3d65f62b


YARN-2801. Add documentation for node labels feature. Contributed by Wangda Tan 
and Naganarasimha G R.

(cherry picked from commit faa38e1aa49907254bf981662a8aeb5dc52e75e3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3d65f62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3d65f62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3d65f62

Branch: refs/heads/branch-2
Commit: d3d65f62b39dfe3f4d0efa47e5129e2aa3033d5e
Parents: 855e0f8
Author: Tsuyoshi Ozawa 
Authored: Tue Sep 1 17:54:14 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Tue Sep 1 17:55:53 2015 +0900

--
 hadoop-project/src/site/site.xml|   1 +
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../src/site/markdown/NodeLabel.md  | 140 +++
 3 files changed, 144 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3d65f62/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 0e2ab2e..9b888de 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -120,6 +120,7 @@
   
   
   
+  
   
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3d65f62/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c19bd7b..f8c6c96 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -775,6 +775,9 @@ Release 2.7.2 - UNRELEASED
 YARN-4092. Fixed UI redirection to print useful messages when both RMs are
 in standby mode. (Xuan Gong via jianhe)
 
+YARN-2801. Add documentation for node labels feature. (Wangda Tan and 
Naganarasimha 
+G R  via ozawa)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3d65f62/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
new file mode 100644
index 000..87019cd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
@@ -0,0 +1,140 @@
+
+
+YARN Node Labels
+===
+
+# Overview
+A node label is a way to group nodes with similar characteristics; applications
+can specify where they should run.
+
+Currently only node partitions are supported, which means:
+
+* One node can have at most one node partition, so node partitions split a
+cluster into several disjoint sub-clusters. By default, nodes belong to the
+DEFAULT partition (partition="").
+* Users need to configure how much of each partition's resources can be used
+by different queues. For more detail, please refer to the next section.
+* There are two kinds of node partitions:
+* Exclusive: containers are allocated only to nodes whose node partition
+exactly matches the request (e.g. a request for partition="x" is allocated to
+a node with partition="x"; a request for the DEFAULT partition is allocated to
+DEFAULT-partition nodes).
+* Non-exclusive: a non-exclusive partition shares its idle resources with
+containers requesting the DEFAULT partition.
+
+Users can specify the set of node labels each queue can access; an application
+can only use a subset of the node labels accessible to the queue that contains
+it.
+
+# Features
+```Node Labels``` currently supports the following features:
+
+* Partition cluster - each node can be assigned one label, so the cluster is
+divided into several smaller disjoint partitions.
+* ACL of node-labels on queues - users can set accessible node labels on each
+queue, so that some nodes can only be accessed by specific queues.
+* Specify the percentage of a partition's resources that a queue can access -
+users can set a percentage like: queue A can access 30% of the resources on
+nodes with label=hbase. Such percentage settings are consistent with the
+existing resource manager.
+* Specify a required node label in a resource request; it will only be
+allocated when a node has the same label. If no node label requirement is
+specified, such a resource request will only be allocated on nodes belonging
+to the DEFAULT partition.
+* Operability
+* Node labels and node label mappings can be recovered across RM restarts
+* Update node labels - admins can update labels on nodes and labels on queues
+  when RM is running

hadoop git commit: HADOOP-12367. Move TestFileUtil's test resources to resources folder. (wang via yliu)

2015-09-01 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7ad3556ed -> f4d96be6c


HADOOP-12367. Move TestFileUtil's test resources to resources folder. (wang via 
yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4d96be6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4d96be6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4d96be6

Branch: refs/heads/trunk
Commit: f4d96be6c637ff54903615cff04b365e25bb3229
Parents: 7ad3556
Author: yliu 
Authored: Tue Sep 1 16:20:56 2015 +0800
Committer: yliu 
Committed: Tue Sep 1 16:20:56 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +++
 hadoop-common-project/hadoop-common/pom.xml |  19 +--
 .../java/org/apache/hadoop/fs/test-untar.tar| Bin 20480 -> 0 bytes
 .../java/org/apache/hadoop/fs/test-untar.tgz| Bin 2024 -> 0 bytes
 .../src/test/resources/test-untar.tar   | Bin 0 -> 20480 bytes
 .../src/test/resources/test-untar.tgz   | Bin 0 -> 2024 bytes
 6 files changed, 4 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0f52d22..14e6fda 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -759,6 +759,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.
 (wang)
 
+HADOOP-12367. Move TestFileUtil's test resources to resources folder.
+(wang via yliu)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 282735d..3ae09a0 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -454,23 +454,6 @@
 
   
   
-copy-test-tarballs
-process-test-resources
-
-  run
-
-
-  
-
-  
-
-
-  
-
-  
-
-  
-  
 pre-site
 
   run
@@ -505,7 +488,7 @@
 
src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h
 
src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c
 
src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h
-
src/test/java/org/apache/hadoop/fs/test-untar.tgz
+src/test/resources/test-untar.tgz
 src/test/resources/test.har/_SUCCESS
 src/test/resources/test.har/_index
 src/test/resources/test.har/_masterindex

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
deleted file mode 100644
index 949e985..000
Binary files 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
deleted file mode 100644
index 9e9ef40..000
Binary files 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar 
b/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar
new file mode 100644
index 000..949e985
Binary files /dev/null and 

hadoop git commit: HADOOP-12359. hadoop fs -getmerge doc is wrong. Contributed by Jagadesh Kiran N.

2015-09-01 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk faa38e1aa -> 2e251a767


HADOOP-12359. hadoop fs -getmerge doc is wrong. Contributed by Jagadesh Kiran N.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e251a76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e251a76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e251a76

Branch: refs/heads/trunk
Commit: 2e251a767427a38ecb6c309ad979feecb29a09f4
Parents: faa38e1
Author: Akira Ajisaka 
Authored: Tue Sep 1 20:55:33 2015 +0900
Committer: Akira Ajisaka 
Committed: Tue Sep 1 20:55:33 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt|  3 +++
 .../hadoop-common/src/site/markdown/FileSystemShell.md | 13 +++--
 2 files changed, 14 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e251a76/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 14e6fda..4eef964 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1120,6 +1120,9 @@ Release 2.7.2 - UNRELEASED
 HADOOP-12061. Incorrect command in single cluster setup document.
 (Kengo Seki via aajisaka)
 
+HADOOP-12359. hadoop fs -getmerge doc is wrong.
+(Jagadesh Kiran N via aajisaka)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e251a76/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index fb89ca1..d6d00e4 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -368,9 +368,18 @@ Returns 0 on success and non-zero on error.
 getmerge
 
 
-Usage: `hadoop fs -getmerge <src> <localdst> [addnl]`
+Usage: `hadoop fs -getmerge [-nl] <src> <localdst>`
 
-Takes a source directory and a destination file as input and concatenates 
files in src into the destination local file. Optionally addnl can be set to 
enable adding a newline character at the end of each file.
+Takes a source directory and a destination file as input and concatenates 
files in src into the destination local file. Optionally -nl can be set to 
enable adding a newline character (LF) at the end of each file.
+
+Examples:
+
+* `hadoop fs -getmerge -nl  /src  /opt/output.txt`
+* `hadoop fs -getmerge -nl  /src/file1.txt /src/file2.txt  /output.txt`
+
+Exit Code:
+
+Returns 0 on success and non-zero on error.
 
 help
 



hadoop git commit: HADOOP-12359. hadoop fs -getmerge doc is wrong. Contributed by Jagadesh Kiran N.

2015-09-01 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d3d65f62b -> 236c4ab51


HADOOP-12359. hadoop fs -getmerge doc is wrong. Contributed by Jagadesh Kiran N.

(cherry picked from commit 2e251a767427a38ecb6c309ad979feecb29a09f4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/236c4ab5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/236c4ab5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/236c4ab5

Branch: refs/heads/branch-2
Commit: 236c4ab511055d0b67d2c73160125cc63015ac07
Parents: d3d65f6
Author: Akira Ajisaka 
Authored: Tue Sep 1 20:55:33 2015 +0900
Committer: Akira Ajisaka 
Committed: Tue Sep 1 20:56:46 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt|  3 +++
 .../hadoop-common/src/site/markdown/FileSystemShell.md | 13 +++--
 2 files changed, 14 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/236c4ab5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index bb269c8..7c12a47 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -623,6 +623,9 @@ Release 2.7.2 - UNRELEASED
 HADOOP-12061. Incorrect command in single cluster setup document.
 (Kengo Seki via aajisaka)
 
+HADOOP-12359. hadoop fs -getmerge doc is wrong.
+(Jagadesh Kiran N via aajisaka)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/236c4ab5/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 6fa81eb..fdde735 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -368,9 +368,18 @@ Returns 0 on success and non-zero on error.
 getmerge
 
 
-Usage: `hadoop fs -getmerge <src> <localdst> [addnl]`
+Usage: `hadoop fs -getmerge [-nl] <src> <localdst>`
 
-Takes a source directory and a destination file as input and concatenates 
files in src into the destination local file. Optionally addnl can be set to 
enable adding a newline character at the end of each file.
+Takes a source directory and a destination file as input and concatenates 
files in src into the destination local file. Optionally -nl can be set to 
enable adding a newline character (LF) at the end of each file.
+
+Examples:
+
+* `hadoop fs -getmerge -nl  /src  /opt/output.txt`
+* `hadoop fs -getmerge -nl  /src/file1.txt /src/file2.txt  /output.txt`
+
+Exit Code:
+
+Returns 0 on success and non-zero on error.
 
 help
 



hadoop git commit: HADOOP-12359. hadoop fs -getmerge doc is wrong. Contributed by Jagadesh Kiran N.

2015-09-01 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 1b5bf5800 -> bcb98ba08


HADOOP-12359. hadoop fs -getmerge doc is wrong. Contributed by Jagadesh Kiran N.

(cherry picked from commit 2e251a767427a38ecb6c309ad979feecb29a09f4)
(cherry picked from commit 236c4ab511055d0b67d2c73160125cc63015ac07)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcb98ba0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcb98ba0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcb98ba0

Branch: refs/heads/branch-2.7
Commit: bcb98ba08152c01e5227412fddde484d41cc4e1b
Parents: 1b5bf58
Author: Akira Ajisaka 
Authored: Tue Sep 1 20:55:33 2015 +0900
Committer: Akira Ajisaka 
Committed: Tue Sep 1 20:57:10 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt|  3 +++
 .../hadoop-common/src/site/markdown/FileSystemShell.md | 13 +++--
 2 files changed, 14 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcb98ba0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0a8d185..cdc7b66 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -32,6 +32,9 @@ Release 2.7.2 - UNRELEASED
 HADOOP-12061. Incorrect command in single cluster setup document.
 (Kengo Seki via aajisaka)
 
+HADOOP-12359. hadoop fs -getmerge doc is wrong.
+(Jagadesh Kiran N via aajisaka)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcb98ba0/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 37f644d..494ad71 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -368,9 +368,18 @@ Returns 0 on success and non-zero on error.
 getmerge
 
 
-Usage: `hadoop fs -getmerge <src> <localdst> [addnl]`
+Usage: `hadoop fs -getmerge [-nl] <src> <localdst>`
 
-Takes a source directory and a destination file as input and concatenates 
files in src into the destination local file. Optionally addnl can be set to 
enable adding a newline character at the end of each file.
+Takes a source directory and a destination file as input and concatenates 
files in src into the destination local file. Optionally -nl can be set to 
enable adding a newline character (LF) at the end of each file.
+
+Examples:
+
+* `hadoop fs -getmerge -nl  /src  /opt/output.txt`
+* `hadoop fs -getmerge -nl  /src/file1.txt /src/file2.txt  /output.txt`
+
+Exit Code:
+
+Returns 0 on success and non-zero on error.
 
 help
 



hadoop git commit: HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl lock for a very long time (sinago via cmccabe)

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 8a9665a58 -> c3f5ea11e


HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl lock 
for a very long time (sinago via cmccabe)

(cherry picked from commit 28bebc81db8bb6d1bc2574de7564fe4c595cfe09)
(cherry picked from commit a827089905524e10638c783ba908a895d621911d)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

(cherry picked from commit c3a3092c37926eca75ea149c4c061742f6599b40)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3f5ea11
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3f5ea11
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3f5ea11

Branch: refs/heads/branch-2.6.1
Commit: c3f5ea11eca30a617cab2a716dd08dff20db3791
Parents: 8a9665a
Author: Colin Patrick Mccabe 
Authored: Mon Apr 6 08:54:46 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 20:15:20 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 67 +---
 2 files changed, 47 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3f5ea11/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 67fd7ed..31d0c02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -112,6 +112,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7742. Favoring decommissioning node for replication can cause a block
 to stay underreplicated for long periods (Nathan Roberts via kihwal)
 
+HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl
+lock for a very long time (sinago via cmccabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3f5ea11/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index f24d644..e352ea3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1180,30 +1180,51 @@ class FsDatasetImpl implements 
FsDatasetSpi {
   }
 
   @Override // FsDatasetSpi
-  public synchronized ReplicaInPipeline createTemporary(StorageType 
storageType,
-  ExtendedBlock b) throws IOException {
-ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
b.getBlockId());
-if (replicaInfo != null) {
-  if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
-  && replicaInfo instanceof ReplicaInPipeline) {
-// Stop the previous writer
-((ReplicaInPipeline)replicaInfo)
-  
.stopWriter(datanode.getDnConf().getXceiverStopTimeout());
-invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
-  } else {
-throw new ReplicaAlreadyExistsException("Block " + b +
-" already exists in state " + replicaInfo.getState() +
-" and thus cannot be created.");
+  public ReplicaInPipeline createTemporary(
+  StorageType storageType, ExtendedBlock b) throws IOException {
+long startTimeMs = Time.monotonicNow();
+long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
+ReplicaInfo lastFoundReplicaInfo = null;
+do {
+  synchronized (this) {
+ReplicaInfo currentReplicaInfo =
+volumeMap.get(b.getBlockPoolId(), b.getBlockId());
+if (currentReplicaInfo == lastFoundReplicaInfo) {
+  if (lastFoundReplicaInfo != null) {
+invalidate(b.getBlockPoolId(), new Block[] { lastFoundReplicaInfo 
});
+  }
+  FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes());
+  // create a temporary file to hold block in the designated volume
+  File f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
+  ReplicaInPipeline newReplicaInfo =
+  new ReplicaInPipeline(b.getBlockId(), b.getGenerationStamp(), v,
+  f.getParentFile(), 0);
+  

hadoop git commit: HDFS-8388. Time and Date format need to be in sync in NameNode UI page. Contributed by Surendra Singh Lilhore.

2015-09-01 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 00804e245 -> 65ccf2b12


HDFS-8388. Time and Date format need to be in sync in NameNode UI page. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65ccf2b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65ccf2b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65ccf2b1

Branch: refs/heads/trunk
Commit: 65ccf2b1252a5c83755fa24a93cf1d30ee59b2c3
Parents: 00804e2
Author: Akira Ajisaka 
Authored: Wed Sep 2 14:28:38 2015 +0900
Committer: Akira Ajisaka 
Committed: Wed Sep 2 14:28:38 2015 +0900

--
 .../hadoop-common/src/site/markdown/Metrics.md   | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 5 +
 .../apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java   | 6 ++
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html | 5 ++---
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js   | 6 +++---
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html  | 1 +
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.js| 2 +-
 .../hadoop-hdfs/src/main/webapps/static/dfs-dust.js  | 8 +++-
 9 files changed, 30 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ccf2b1/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index de706ad..8722968 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -192,6 +192,8 @@ Each metrics record contains tags such as ProcessName, 
SessionId, and Hostname a
 | `PutImageNumOps` | Total number of fsimage uploads to SecondaryNameNode |
 | `PutImageAvgTime` | Average fsimage upload time in milliseconds |
 | `TotalFileOps`| Total number of file operations performed |
+| `NNStarted`| NameNode start time |
+| `NNStartedTimeInMillis`| NameNode start time in milliseconds |
 
 FSNamesystem
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ccf2b1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ea398f2..14a9248 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1270,6 +1270,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8950. NameNode refresh doesn't remove DataNodes that are no longer in
 the allowed list (Daniel Templeton)
 
+HDFS-8388. Time and Date format need to be in sync in NameNode UI page.
+(Surendra Singh Lilhore via aajisaka)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ccf2b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f4952f7..adcb1d6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -6131,6 +6131,11 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return getStartTime().toString();
   }
 
+  @Override // NameNodeMXBean
+  public long getNNStartedTimeInMillis() {
+return startTime;
+  }
+
   @Override  // NameNodeMXBean
   public String getCompileInfo() {
 return VersionInfo.getDate() + " by " + VersionInfo.getUser() +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ccf2b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
index 0e4d445..00c1abe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
+++ 
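The diff is truncated here, but the NameNodeMXBean side of the change is a single new getter, getNNStartedTimeInMillis(), matching the FSNamesystem implementation above. As a hedged sketch (the JMX object name below is the NameNode's customary registration, not quoted from this commit), the new attribute can be read in-process like this; against a remote NameNode you would typically scrape the same attribute from the /jmx HTTP endpoint instead:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class NNStartTimeProbe {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Assumed object name; verify against your deployment.
    ObjectName nn = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    long startedMs = (Long) mbs.getAttribute(nn, "NNStartedTimeInMillis");
    System.out.println("NameNode started at epoch ms: " + startedMs);
  }
}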

hadoop git commit: MAPREDUCE-6300. Task list sort by task id broken. Contributed by Siqi Li.

2015-09-01 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 752e3da73 -> dcc0d4658


MAPREDUCE-6300. Task list sort by task id broken. Contributed by Siqi Li.

(cherry picked from commit 4cec9975c0e2c2fcca090888dc2c5e149c5be1e9)
(cherry picked from commit 3de0bf9a35094f3c2ac216992d861729251b6a3d)
(cherry picked from commit c3b2b377f19293797a88f4636f07e2ab8e18c78b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcc0d465
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcc0d465
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcc0d465

Branch: refs/heads/branch-2.6.1
Commit: dcc0d4658c9bc7f488cecdcbc05e9216234b36aa
Parents: 752e3da
Author: Akira Ajisaka 
Authored: Sat Apr 11 01:29:08 2015 +0900
Committer: Vinod Kumar Vavilapalli 
Committed: Tue Sep 1 21:40:06 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt   | 2 ++
 .../java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java  | 2 +-
 .../java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java | 2 +-
 3 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcc0d465/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ff1e597..6e35675 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -24,6 +24,8 @@ Release 2.6.1 - UNRELEASED
 MAPREDUCE-6303. Read timeout when retrying a fetch error can be fatal 
 to a reducer. (Jason Lowe via junping_du)
 
+MAPREDUCE-6300. Task list sort by task id broken. (Siqi Li via aajisaka)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcc0d465/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
index 0212ae4..9648527 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
@@ -43,7 +43,7 @@ public class TasksPage extends AppView {
   .append(", bProcessing: true")
 
   .append("\n, aoColumnDefs: [\n")
-  .append("{'sType':'numeric', 'aTargets': [0]")
+  .append("{'sType':'string', 'aTargets': [0]")
   .append(", 'mRender': parseHadoopID }")
 
   .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [1]")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcc0d465/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
index 4e2b687..2619dff 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
@@ -76,7 +76,7 @@ public class HsTasksPage extends HsView {
 .append(", bProcessing: true")
 
 .append("\n, aoColumnDefs: [\n")
-.append("{'sType':'numeric', 'aTargets': [ 0 ]")
+.append("{'sType':'string', 'aTargets': [ 0 ]")
 .append(", 'mRender': parseHadoopID }")
 
 .append(", {'sType':'numeric', 'aTargets': [ 4")
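The one-line fix in both pages is the same: the first column holds IDs like task_1408862673779_0001_m_000002 (rendered by parseHadoopID), which the DataTables 'numeric' sorter cannot parse, so sorting on that column silently broke. Because the ID fields are zero-padded, plain lexicographic ('string') ordering agrees with logical order. A small check, with hypothetical example IDs:

public class TaskIdSortCheck {
  public static void main(String[] args) {
    String a = "task_1408862673779_0001_m_000002";
    String b = "task_1408862673779_0001_m_000010";
    // zero-padding makes string order agree with numeric task order
    System.out.println(a.compareTo(b) < 0); // true
  }
}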



[30/50] [abbrv] hadoop git commit: HADOOP-12346. Increase some default timeouts / retries for S3a connector. (Sean Mackrory via Lei (Eddy) Xu)

2015-09-01 Thread zhz
HADOOP-12346. Increase some default timeouts / retries for S3a connector. (Sean 
Mackrory via Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ab2d19f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ab2d19f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ab2d19f

Branch: refs/heads/HDFS-7285
Commit: 6ab2d19f5c010ab1d318214916ba95daa91a4dbf
Parents: bdbe53c
Author: Lei Xu 
Authored: Sat Aug 29 09:57:27 2015 -0700
Committer: Lei Xu 
Committed: Sat Aug 29 09:59:30 2015 -0700

--
 .../hadoop-common/src/main/resources/core-default.xml| 4 ++--
 .../src/main/java/org/apache/hadoop/fs/s3a/Constants.java| 4 ++--
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md   | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ab2d19f/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index cef32d3..b813aa9 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -793,7 +793,7 @@ for ldap providers in the same way as above does.
 
 
   fs.s3a.attempts.maximum
-  10
+  20
   How many times we should retry commands on transient 
errors.
 
 
@@ -805,7 +805,7 @@ for ldap providers in the same way as above does.
 
 
   fs.s3a.connection.timeout
-  5
+  20
   Socket connection timeout in milliseconds.
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ab2d19f/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index fe8dd77..fa81d93 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -45,7 +45,7 @@ public class Constants {
 
   // number of times we should retry errors
   public static final String MAX_ERROR_RETRIES = "fs.s3a.attempts.maximum";
-  public static final int DEFAULT_MAX_ERROR_RETRIES = 10;
+  public static final int DEFAULT_MAX_ERROR_RETRIES = 20;
 
   // seconds until we give up trying to establish a connection to s3
   public static final String ESTABLISH_TIMEOUT = 
"fs.s3a.connection.establish.timeout";
@@ -53,7 +53,7 @@ public class Constants {
   
   // seconds until we give up on a connection to s3
   public static final String SOCKET_TIMEOUT = "fs.s3a.connection.timeout";
-  public static final int DEFAULT_SOCKET_TIMEOUT = 5;
+  public static final int DEFAULT_SOCKET_TIMEOUT = 20;
 
   // number of records to get while paging through a directory listing
   public static final String MAX_PAGING_KEYS = "fs.s3a.paging.maximum";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ab2d19f/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 5d45e0a..6df15e6 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -206,7 +206,7 @@ If you do any of these: change your credentials immediately!
 
 
   fs.s3a.attempts.maximum
-  10
+  20
   How many times we should retry commands on transient 
errors.
 
 
@@ -218,7 +218,7 @@ If you do any of these: change your credentials immediately!
 
 
   fs.s3a.connection.timeout
-  5
+  20
   Socket connection timeout in milliseconds.
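Note the numeric values in the quoted diffs appear truncated by the archive; the property names and the direction of the change (more retries, a longer socket timeout) are what survive. A hedged sketch of overriding the same settings programmatically; the values below are illustrative, not the commit's new defaults:

import org.apache.hadoop.conf.Configuration;

public class S3aTimeoutOverride {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("fs.s3a.attempts.maximum", 20);        // retries on transient errors
    conf.setInt("fs.s3a.connection.timeout", 200000);  // socket timeout, milliseconds
    System.out.println(conf.get("fs.s3a.connection.timeout"));
  }
}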
 
 



[19/50] [abbrv] hadoop git commit: HDFS-8963. Fix incorrect sign extension of xattr length in HDFS-8900. (Colin Patrick McCabe via yliu)

2015-09-01 Thread zhz
HDFS-8963. Fix incorrect sign extension of xattr length in HDFS-8900. (Colin 
Patrick McCabe via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e166c038
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e166c038
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e166c038

Branch: refs/heads/HDFS-7285
Commit: e166c038c0aaa57b245f985a1c0fadd5fe33c384
Parents: 035ed26
Author: yliu 
Authored: Fri Aug 28 10:54:55 2015 +0800
Committer: yliu 
Committed: Fri Aug 28 10:54:55 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop/hdfs/server/namenode/XAttrFormat.java  | 14 ++
 .../hadoop/hdfs/server/namenode/TestXAttrFeature.java | 12 
 3 files changed, 25 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e166c038/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9cc3326..b699fceb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1243,6 +1243,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8969. Clean up findbugs warnings for HDFS-8823 and HDFS-8932.
 (Anu Engineer via wheat9)
 
+HDFS-8963. Fix incorrect sign extension of xattr length in HDFS-8900.
+(Colin Patrick McCabe via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e166c038/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
index 6167dac..7e704d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
@@ -61,12 +61,15 @@ class XAttrFormat {
 for (int i = 0; i < attrs.length;) {
   XAttr.Builder builder = new XAttr.Builder();
   // big-endian
-  int v = Ints.fromBytes(attrs[i++], attrs[i++], attrs[i++], attrs[i++]);
+  int v = Ints.fromBytes(attrs[i], attrs[i + 1],
+  attrs[i + 2], attrs[i + 3]);
+  i += 4;
   int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
   int nid = v & XATTR_NAME_MASK;
   builder.setNameSpace(XATTR_NAMESPACE_VALUES[ns]);
   builder.setName(XAttrStorage.getName(nid));
-  int vlen = (attrs[i++] << 8) | attrs[i++];
+  int vlen = ((0xff & attrs[i]) << 8) | (0xff & attrs[i + 1]);
+  i += 2;
   if (vlen > 0) {
 byte[] value = new byte[vlen];
 System.arraycopy(attrs, i, value, 0, vlen);
@@ -94,12 +97,15 @@ class XAttrFormat {
 XAttr xAttr = XAttrHelper.buildXAttr(prefixedName);
 for (int i = 0; i < attrs.length;) {
   // big-endian
-  int v = Ints.fromBytes(attrs[i++], attrs[i++], attrs[i++], attrs[i++]);
+  int v = Ints.fromBytes(attrs[i], attrs[i + 1],
+  attrs[i + 2], attrs[i + 3]);
+  i += 4;
   int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
   int nid = v & XATTR_NAME_MASK;
   XAttr.NameSpace namespace = XATTR_NAMESPACE_VALUES[ns];
   String name = XAttrStorage.getName(nid);
-  int vlen = (attrs[i++] << 8) | attrs[i++];
+  int vlen = ((0xff & attrs[i]) << 8) | (0xff & attrs[i + 1]);
+  i += 2;
   if (xAttr.getNameSpace() == namespace &&
   xAttr.getName().equals(name)) {
 if (vlen > 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e166c038/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
index fcb157e..5b0922d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
@@ -43,6 +43,14 @@ public class TestXAttrFeature {
   static final String name7 = "raw.a7";
   static final byte[] value7 = {0x011, 0x012, 0x013};
   static final String name8 = "user.a8";
+ 
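The bug being fixed is plain Java byte arithmetic: bytes are signed, so in the old (attrs[i++] << 8) | attrs[i++] expression any second byte of 0x80 or above sign-extends to a negative int and corrupts the decoded length. Masking each byte with 0xff first, as the patch does, keeps both operands in 0..255. A self-contained demonstration:

public class XAttrLenDemo {
  public static void main(String[] args) {
    byte hi = (byte) 0x01, lo = (byte) 0x80;        // encodes length 0x0180 = 384
    int buggy = (hi << 8) | lo;                     // lo sign-extends to 0xFFFFFF80
    int fixed = ((0xff & hi) << 8) | (0xff & lo);
    System.out.println(buggy + " vs " + fixed);     // prints: -128 vs 384
  }
}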

[03/50] [abbrv] hadoop git commit: HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)

2015-09-01 Thread zhz
HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eee0d456
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eee0d456
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eee0d456

Branch: refs/heads/HDFS-7285
Commit: eee0d4563c62647cfaaed6605ee713aaf69add78
Parents: af78767
Author: yliu 
Authored: Tue Aug 25 16:16:09 2015 +0800
Committer: yliu 
Committed: Tue Aug 25 16:16:09 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../org/apache/hadoop/hdfs/XAttrHelper.java |  13 +-
 .../BlockStoragePolicySuite.java|   5 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  29 ++--
 .../hdfs/server/namenode/FSDirectory.java   |  60 ---
 .../server/namenode/FSImageFormatPBINode.java   |   6 +-
 .../hdfs/server/namenode/INodeDirectory.java|  11 +-
 .../server/namenode/SerialNumberManager.java|  44 --
 .../hdfs/server/namenode/SerialNumberMap.java   |  79 ++
 .../hdfs/server/namenode/XAttrFeature.java  |  78 +-
 .../hdfs/server/namenode/XAttrFormat.java   | 155 +++
 .../server/namenode/XAttrPermissionFilter.java  |   6 +-
 .../hdfs/server/namenode/XAttrStorage.java  |  62 +++-
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|   6 +-
 .../src/main/resources/hdfs-default.xml |   4 +-
 .../hdfs/server/namenode/TestStartup.java   |  27 +---
 .../hdfs/server/namenode/TestXAttrFeature.java  | 107 +
 18 files changed, 502 insertions(+), 193 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7aadcc6..2c47b50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -833,6 +833,8 @@ Release 2.8.0 - UNRELEASED
 ReplicaUnderConstruction as a separate class and replicas as an array.
 (jing9)
 
+HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9b14168..e6802a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -318,6 +318,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
   public static final String  DFS_NAMENODE_MAX_XATTR_SIZE_KEY = 
"dfs.namenode.fs-limits.max-xattr-size";
   public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;
+  public static final int DFS_NAMENODE_MAX_XATTR_SIZE_HARD_LIMIT = 32768;
 
 
   //Following keys have no defaults
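The commit message does not spell out where the new 32768 hard limit comes from, but a plausible reading is that it tracks the serialization this change introduces: XAttrFormat stores each value length in a two-byte field, and 32768 == 1 << 15 keeps the length unambiguous even if that field is ever read as a signed short. Treat this as an inference, not a quoted rationale.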

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
index 5cafb3c..2655c40 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
@@ -130,7 +130,7 @@ public class XAttrHelper {
 }
 Map xAttrMap = Maps.newHashMap();
 for (XAttr xAttr : xAttrs) {
-  String name = getPrefixName(xAttr);
+  String name = getPrefixedName(xAttr);
   byte[] value = xAttr.getValue();
   if (value == null) {
 value = new byte[0];
@@ -144,13 +144,16 @@ public class XAttrHelper {
   /**
* Get name with prefix from XAttr
*/
-  public static String getPrefixName(XAttr xAttr) {
+  public static String getPrefixedName(XAttr xAttr) {
 if (xAttr == null) {
   return null;
 }
-
-String namespace = xAttr.getNameSpace().toString();
-return 

[05/50] [abbrv] hadoop git commit: HDFS-8951. Move the shortcircuit package to hdfs-client. Contributed by Mingliang Liu.

2015-09-01 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c992bcf9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
deleted file mode 100644
index 4977fd7..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode;
-
-import java.io.BufferedInputStream;
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.EOFException;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.util.DataChecksum;
-
-import com.google.common.annotations.VisibleForTesting;
-
-
-
-/**
- * BlockMetadataHeader manages metadata for data blocks on Datanodes.
- * This is not related to the Block related functionality in Namenode.
- * The biggest part of data block metadata is CRC for the block.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class BlockMetadataHeader {
-  private static final Log LOG = LogFactory.getLog(BlockMetadataHeader.class);
-
-  public static final short VERSION = 1;
-  
-  /**
-   * Header includes everything except the checksum(s) themselves.
-   * Version is two bytes. Following it is the DataChecksum
-   * that occupies 5 bytes. 
-   */
-  private final short version;
-  private DataChecksum checksum = null;
-
-  private static final HdfsConfiguration conf = new HdfsConfiguration();
-
-  @VisibleForTesting
-  public BlockMetadataHeader(short version, DataChecksum checksum) {
-this.checksum = checksum;
-this.version = version;
-  }
-  
-  /** Get the version */
-  public short getVersion() {
-return version;
-  }
-
-  /** Get the checksum */
-  public DataChecksum getChecksum() {
-return checksum;
-  }
-
-  /**
-   * Read the checksum header from the meta file.
-   * @return the data checksum obtained from the header.
-   */
-  public static DataChecksum readDataChecksum(File metaFile) throws 
IOException {
-DataInputStream in = null;
-try {
-  in = new DataInputStream(new BufferedInputStream(
-new FileInputStream(metaFile), DFSUtil.getIoFileBufferSize(conf)));
-  return readDataChecksum(in, metaFile);
-} finally {
-  IOUtils.closeStream(in);
-}
-  }
-
-  /**
-   * Read the checksum header from the meta input stream.
-   * @return the data checksum obtained from the header.
-   */
-  public static DataChecksum readDataChecksum(final DataInputStream metaIn,
-  final Object name) throws IOException {
-// read and handle the common header here. For now just a version
-final BlockMetadataHeader header = readHeader(metaIn);
-if (header.getVersion() != VERSION) {
-  LOG.warn("Unexpected meta-file version for " + name
-  + ": version in file is " + header.getVersion()
-  + " but expected version is " + VERSION);
-}
-return header.getChecksum();
-  }
-
-  /**
-   * Read the header without changing the position of the FileChannel.
-   *
-   * @param fc The FileChannel to read.
-   * @return the Metadata Header.
-   * @throws IOException on error.
-   */
-  public static BlockMetadataHeader preadHeader(FileChannel fc)
-  throws IOException {
-final byte arr[] = new byte[getHeaderSize()];
-ByteBuffer buf = 

[50/50] [abbrv] hadoop git commit: Merge remote-tracking branch 'apache/trunk' into HDFS-7285

2015-09-01 Thread zhz
Merge remote-tracking branch 'apache/trunk' into HDFS-7285


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53358fe6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53358fe6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53358fe6

Branch: refs/heads/HDFS-7285
Commit: 53358fe680a11c1b66a7f60733d11c1f4efe0232
Parents: ab56fcd 2e251a7
Author: Zhe Zhang 
Authored: Tue Sep 1 00:29:55 2015 -0700
Committer: Zhe Zhang 
Committed: Tue Sep 1 14:48:37 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   9 +
 hadoop-common-project/hadoop-common/pom.xml |  19 +-
 .../fs/CommonConfigurationKeysPublic.java   |   7 +
 .../src/main/resources/core-default.xml |  14 +-
 .../src/site/markdown/FileSystemShell.md|  13 +-
 .../java/org/apache/hadoop/fs/test-untar.tar| Bin 20480 -> 0 bytes
 .../java/org/apache/hadoop/fs/test-untar.tgz| Bin 2024 -> 0 bytes
 .../fs/viewfs/ViewFileSystemBaseTest.java   |   2 +-
 .../apache/hadoop/fs/viewfs/ViewFsBaseTest.java |   2 +-
 .../src/test/resources/test-untar.tar   | Bin 0 -> 20480 bytes
 .../src/test/resources/test-untar.tgz   | Bin 0 -> 2024 bytes
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   5 +
 .../org/apache/hadoop/hdfs/BlockReader.java | 110 +++
 .../apache/hadoop/hdfs/BlockReaderLocal.java| 748 +++
 .../hadoop/hdfs/BlockReaderLocalLegacy.java | 743 ++
 .../org/apache/hadoop/hdfs/BlockReaderUtil.java |  57 ++
 .../org/apache/hadoop/hdfs/ClientContext.java   | 196 +
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  68 ++
 .../apache/hadoop/hdfs/ExternalBlockReader.java | 126 
 .../apache/hadoop/hdfs/KeyProviderCache.java| 112 +++
 .../java/org/apache/hadoop/hdfs/PeerCache.java  | 291 
 .../apache/hadoop/hdfs/RemoteBlockReader.java   | 517 +
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  | 485 
 .../hadoop/hdfs/client/BlockReportOptions.java  |  59 ++
 .../hdfs/client/HdfsClientConfigKeys.java   |  13 +
 .../hdfs/protocol/BlockLocalPathInfo.java   |  70 ++
 .../hdfs/protocol/ClientDatanodeProtocol.java   | 152 
 .../InvalidEncryptionKeyException.java  |  40 +
 .../protocol/datatransfer/PacketHeader.java | 214 ++
 .../protocol/datatransfer/PacketReceiver.java   | 310 
 .../protocolPB/ClientDatanodeProtocolPB.java|  37 +
 .../ClientDatanodeProtocolTranslatorPB.java | 326 
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  13 +
 .../token/block/BlockTokenSelector.java |  48 ++
 .../hdfs/util/ByteBufferOutputStream.java   |  49 ++
 .../hadoop/hdfs/web/URLConnectionFactory.java   |  30 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  15 +-
 .../hdfs/web/oauth2/AccessTokenProvider.java|  66 ++
 .../hdfs/web/oauth2/AccessTokenTimer.java   | 103 +++
 .../ConfCredentialBasedAccessTokenProvider.java |  62 ++
 ...onfRefreshTokenBasedAccessTokenProvider.java | 146 
 .../CredentialBasedAccessTokenProvider.java | 135 
 .../oauth2/OAuth2ConnectionConfigurator.java|  79 ++
 .../hadoop/hdfs/web/oauth2/OAuth2Constants.java |  46 ++
 .../apache/hadoop/hdfs/web/oauth2/Utils.java|  63 ++
 .../hadoop/hdfs/web/oauth2/package-info.java|  26 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  22 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   6 +
 .../bkjournal/BookKeeperEditLogInputStream.java |   2 +-
 .../org/apache/hadoop/hdfs/BlockReader.java | 110 ---
 .../apache/hadoop/hdfs/BlockReaderLocal.java| 746 --
 .../hadoop/hdfs/BlockReaderLocalLegacy.java | 740 --
 .../org/apache/hadoop/hdfs/BlockReaderUtil.java |  57 --
 .../org/apache/hadoop/hdfs/ClientContext.java   | 195 -
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   1 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |   2 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  68 +-
 .../apache/hadoop/hdfs/ExternalBlockReader.java | 126 
 .../apache/hadoop/hdfs/KeyProviderCache.java| 111 ---
 .../java/org/apache/hadoop/hdfs/PeerCache.java  | 290 ---
 .../apache/hadoop/hdfs/RemoteBlockReader.java   | 513 -
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  | 482 
 .../hadoop/hdfs/client/BlockReportOptions.java  |  59 --
 .../hdfs/protocol/BlockLocalPathInfo.java   |  70 --
 .../hdfs/protocol/ClientDatanodeProtocol.java   | 152 
 .../hadoop/hdfs/protocol/LayoutVersion.java |   2 +-
 .../InvalidEncryptionKeyException.java  |  40 -
 .../protocol/datatransfer/PacketHeader.java | 214 --
 .../protocol/datatransfer/PacketReceiver.java   | 310 
 .../hdfs/protocol/datatransfer/Receiver.java|  15 +-
 

[21/50] [abbrv] hadoop git commit: HDFS-8865. Improve quota initialization performance. Contributed by Kihwal Lee.

2015-09-01 Thread zhz
HDFS-8865. Improve quota initialization performance. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6ceee9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6ceee9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6ceee9b

Branch: refs/heads/HDFS-7285
Commit: b6ceee9bf42eec15891f60a014bbfa47e03f563c
Parents: beb65c9
Author: Kihwal Lee 
Authored: Fri Aug 28 13:14:35 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Aug 28 13:14:35 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +
 .../hdfs/server/namenode/BackupImage.java   |   8 +-
 .../hadoop/hdfs/server/namenode/FSImage.java| 153 ---
 .../hdfs/server/namenode/QuotaCounts.java   |  10 +-
 .../src/main/resources/hdfs-default.xml |  10 ++
 .../namenode/TestDiskspaceQuotaUpdate.java  |  64 
 .../namenode/TestFSImageWithSnapshot.java   |   4 +-
 8 files changed, 196 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6ceee9b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b699fceb..67a6a6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -853,6 +853,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8962. Clean up checkstyle warnings in o.a.h.hdfs.DfsClientConf.
 (Mingliang Liu via wheat9)
 
+HDFS-8865. Improve quota initialization performance. (kihwal)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6ceee9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e6802a5..57a5aed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -214,6 +214,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String  DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = 
"dfs.namenode.edits.dir.minimum";
   public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
+  public static final String  DFS_NAMENODE_QUOTA_INIT_THREADS_KEY = 
"dfs.namenode.quota.init-threads";
+  public static final int DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT = 4;
 
   public static final String  
DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD = 
"dfs.namenode.edit.log.autoroll.multiplier.threshold";
   public static final float   
DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT = 2.0f;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6ceee9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
index ae4e874..8aee0bb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
@@ -24,6 +24,7 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -82,6 +83,8 @@ public class BackupImage extends FSImage {
   
   private FSNamesystem namesystem;
 
+  private int quotaInitThreads;
+
   /**
* Construct a backup image.
* @param conf Configuration
@@ -91,6 +94,9 @@ public class BackupImage extends FSImage {
 super(conf);
 storage.setDisablePreUpgradableLayoutCheck(true);
 bnState = BNState.DROP_UNTIL_NEXT_ROLL;
+quotaInitThreads = conf.getInt(
+DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_KEY,
+DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT);
   }
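A hedged companion to the BackupImage change above: any component can read the new knob the same way, and per the diff the default is 4 threads. A sketch, assuming hadoop-hdfs on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class QuotaInitThreadsRead {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    int quotaInitThreads = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_KEY,      // dfs.namenode.quota.init-threads
        DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT); // 4
    System.out.println("quota init threads: " + quotaInitThreads);
  }
}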
 
  

[34/50] [abbrv] hadoop git commit: HDFS-8990. Move RemoteBlockReader to hdfs-client module. Contributed by Mingliang Liu.

2015-09-01 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
deleted file mode 100644
index 2a77cb6..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
+++ /dev/null
@@ -1,477 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.io.BufferedOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.nio.channels.ReadableByteChannel;
-import java.util.EnumSet;
-import java.util.UUID;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.ReadOption;
-import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
-import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver;
-import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
-import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
-import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.DataChecksum;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * This is a wrapper around connection to datanode
- * and understands checksum, offset etc.
- *
- * Terminology:
- * 
- * block
- *   The hdfs block, typically large (~64MB).
- *   
- * chunk
- *   A block is divided into chunks, each comes with a checksum.
- *   We want transfers to be chunk-aligned, to be able to
- *   verify checksums.
- *   
- * packet
- *   A grouping of chunks used for transport. It contains a
- *   header, followed by checksum data, followed by real data.
- *   
- * 
- * Please see DataNode for the RPC specification.
- *
- * This is a new implementation introduced in Hadoop 0.23 which
- * is more efficient and simpler than the older BlockReader
- * implementation. It should be renamed to RemoteBlockReader
- * once we are confident in it.
- */
-@InterfaceAudience.Private
-public class RemoteBlockReader2  implements BlockReader {
-
-  static final Log LOG = LogFactory.getLog(RemoteBlockReader2.class);
-  
-  final private Peer peer;
-  final private DatanodeID datanodeID;
-  final private PeerCache peerCache;
-  final private long blockId;
-  private final ReadableByteChannel in;
-  private DataChecksum checksum;
-  
-  private final PacketReceiver packetReceiver = new PacketReceiver(true);
-  private ByteBuffer curDataSlice = null;
-
-  /** offset in block of the last chunk received */
-  private long lastSeqNo = -1;
-
-  /** offset in block where reader wants to actually read */
-  private long startOffset;
-  private final String filename;
-
-  private final int bytesPerChecksum;
-  private final int checksumSize;
-
-  /**
-   * The total number of bytes we need to transfer from the DN.
-   * This is the amount that the user has 
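The listing breaks off here, but the block/chunk/packet terminology above invites a worked example. Hedged arithmetic with typical values (assumed, not read from any config): 512-byte chunks and 4-byte CRC32 checksums:

public class PacketMath {
  public static void main(String[] args) {
    int bytesPerChecksum = 512;        // chunk size (assumed)
    int checksumSize = 4;              // CRC32 width (assumed)
    int packetDataLen = 64 * 1024;     // real data per packet
    int chunks = (packetDataLen + bytesPerChecksum - 1) / bytesPerChecksum;
    System.out.println(chunks + " chunks, "
        + chunks * checksumSize + " bytes of checksum data"); // 128 chunks, 512 bytes
  }
}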

[38/50] [abbrv] hadoop git commit: HDFS-8946. Improve choosing datanode storage for block placement. (yliu)

2015-09-01 Thread zhz
HDFS-8946. Improve choosing datanode storage for block placement. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fa41d9d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fa41d9d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fa41d9d

Branch: refs/heads/HDFS-7285
Commit: 8fa41d9dd4b923bf4141f019414a1a8b079124c6
Parents: 4eaa7fd
Author: yliu 
Authored: Tue Sep 1 08:52:50 2015 +0800
Committer: yliu 
Committed: Tue Sep 1 08:52:50 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../BlockPlacementPolicyDefault.java| 147 ++-
 .../blockmanagement/DatanodeDescriptor.java |  36 +++--
 .../blockmanagement/TestReplicationPolicy.java  |  26 +++-
 4 files changed, 93 insertions(+), 118 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fa41d9d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ef8fac5..6584c84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -870,6 +870,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8990. Move RemoteBlockReader to hdfs-client module.
 (Mingliang via wheat9)
 
+HDFS-8946. Improve choosing datanode storage for block placement. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fa41d9d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 6d7a765..f761150 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -26,12 +26,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
@@ -458,19 +455,18 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 for (Iterator> iter = storageTypes
 .entrySet().iterator(); iter.hasNext(); ) {
   Map.Entry entry = iter.next();
-  for (DatanodeStorageInfo localStorage : DFSUtil.shuffle(
-  localDatanode.getStorageInfos())) {
-StorageType type = entry.getKey();
-if (addIfIsGoodTarget(localStorage, excludedNodes, blocksize,
-results, type) >= 0) {
-  int num = entry.getValue();
-  if (num == 1) {
-iter.remove();
-  } else {
-entry.setValue(num - 1);
-  }
-  return localStorage;
+  DatanodeStorageInfo localStorage = chooseStorage4Block(
+  localDatanode, blocksize, results, entry.getKey());
+  if (localStorage != null) {
+// add node and related nodes to excludedNode
+addToExcludedNodes(localDatanode, excludedNodes);
+int num = entry.getValue();
+if (num == 1) {
+  iter.remove();
+} else {
+  entry.setValue(num - 1);
 }
+return localStorage;
   }
 }
   } 
@@ -651,7 +647,7 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 boolean avoidStaleNodes,
 EnumMap storageTypes)
 throws NotEnoughReplicasException {
-  
+
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
 StringBuilder builder = 

[14/50] [abbrv] hadoop git commit: HADOOP-12362. Set hadoop.tmp.dir and hadoop.log.dir in pom. Contributed by Charlie Helin.

2015-09-01 Thread zhz
HADOOP-12362. Set hadoop.tmp.dir and hadoop.log.dir in pom. Contributed by 
Charlie Helin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90fe7bcc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90fe7bcc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90fe7bcc

Branch: refs/heads/HDFS-7285
Commit: 90fe7bcc3b30f8497d7a5124c91c5aea28f2fccb
Parents: 50fa383
Author: Andrew Wang 
Authored: Thu Aug 27 10:17:00 2015 -0700
Committer: Andrew Wang 
Committed: Thu Aug 27 10:17:00 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 hadoop-project/pom.xml   | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml | 4 ++--
 3 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90fe7bcc/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0ec4ed6..95eb677 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1079,6 +1079,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12347. Fix mismatch parameter name in javadocs of
 AuthToken#setMaxInactives (xyao)
 
+HADOOP-12362. Set hadoop.tmp.dir and hadoop.log.dir in pom.
+(Charlie Helin via wang)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90fe7bcc/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 023b1c4..86102c6 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1121,14 +1121,14 @@
   
   
 
+${project.build.directory}/log
+${project.build.directory}/tmp
 
 
 ${test.build.dir}
-${hadoop.tmp.dir}
 ${test.build.data}
 ${test.build.webapps}
 ${test.cache.data}
-${hadoop.log.dir}
 ${test.build.classes}
 
 true

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90fe7bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
index 95bcb4f..52994e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
@@ -187,14 +187,14 @@
 
 
 
+  ${project.build.directory}/log
+  ${project.build.directory}/tmp
 
   
   ${test.build.dir}
-  ${hadoop.tmp.dir}
   ${test.build.data}
   ${test.build.webapps}
   ${test.cache.data}
-  ${hadoop.log.dir}
   ${test.build.classes}
 
   true



[12/50] [abbrv] hadoop git commit: HDFS-8961. Investigate lock issue in o.a.h.hdfs.shortcircuit.DfsClientShmManager.EndpointShmManager. Contributed by Mingliang Liu.

2015-09-01 Thread zhz
HDFS-8961. Investigate lock issue in 
o.a.h.hdfs.shortcircuit.DfsClientShmManager.EndpointShmManager. Contributed by 
Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e5f69e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e5f69e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e5f69e8

Branch: refs/heads/HDFS-7285
Commit: 1e5f69e85c035f9507e8b788df0b3ce20290a770
Parents: 7e971b7
Author: Haohui Mai 
Authored: Thu Aug 27 09:13:20 2015 -0700
Committer: Haohui Mai 
Committed: Thu Aug 27 09:13:20 2015 -0700

--
 .../dev-support/findbugsExcludeFile.xml   | 10 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop-hdfs/dev-support/findbugsExcludeFile.xml   | 10 --
 3 files changed, 13 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e5f69e8/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index ba6453d..036ac09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -21,4 +21,14 @@
 
 
   
+  
+
+
+
+  
+  
+
+
+
+  
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e5f69e8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0d85995..e779d37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1237,6 +1237,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8682. Should not remove decommissioned node,while calculating the
 number of live/dead decommissioned node. (J. Andreina via vinayakumarb)
 
+HDFS-8961. Investigate lock issue in o.a.h.hdfs.shortcircuit.
+DfsClientShmManager.EndpointShmManager. (Mingliang Liu via wheat9)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e5f69e8/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 224d2fb..60029e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -176,16 +176,6 @@


  
-
-  
-  
-  
-
-
-  
-  
-  
-
 
 
   



[08/50] [abbrv] hadoop git commit: HDFS-8248. Store INodeId instead of the INodeFile object in BlockInfoContiguous. Contributed by Haohui Mai.

2015-09-01 Thread zhz
HDFS-8248. Store INodeId instead of the INodeFile object in 
BlockInfoContiguous. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cbbfa22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cbbfa22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cbbfa22

Branch: refs/heads/HDFS-7285
Commit: 4cbbfa2220e884e91bf18ad1cc2f3b11f895f8c9
Parents: f44b599
Author: Haohui Mai 
Authored: Mon Aug 24 14:44:08 2015 -0700
Committer: Haohui Mai 
Committed: Wed Aug 26 18:14:29 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/BlockCollection.java |  5 ++
 .../hdfs/server/blockmanagement/BlockInfo.java  | 25 +
 .../server/blockmanagement/BlockManager.java| 19 +++
 .../hdfs/server/blockmanagement/BlocksMap.java  | 10 ++--
 .../blockmanagement/DecommissionManager.java|  7 ++-
 .../SequentialBlockIdGenerator.java |  5 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 24 ++---
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  8 +--
 .../hadoop/hdfs/server/namenode/INodeId.java|  1 +
 .../hadoop/hdfs/server/namenode/Namesystem.java |  3 ++
 .../server/blockmanagement/TestBlockInfo.java   | 11 ++--
 .../blockmanagement/TestBlockManager.java   | 15 ++
 .../blockmanagement/TestReplicationPolicy.java  | 10 +++-
 .../TestCommitBlockSynchronization.java |  9 ++--
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 54 ++--
 .../snapshot/TestSnapshotBlocksMap.java | 30 +++
 .../namenode/snapshot/TestSnapshotDeletion.java |  9 ++--
 18 files changed, 154 insertions(+), 94 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cbbfa22/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 54c2d21..e432da0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -844,6 +844,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8896. DataNode object isn't GCed when shutdown, because it has GC
 root in ShutdownHookManager. (Walter Su via jing9)
 
+HDFS-8248. Store INodeId instead of the INodeFile object in
+BlockInfoContiguous. (wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cbbfa22/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 3952cc6..95d9983 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -79,4 +79,9 @@ public interface BlockCollection {
* @return whether the block collection is under construction.
*/
   public boolean isUnderConstruction();
+
+  /**
+   * @return the id for the block collection
+   */
+  long getId();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cbbfa22/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index a9dfdde..706cbcd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -28,6 +28,8 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.LightWeightGSet;
 
+import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
+
 /**
  * BlockInfo class maintains for a given block
  * the {@link BlockCollection} it is part of and datanodes where the replicas 
of
@@ -40,11 +42,14 @@ public abstract class  BlockInfo extends Block
   public static final BlockInfo[] EMPTY_ARRAY = {};
 
   /**
-   * 

[35/50] [abbrv] hadoop git commit: HDFS-8990. Move RemoteBlockReader to hdfs-client module. Contributed by Mingliang Liu.

2015-09-01 Thread zhz
HDFS-8990. Move RemoteBlockReader to hdfs-client module. Contributed by 
Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/826ae1c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/826ae1c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/826ae1c2

Branch: refs/heads/HDFS-7285
Commit: 826ae1c26d31f87d88efc920b271bec7eec2e17a
Parents: caa04de
Author: Haohui Mai 
Authored: Mon Aug 31 13:54:14 2015 -0700
Committer: Haohui Mai 
Committed: Mon Aug 31 13:54:14 2015 -0700

--
 .../apache/hadoop/hdfs/RemoteBlockReader.java   | 512 +++
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  | 480 +
 .../protocol/datatransfer/PacketHeader.java | 214 
 .../protocol/datatransfer/PacketReceiver.java   | 310 +++
 .../hdfs/util/ByteBufferOutputStream.java   |  49 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   1 -
 .../apache/hadoop/hdfs/RemoteBlockReader.java   | 508 --
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  | 477 -
 .../protocol/datatransfer/PacketHeader.java | 214 
 .../protocol/datatransfer/PacketReceiver.java   | 310 ---
 .../hdfs/util/ByteBufferOutputStream.java   |  49 --
 .../hdfs/TestClientBlockVerification.java   |   4 +-
 13 files changed, 1570 insertions(+), 1561 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/826ae1c2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
new file mode 100644
index 000..7509da5
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
@@ -0,0 +1,512 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.EnumSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FSInputChecker;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.ReadOption;
+import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
+import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.htrace.Sampler;
+import org.apache.htrace.Trace;
+import org.apache.htrace.TraceScope;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * @deprecated this is an old implementation that is being left around
+ * in case any issues spring up with the new {@link RemoteBlockReader2} 
implementation.
+ * It will be removed in the 

[13/50] [abbrv] hadoop git commit: Revert "MAPREDUCE-6455. Unable to use surefire > 2.18. (Charlie Helin via kasha)"

2015-09-01 Thread zhz
Revert "MAPREDUCE-6455. Unable to use surefire > 2.18. (Charlie Helin via 
kasha)"

This reverts commit 61bf9cae6f3882c6e9a9222f59457b9be91e3018.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50fa3837
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50fa3837
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50fa3837

Branch: refs/heads/HDFS-7285
Commit: 50fa38370468bf44031607dfed49b052cfc6d1e3
Parents: 1e5f69e
Author: Andrew Wang 
Authored: Thu Aug 27 10:15:19 2015 -0700
Committer: Andrew Wang 
Committed: Thu Aug 27 10:15:19 2015 -0700

--
 .../src/main/java/org/apache/hadoop/conf/Configuration.java | 9 ++---
 hadoop-mapreduce-project/CHANGES.txt| 2 --
 .../src/main/java/org/apache/hadoop/mapred/TaskLog.java | 4 +---
 3 files changed, 3 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50fa3837/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 6f1d3f8..0b45429 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -988,13 +988,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   } catch(SecurityException se) {
 LOG.warn("Unexpected SecurityException in Configuration", se);
   }
-  if (val == null || val.isEmpty()) {
-String raw = getRaw(var);
-if (raw != null) {
-  // if System.getProperty(var) returns an empty string, retain this
-  // value instead of return null
-  val = raw;
-}
+  if (val == null) {
+val = getRaw(var);
   }
   if (val == null) {
 return eval; // return literal ${var}: var is unbound
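
The restored lookup order means a system property that is set but empty now wins over the configuration's raw value. A minimal standalone sketch of that order (a mock, not the real Configuration internals):

    import java.util.HashMap;
    import java.util.Map;

    public class SubstitutionOrderSketch {
      // Stand-in for the raw, unsubstituted properties of a Configuration.
      static final Map<String, String> rawProps = new HashMap<>();

      static String resolve(String var) {
        String val = System.getProperty(var); // first: JVM system properties
        if (val == null) {
          val = rawProps.get(var);            // then: the configuration itself
        }
        return val;                           // null leaves ${var} unexpanded
      }

      public static void main(String[] args) {
        rawProps.put("hadoop.log.dir", "/var/log/hadoop");
        System.setProperty("hadoop.log.dir", "");
        // Prints "[]": the empty system property is used as-is and no longer
        // falls back to the raw config value once the patch is reverted.
        System.out.println("[" + resolve("hadoop.log.dir") + "]");
      }
    }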

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50fa3837/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 305b29e..361a19b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -275,8 +275,6 @@ Trunk (Unreleased)
 MAPREDUCE-5801. Uber mode's log message is missing a vcore reason
 (Steven Wong via aw)
 
-MAPREDUCE-6455. Unable to use surefire > 2.18. (Charlie Helin via kasha)
-
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50fa3837/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
index b8bb2f2..e07b5be 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
@@ -181,9 +181,7 @@ public class TaskLog {
   }
 
   static String getBaseLogDir() {
-String logDir = System.getProperty("hadoop.log.dir");
-// file is treating "" different from null {@see File#File(String, String)}
-return logDir == null || logDir.isEmpty() ? null : logDir;
+return System.getProperty("hadoop.log.dir");
   }
 
   static File getAttemptDir(TaskAttemptID taskid, boolean isCleanup) {
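
The guard being removed existed because java.io.File treats an empty parent differently from a null one; a small sketch of the distinction the deleted comment pointed at:

    import java.io.File;

    public class EmptyParentSketch {
      public static void main(String[] args) {
        // Null parent: the child stays a relative path.
        System.out.println(new File((String) null, "userlogs").getPath()); // userlogs
        // Empty parent: the child is resolved against a system-dependent
        // default directory ("/" on Unix), per the File(String, String)
        // javadoc, which is rarely what a log directory lookup wants.
        System.out.println(new File("", "userlogs").getPath()); // /userlogs on Unix
      }
    }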



[45/50] [abbrv] hadoop git commit: Merge remote-tracking branch 'apache/trunk' into HDFS-7285

2015-09-01 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab56fcdb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
index 1b23600,000..3c77120
mode 100644,00..100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
@@@ -1,248 -1,0 +1,250 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.hadoop.hdfs.server.blockmanagement;
 +
 +import org.apache.hadoop.hdfs.DFSTestUtil;
 +import org.apache.hadoop.hdfs.protocol.Block;
 +import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
 +import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 +import org.junit.Assert;
 +import org.junit.Test;
 +import org.mockito.internal.util.reflection.Whitebox;
 +
 +import java.io.DataOutput;
 +import java.io.DataOutputStream;
 +import java.io.ByteArrayOutputStream;
 +import java.nio.ByteBuffer;
 +
 +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.NUM_DATA_BLOCKS;
 +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.NUM_PARITY_BLOCKS;
 +import static org.junit.Assert.assertArrayEquals;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.fail;
 +
 +/**
 + * Test {@link BlockInfoStriped}
 + */
 +public class TestBlockInfoStriped {
 +  private static final int TOTAL_NUM_BLOCKS = NUM_DATA_BLOCKS + 
NUM_PARITY_BLOCKS;
 +  private static final long BASE_ID = -1600;
 +  private static final Block baseBlock = new Block(BASE_ID);
 +  private static final ErasureCodingPolicy testECPolicy
 +  = ErasureCodingPolicyManager.getSystemDefaultPolicy();
 +  private final BlockInfoStriped info = new BlockInfoStriped(baseBlock,
 +  testECPolicy);
 +
 +  private Block[] createReportedBlocks(int num) {
 +Block[] blocks = new Block[num];
 +for (int i = 0; i < num; i++) {
 +  blocks[i] = new Block(BASE_ID + i);
 +}
 +return blocks;
 +  }
 +
 +  /**
 +   * Test adding storage and reported block
 +   */
 +  @Test
 +  public void testAddStorage() {
 +// first add NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS storages, i.e., a 
complete
 +// group of blocks/storages
 +DatanodeStorageInfo[] storageInfos = 
DFSTestUtil.createDatanodeStorageInfos(
 +TOTAL_NUM_BLOCKS);
 +Block[] blocks = createReportedBlocks(TOTAL_NUM_BLOCKS);
 +int i = 0;
 +for (; i < storageInfos.length; i += 2) {
 +  info.addStorage(storageInfos[i], blocks[i]);
 +  Assert.assertEquals(i/2 + 1, info.numNodes());
 +}
 +i /= 2;
 +for (int j = 1; j < storageInfos.length; j += 2) {
 +  Assert.assertTrue(info.addStorage(storageInfos[j], blocks[j]));
 +  Assert.assertEquals(i + (j+1)/2, info.numNodes());
 +}
 +
 +// check
 +byte[] indices = (byte[]) Whitebox.getInternalState(info, "indices");
 +Assert.assertEquals(TOTAL_NUM_BLOCKS, info.getCapacity());
 +Assert.assertEquals(TOTAL_NUM_BLOCKS, indices.length);
 +i = 0;
 +for (DatanodeStorageInfo storage : storageInfos) {
 +  int index = info.findStorageInfo(storage);
 +  Assert.assertEquals(i++, index);
 +  Assert.assertEquals(index, indices[index]);
 +}
 +
 +// the same block is reported from the same storage twice
 +i = 0;
 +for (DatanodeStorageInfo storage : storageInfos) {
 +  Assert.assertTrue(info.addStorage(storage, blocks[i++]));
 +}
 +Assert.assertEquals(TOTAL_NUM_BLOCKS, info.getCapacity());
 +Assert.assertEquals(TOTAL_NUM_BLOCKS, info.numNodes());
 +Assert.assertEquals(TOTAL_NUM_BLOCKS, indices.length);
 +i = 0;
 +for (DatanodeStorageInfo storage : storageInfos) {
 +  int index = info.findStorageInfo(storage);
 +  Assert.assertEquals(i++, index);
 +

[28/50] [abbrv] hadoop git commit: HDFS-8925. Move BlockReaderLocal to hdfs-client. Contributed by Mingliang Liu.

2015-09-01 Thread zhz
HDFS-8925. Move BlockReaderLocal to hdfs-client. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2c9b288
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2c9b288
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2c9b288

Branch: refs/heads/HDFS-7285
Commit: e2c9b288b223b9fd82dc12018936e13128413492
Parents: b94b568
Author: Haohui Mai 
Authored: Fri Aug 28 14:20:55 2015 -0700
Committer: Haohui Mai 
Committed: Fri Aug 28 14:38:36 2015 -0700

--
 .../org/apache/hadoop/hdfs/BlockReader.java | 102 +++
 .../apache/hadoop/hdfs/BlockReaderLocal.java| 743 +++
 .../hadoop/hdfs/BlockReaderLocalLegacy.java | 738 ++
 .../org/apache/hadoop/hdfs/BlockReaderUtil.java |  57 ++
 .../org/apache/hadoop/hdfs/ClientContext.java   | 196 +
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  68 ++
 .../apache/hadoop/hdfs/ExternalBlockReader.java | 120 +++
 .../apache/hadoop/hdfs/KeyProviderCache.java| 112 +++
 .../java/org/apache/hadoop/hdfs/PeerCache.java  | 291 
 .../hadoop/hdfs/client/BlockReportOptions.java  |  59 ++
 .../hdfs/client/HdfsClientConfigKeys.java   |   5 +
 .../hdfs/protocol/BlockLocalPathInfo.java   |  70 ++
 .../hdfs/protocol/ClientDatanodeProtocol.java   | 152 
 .../InvalidEncryptionKeyException.java  |  40 +
 .../protocolPB/ClientDatanodeProtocolPB.java|  37 +
 .../ClientDatanodeProtocolTranslatorPB.java | 326 
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  13 +
 .../token/block/BlockTokenSelector.java |  48 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/BlockReader.java | 102 ---
 .../apache/hadoop/hdfs/BlockReaderLocal.java| 741 --
 .../hadoop/hdfs/BlockReaderLocalLegacy.java | 735 --
 .../org/apache/hadoop/hdfs/BlockReaderUtil.java |  57 --
 .../org/apache/hadoop/hdfs/ClientContext.java   | 195 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |   2 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  67 +-
 .../apache/hadoop/hdfs/ExternalBlockReader.java | 120 ---
 .../apache/hadoop/hdfs/KeyProviderCache.java| 111 ---
 .../java/org/apache/hadoop/hdfs/PeerCache.java  | 290 
 .../hadoop/hdfs/client/BlockReportOptions.java  |  59 --
 .../hdfs/protocol/BlockLocalPathInfo.java   |  70 --
 .../hdfs/protocol/ClientDatanodeProtocol.java   | 152 
 .../InvalidEncryptionKeyException.java  |  40 -
 .../hdfs/protocol/datatransfer/Receiver.java|  15 +-
 .../protocolPB/ClientDatanodeProtocolPB.java|  37 -
 ...tDatanodeProtocolServerSideTranslatorPB.java |   6 +-
 .../ClientDatanodeProtocolTranslatorPB.java | 326 
 ...tNamenodeProtocolServerSideTranslatorPB.java |  14 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   2 +-
 ...rDatanodeProtocolServerSideTranslatorPB.java |   2 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  26 +-
 .../token/block/BlockTokenSelector.java |  48 --
 .../hadoop/hdfs/server/datanode/DNConf.java |   4 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   3 +-
 .../hadoop/hdfs/TestBlockReaderLocal.java   |  30 +-
 .../hadoop/hdfs/TestBlockReaderLocalLegacy.java |   2 +-
 .../hadoop/hdfs/TestDFSClientRetries.java   |   2 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|   4 +-
 .../security/token/block/TestBlockToken.java|  10 +-
 .../shortcircuit/TestShortCircuitLocalRead.java |   4 +-
 51 files changed, 3243 insertions(+), 3227 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2c9b288/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
new file mode 100644
index 000..aa3e8ba
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or 

[32/50] [abbrv] hadoop git commit: YARN-2945. Fixing the CHANGES.txt to have the right JIRA number.

2015-09-01 Thread zhz
YARN-2945. Fixing the CHANGES.txt to have the right JIRA number.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf831565
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf831565
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf831565

Branch: refs/heads/HDFS-7285
Commit: cf831565e8344523e1bd0eaf686ed56a2b48b920
Parents: 837fb75
Author: Vinod Kumar Vavilapalli 
Authored: Sun Aug 30 20:01:47 2015 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Sun Aug 30 20:01:47 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf831565/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0b733a4..4201b4f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1453,7 +1453,7 @@ Release 2.7.0 - 2015-04-20
 YARN-2914. [YARN-1492] Potential race condition in Singleton 
implementation of 
 SharedCacheUploaderMetrics, CleanerMetrics, ClientSCMMetrics. (Varun 
Saxena via kasha)
 
-YARN-2964. FSLeafQueue#assignContainer - document the reason for using 
both write and
+YARN-2945. FSLeafQueue#assignContainer - document the reason for using 
both write and
 read locks. (Tsuyoshi Ozawa via kasha)
 
 YARN-2944. InMemorySCMStore can not be instantiated with 
ReflectionUtils#newInstance.



[47/50] [abbrv] hadoop git commit: Merge remote-tracking branch 'apache/trunk' into HDFS-7285

2015-09-01 Thread zhz
Merge remote-tracking branch 'apache/trunk' into HDFS-7285


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab56fcdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab56fcdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab56fcdb

Branch: refs/heads/HDFS-7285
Commit: ab56fcdb1219d03713b408dd3a95d7405635254d
Parents: 164cbe6 cbb2495
Author: Zhe Zhang 
Authored: Thu Aug 27 16:23:41 2015 -0700
Committer: Zhe Zhang 
Committed: Tue Sep 1 14:30:25 2015 -0700

--
 .../server/AuthenticationFilter.java|   63 +-
 .../server/AuthenticationToken.java |   12 +
 .../security/authentication/util/AuthToken.java |   35 +-
 .../server/TestAuthenticationFilter.java|  163 ++-
 hadoop-common-project/hadoop-common/CHANGES.txt |   34 +
 .../src/main/conf/log4j.properties  |   13 +
 .../fs/CommonConfigurationKeysPublic.java   |5 +
 .../java/org/apache/hadoop/fs/CreateFlag.java   |2 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java|   11 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|5 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |   60 +
 .../apache/hadoop/ipc/WritableRpcEngine.java|3 +
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   |   48 +
 .../apache/hadoop/metrics2/lib/MutableStat.java |7 +-
 .../org/apache/hadoop/metrics2/util/MBeans.java |   37 +-
 .../org/apache/hadoop/util/HostsFileReader.java |7 +-
 .../main/java/org/apache/hadoop/util/Shell.java |   11 +-
 .../org/apache/hadoop/util/StringUtils.java |   29 +-
 .../src/main/resources/core-default.xml |9 +
 .../src/site/markdown/HttpAuthentication.md |8 +-
 .../hadoop-common/src/site/markdown/Metrics.md  |2 +
 .../src/site/markdown/SingleCluster.md.vm   |2 +-
 .../org/apache/hadoop/ipc/TestProtoBufRpc.java  |   77 +-
 .../org/apache/hadoop/test/MetricsAsserts.java  |2 +-
 .../java/org/apache/hadoop/util/TestShell.java  |   39 +
 .../hadoop-common/src/test/proto/test.proto |7 +
 .../src/test/proto/test_rpc_service.proto   |1 +
 .../dev-support/findbugsExcludeFile.xml |   10 +
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |   26 +
 .../org/apache/hadoop/hdfs/ExtendedBlockId.java |   82 ++
 .../org/apache/hadoop/hdfs/ReplicaAccessor.java |   88 ++
 .../hadoop/hdfs/ReplicaAccessorBuilder.java |  101 ++
 .../hdfs/client/HdfsClientConfigKeys.java   |   76 +-
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  794 +
 .../hadoop/hdfs/client/impl/package-info.java   |   18 +
 .../org/apache/hadoop/hdfs/net/DomainPeer.java  |  132 +++
 .../java/org/apache/hadoop/hdfs/net/Peer.java   |  123 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |7 +
 .../datatransfer/BlockConstructionStage.java|   62 +
 .../datatransfer/DataTransferProtoUtil.java |  146 +++
 .../datatransfer/DataTransferProtocol.java  |  202 
 .../hadoop/hdfs/protocol/datatransfer/Op.java   |   66 ++
 .../hdfs/protocol/datatransfer/Sender.java  |  261 +
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  254 +
 .../token/block/InvalidBlockTokenException.java |   41 +
 .../server/datanode/BlockMetadataHeader.java|  209 
 .../hdfs/server/datanode/CachingStrategy.java   |   76 ++
 .../hadoop/hdfs/shortcircuit/ClientMmap.java|   75 ++
 .../hadoop/hdfs/shortcircuit/DfsClientShm.java  |  119 ++
 .../hdfs/shortcircuit/DfsClientShmManager.java  |  522 +
 .../hdfs/shortcircuit/DomainSocketFactory.java  |  196 
 .../hdfs/shortcircuit/ShortCircuitCache.java| 1066 +
 .../hdfs/shortcircuit/ShortCircuitReplica.java  |  352 ++
 .../shortcircuit/ShortCircuitReplicaInfo.java   |   64 ++
 .../hdfs/shortcircuit/ShortCircuitShm.java  |  647 +++
 .../hadoop/hdfs/util/ByteArrayManager.java  |  422 +++
 .../hadoop/hdfs/util/ExactSizeInputStream.java  |  125 ++
 .../apache/hadoop/hdfs/util/IOUtilsClient.java  |   46 +
 .../apache/hadoop/hdfs/util/package-info.java   |   18 +
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |   20 +
 .../hdfs/web/resources/CreateFlagParam.java |   48 +
 .../hdfs/web/resources/CreateParentParam.java   |2 +-
 .../src/main/proto/ClientDatanodeProtocol.proto |   33 -
 .../src/main/proto/datatransfer.proto   |4 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  125 +-
 .../dev-support/findbugsExcludeFile.xml |   10 -
 .../hadoop-hdfs/src/CMakeLists.txt  |1 +
 .../apache/hadoop/fs/BlockStorageLocation.java  |   52 -
 .../java/org/apache/hadoop/fs/HdfsVolumeId.java |   73 --
 .../java/org/apache/hadoop/fs/VolumeId.java |   40 -
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |   65 +-
 .../hadoop/hdfs/BlockStorageLocationUtil.java   |  368 --
 

[26/50] [abbrv] hadoop git commit: HDFS-8925. Move BlockReaderLocal to hdfs-client. Contributed by Mingliang Liu.

2015-09-01 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2c9b288/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
deleted file mode 100644
index c16ffdf..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
+++ /dev/null
@@ -1,735 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.security.PrivilegedExceptionAction;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ReadOption;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
-import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
-import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
-import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.DirectBufferPool;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-
-/**
- * BlockReaderLocalLegacy enables local short circuited reads. If the DFS 
client is on
- * the same machine as the datanode, then the client can read files directly
- * from the local file system rather than going through the datanode for better
- * performance. 
- *
- * This is the legacy implementation based on HDFS-2246, which requires
- * permissions on the datanode to be set so that clients can directly access 
the
- * blocks. The new implementation based on HDFS-347 should be preferred on UNIX
- * systems where the required native code has been implemented.
- *
- * {@link BlockReaderLocalLegacy} works as follows:
- * 
- * The client performing short circuit reads must be configured at the
- * datanode.
- * The client gets the path to the file where block is stored using
- * {@link 
org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol#getBlockLocalPathInfo(ExtendedBlock,
 Token)}
- * RPC call
- * Client uses kerberos authentication to connect to the datanode over RPC,
- * if security is enabled.
- * 
- */
-@InterfaceAudience.Private
-class BlockReaderLocalLegacy implements BlockReader {
-  private static final Log LOG = 
LogFactory.getLog(BlockReaderLocalLegacy.class);
-
-  //Stores the cache and proxy for a local datanode.
-  private static class LocalDatanodeInfo {
-private ClientDatanodeProtocol proxy = null;
-private final Map<ExtendedBlock, BlockLocalPathInfo> cache;
-
-LocalDatanodeInfo() {
-  final int cacheSize = 1;
-  final float hashTableLoadFactor = 0.75f;
-  int hashTableCapacity = (int) Math.ceil(cacheSize / hashTableLoadFactor) 
+ 1;
-  cache = Collections
-  .synchronizedMap(new LinkedHashMap<ExtendedBlock, BlockLocalPathInfo>(
-  hashTableCapacity, hashTableLoadFactor, true) {
-private static final long serialVersionUID = 1;
-
-@Override
-protected 
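
The cache above is the standard access-ordered LinkedHashMap LRU idiom, cut off here mid-override; a self-contained sketch of the same pattern with generic types and an arbitrary capacity:

    import java.util.Collections;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class LruCacheSketch {
      static <K, V> Map<K, V> lruCache(final int maxEntries) {
        // accessOrder=true keeps iteration order least- to most-recently
        // used; removeEldestEntry evicts once the cap is exceeded.
        return Collections.synchronizedMap(
            new LinkedHashMap<K, V>(maxEntries + 1, 0.75f, true) {
              private static final long serialVersionUID = 1L;
              @Override
              protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
                return size() > maxEntries;
              }
            });
      }

      public static void main(String[] args) {
        Map<String, Integer> cache = lruCache(2);
        cache.put("a", 1);
        cache.put("b", 2);
        cache.get("a");    // touch "a", making "b" the eldest entry
        cache.put("c", 3); // evicts "b"
        System.out.println(cache.keySet()); // [a, c]
      }
    }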

[39/50] [abbrv] hadoop git commit: HDFS-8965. Harden edit log reading code against out of memory errors (cmccabe)

2015-09-01 Thread zhz
HDFS-8965. Harden edit log reading code against out of memory errors (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24f6a7c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24f6a7c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24f6a7c9

Branch: refs/heads/HDFS-7285
Commit: 24f6a7c9563757234f53ca23e12f9c9208b53082
Parents: 8fa41d9
Author: Colin Patrick Mccabe 
Authored: Mon Aug 31 17:31:29 2015 -0700
Committer: Colin Patrick Mccabe 
Committed: Mon Aug 31 18:06:30 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../bkjournal/BookKeeperEditLogInputStream.java |   2 +-
 .../hadoop/hdfs/protocol/LayoutVersion.java |   2 +-
 .../namenode/EditLogBackupInputStream.java  |   2 +-
 .../server/namenode/EditLogFileInputStream.java |   2 +-
 .../hdfs/server/namenode/FSEditLogOp.java   | 354 +--
 .../hdfs/server/namenode/TestEditLog.java   |   2 +-
 .../namenode/TestEditLogFileInputStream.java|  80 +
 8 files changed, 341 insertions(+), 105 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24f6a7c9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6584c84..57ddcb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -872,6 +872,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8946. Improve choosing datanode storage for block placement. (yliu)
 
+HDFS-8965. Harden edit log reading code against out of memory errors 
(cmccabe)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24f6a7c9/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
index e2098dd..86da807 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
@@ -83,7 +83,7 @@ class BookKeeperEditLogInputStream extends EditLogInputStream 
{
 tracker = new FSEditLogLoader.PositionTrackingInputStream(bin);
 DataInputStream in = new DataInputStream(tracker);
 
-reader = new FSEditLogOp.Reader(in, tracker, logVersion);
+reader = FSEditLogOp.Reader.create(in, tracker, logVersion);
   }
 
   @Override
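
The call-site change swaps a public constructor for a static factory. A hedged sketch of why that shape helps here; the subclass names and the version predicate are hypothetical, since the actual dispatch inside FSEditLogOp is not part of this diff:

    import java.io.DataInputStream;

    // Sketch only: a static create() can choose a concrete reader per log
    // version, which a directly invoked constructor cannot.
    abstract class EditOpReaderSketch {
      static EditOpReaderSketch create(DataInputStream in, int logVersion) {
        return supportsLengthPrefix(logVersion)
            ? new LengthCheckedReader(in)
            : new LegacyReader(in);
      }

      private static boolean supportsLengthPrefix(int logVersion) {
        return logVersion <= -23; // illustrative threshold, not the real one
      }

      abstract void readOp();

      static class LengthCheckedReader extends EditOpReaderSketch {
        LengthCheckedReader(DataInputStream in) {}
        @Override void readOp() { /* validate op length before allocating */ }
      }

      static class LegacyReader extends EditOpReaderSketch {
        LegacyReader(DataInputStream in) {}
        @Override void readOp() { /* older format, no up-front length check */ }
      }
    }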

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24f6a7c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
index c893744..1750790 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
@@ -87,7 +87,7 @@ public class LayoutVersion {
 FSIMAGE_COMPRESSION(-25, "Support for fsimage compression"),
 FSIMAGE_CHECKSUM(-26, "Support checksum for fsimage"),
 REMOVE_REL13_DISK_LAYOUT_SUPPORT(-27, "Remove support for 0.13 disk 
layout"),
-EDITS_CHESKUM(-28, "Support checksum for editlog"),
+EDITS_CHECKSUM(-28, "Support checksum for editlog"),
 UNUSED(-29, "Skipped version"),
 FSIMAGE_NAME_OPTIMIZATION(-30, "Store only last part of path in fsimage"),
 RESERVED_REL20_203(-31, -19, "Reserved for release 0.20.203", true,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24f6a7c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
 

[07/50] [abbrv] hadoop git commit: HDFS-8896. DataNode object isn't GCed when shutdown, because it has GC root in ShutdownHookManager. Contributed by Walter Su.

2015-09-01 Thread zhz
HDFS-8896. DataNode object isn't GCed when shutdown, because it has GC root in 
ShutdownHookManager. Contributed by Walter Su.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f44b5990
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f44b5990
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f44b5990

Branch: refs/heads/HDFS-7285
Commit: f44b599003bb79b1ec9b92e7546546523ec01676
Parents: c992bcf
Author: Jing Zhao 
Authored: Wed Aug 26 16:26:18 2015 -0700
Committer: Jing Zhao 
Committed: Wed Aug 26 16:27:34 2015 -0700

--
 .../main/java/org/apache/hadoop/util/ShutdownHookManager.java  | 6 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java   | 2 ++
 3 files changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f44b5990/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
index 989c96a..85533db 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
@@ -187,4 +187,10 @@ public class ShutdownHookManager {
 return shutdownInProgress.get();
   }
 
+  /**
+   * clear all registered shutdownHooks.
+   */
+  public void clearShutdownHooks() {
+hooks.clear();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f44b5990/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 607de79..54c2d21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -841,6 +841,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8951. Move the shortcircuit package to hdfs-client.
 (Mingliang Liu via wheat9)
 
+HDFS-8896. DataNode object isn't GCed when shutdown, because it has GC
+root in ShutdownHookManager. (Walter Su via jing9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f44b5990/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 7052321..24e0965 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -119,6 +119,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -1867,6 +1868,7 @@ public class MiniDFSCluster {
 nameNode = null;
   }
 }
+ShutdownHookManager.get().clearShutdownHooks();
 if (base_dir != null) {
   if (deleteDfsDir) {
 base_dir.delete();
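
With this change, MiniDFSCluster.shutdown() clears the registered hooks itself, so repeated cluster start/stop cycles in one test JVM no longer pin DataNode objects; a hypothetical teardown relying on that:

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.After;

    // Hypothetical test base class: after the change above, shutdown() also
    // clears the shutdown hooks, so dropping the reference lets the DataNode
    // objects become unreachable and eligible for GC.
    public abstract class ClusterTestBaseSketch {
      protected MiniDFSCluster cluster;

      @After
      public void tearDown() {
        if (cluster != null) {
          cluster.shutdown();
          cluster = null; // drop the last strong reference
        }
      }
    }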



[46/50] [abbrv] hadoop git commit: Merge remote-tracking branch 'apache/trunk' into HDFS-7285

2015-09-01 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab56fcdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 6093776,000..7b21cbe
mode 100644,00..100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@@ -1,258 -1,0 +1,253 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.hadoop.hdfs.server.blockmanagement;
 +
 +import org.apache.hadoop.hdfs.protocol.Block;
 +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 +import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 +
 +import static 
org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
 +
 +/**
 + * Subclass of {@link BlockInfo}, presenting a block group in erasure coding.
 + *
 + * We still use triplets to store DatanodeStorageInfo for each block in the
 + * block group, as well as the previous/next block in the corresponding
 + * DatanodeStorageInfo. For a (m+k) block group, the first (m+k) triplet units
 + * are sorted and strictly mapped to the corresponding block.
 + *
 + * Normally each block belonging to group is stored in only one DataNode.
 + * However, it is possible that some block is over-replicated. Thus the 
triplet
 + * array's size can be larger than (m+k). Thus currently we use an extra byte
 + * array to record the block index for each triplet.
 + */
 +public class BlockInfoStriped extends BlockInfo {
 +  private final ErasureCodingPolicy ecPolicy;
 +  /**
 +   * Always the same size with triplets. Record the block index for each 
triplet
 +   * TODO: actually this is only necessary for over-replicated block. Thus can
 +   * be further optimized to save memory usage.
 +   */
 +  private byte[] indices;
 +
 +  public BlockInfoStriped(Block blk, ErasureCodingPolicy ecPolicy) {
 +super(blk, (short) (ecPolicy.getNumDataUnits() + 
ecPolicy.getNumParityUnits()));
 +indices = new byte[ecPolicy.getNumDataUnits() + 
ecPolicy.getNumParityUnits()];
 +initIndices();
 +this.ecPolicy = ecPolicy;
 +  }
 +
-   BlockInfoStriped(BlockInfoStriped b) {
- this(b, b.getErasureCodingPolicy());
- this.setBlockCollection(b.getBlockCollection());
-   }
- 
 +  public short getTotalBlockNum() {
 +return (short) (ecPolicy.getNumDataUnits() + 
ecPolicy.getNumParityUnits());
 +  }
 +
 +  public short getDataBlockNum() {
 +return (short) ecPolicy.getNumDataUnits();
 +  }
 +
 +  public short getParityBlockNum() {
 +return (short) ecPolicy.getNumParityUnits();
 +  }
 +
 +  /**
 +   * If the block is committed/completed and its length is less than a full
 +   * stripe, it returns the number of actual data blocks.
 +   * Otherwise it returns the number of data units specified by erasure 
coding policy.
 +   */
 +  public short getRealDataBlockNum() {
 +if (isComplete() || getBlockUCState() == BlockUCState.COMMITTED) {
 +  return (short) Math.min(getDataBlockNum(),
 +  (getNumBytes() - 1) / BLOCK_STRIPED_CELL_SIZE + 1);
 +} else {
 +  return getDataBlockNum();
 +}
 +  }
 +
 +  public short getRealTotalBlockNum() {
 +return (short) (getRealDataBlockNum() + getParityBlockNum());
 +  }
 +
 +  public ErasureCodingPolicy getErasureCodingPolicy() {
 +return ecPolicy;
 +  }
 +
 +  private void initIndices() {
 +for (int i = 0; i < indices.length; i++) {
 +  indices[i] = -1;
 +}
 +  }
 +
 +  private int findSlot() {
 +int i = getTotalBlockNum();
 +for (; i < getCapacity(); i++) {
 +  if (getStorageInfo(i) == null) {
 +return i;
 +  }
 +}
 +// need to expand the triplet size
 +ensureCapacity(i + 1, true);
 +return i;
 +  }
 +
 +  @Override
 +  boolean addStorage(DatanodeStorageInfo storage, 
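
A worked check of the getRealDataBlockNum arithmetic above, assuming a 6-data-unit policy and a cell size of 64 KiB purely for illustration (BLOCK_STRIPED_CELL_SIZE's value is imported rather than shown in this diff):

    public class RealDataBlockNumSketch {
      static final long CELL = 64 * 1024; // assumed cell size, illustrative
      static final int DATA_BLOCKS = 6;   // e.g. a 6+3 erasure coding policy

      // ceil(numBytes / CELL), capped at the policy's data-unit count.
      static int realDataBlockNum(long numBytes) {
        return (int) Math.min(DATA_BLOCKS, (numBytes - 1) / CELL + 1);
      }

      public static void main(String[] args) {
        System.out.println(realDataBlockNum(100 * 1024)); // 2: spans two cells
        System.out.println(realDataBlockNum(CELL * 10));  // 6: capped at the data-unit count
      }
    }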

[29/50] [abbrv] hadoop git commit: HDFS-8983. NameNode support for protected directories. (Contributed by Arpit Agarwal)

2015-09-01 Thread zhz
HDFS-8983. NameNode support for protected directories. (Contributed by Arpit 
Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bdbe53c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bdbe53c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bdbe53c6

Branch: refs/heads/HDFS-7285
Commit: bdbe53c676dd4ff135ea2f64d3b9193fe43d7c8e
Parents: e2c9b28
Author: Arpit Agarwal 
Authored: Sat Aug 29 09:51:55 2015 -0700
Committer: Arpit Agarwal 
Committed: Sat Aug 29 09:52:37 2015 -0700

--
 .../fs/CommonConfigurationKeysPublic.java   |   7 +
 .../src/main/resources/core-default.xml |  10 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hdfs/server/namenode/FSDirDeleteOp.java |  40 ++
 .../hdfs/server/namenode/FSDirectory.java   |  63 
 .../namenode/TestProtectedDirectories.java  | 373 +++
 6 files changed, 495 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdbe53c6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 24d648f..f3bc2e1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -85,6 +85,13 @@ public class CommonConfigurationKeysPublic {
   /** Default value for FS_TRASH_CHECKPOINT_INTERVAL_KEY */
   public static final longFS_TRASH_CHECKPOINT_INTERVAL_DEFAULT = 0;
 
+  /**
+   * Directories that cannot be removed unless empty, even by an
+   * administrator.
+   */
+  public static final String FS_PROTECTED_DIRECTORIES =
+  "fs.protected.directories";
+
   // TBD: Code is still using hardcoded values (e.g. "fs.automatic.close")
   // instead of constant (e.g. FS_AUTOMATIC_CLOSE_KEY)
   //

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdbe53c6/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index d02f0ac..cef32d3 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -565,6 +565,16 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.protected.directories</name>
+  <value></value>
+  <description>A comma-separated list of directories which cannot
+    be deleted even by the superuser unless they are empty. This
+    setting can be used to guard important system directories
+    against accidental deletion due to administrator error.
+  </description>
+</property>
+
+<property>
   <name>fs.AbstractFileSystem.file.impl</name>
   <value>org.apache.hadoop.fs.local.LocalFs</value>
   <description>The AbstractFileSystem for file: uris.</description>
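
A short sketch of setting the new key from code, using the constant added to CommonConfigurationKeysPublic above; the two paths are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

    public class ProtectedDirsSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Deleting either directory now fails unless it is empty,
        // even for the superuser.
        conf.set(CommonConfigurationKeysPublic.FS_PROTECTED_DIRECTORIES,
            "/system,/apps");
        System.out.println(
            conf.get(CommonConfigurationKeysPublic.FS_PROTECTED_DIRECTORIES));
      }
    }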

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdbe53c6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a561909..6f46ea5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -861,6 +861,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8925. Move BlockReaderLocal to hdfs-client.
 (Mingliang Liu via wheat9)
 
+HDFS-8983. NameNode support for protected directories. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdbe53c6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index b0e9a5c..51d643a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -17,15 +17,19 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.Path;
 import 

[31/50] [abbrv] hadoop git commit: HDFS-8155. Support OAuth2 in WebHDFS.

2015-09-01 Thread zhz
HDFS-8155. Support OAuth2 in WebHDFS.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/837fb75e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/837fb75e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/837fb75e

Branch: refs/heads/HDFS-7285
Commit: 837fb75e8e03b2f016bcea2f4605106a5022491c
Parents: 6ab2d19
Author: Jakob Homan 
Authored: Sat Aug 29 18:37:05 2015 -0700
Committer: Jakob Homan 
Committed: Sat Aug 29 18:37:05 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   5 +
 .../hdfs/client/HdfsClientConfigKeys.java   |   8 +
 .../hadoop/hdfs/web/URLConnectionFactory.java   |  30 ++-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  15 +-
 .../hdfs/web/oauth2/AccessTokenProvider.java|  66 ++
 .../hdfs/web/oauth2/AccessTokenTimer.java   | 103 +
 .../ConfCredentialBasedAccessTokenProvider.java |  62 ++
 ...onfRefreshTokenBasedAccessTokenProvider.java | 146 +
 .../CredentialBasedAccessTokenProvider.java | 135 
 .../oauth2/OAuth2ConnectionConfigurator.java|  79 +++
 .../hadoop/hdfs/web/oauth2/OAuth2Constants.java |  46 
 .../apache/hadoop/hdfs/web/oauth2/Utils.java|  63 ++
 .../hadoop/hdfs/web/oauth2/package-info.java|  26 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   6 +
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md|  25 +++
 .../hadoop/hdfs/web/TestWebHDFSOAuth2.java  | 216 +++
 .../hdfs/web/oauth2/TestAccessTokenTimer.java   |  63 ++
 ...ClientCredentialTimeBasedTokenRefresher.java | 138 
 ...TestRefreshTokenTimeBasedTokenRefresher.java | 138 
 20 files changed, 1369 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index aeaa980..68bd289 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -32,6 +32,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   <dependencies>
     <dependency>
+      <groupId>com.squareup.okhttp</groupId>
+      <artifactId>okhttp</artifactId>
+      <version>2.4.0</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <scope>provided</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 7b1e438..96bc8d3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -36,6 +36,14 @@ public interface HdfsClientConfigKeys {
   String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
   
"^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
 
+  String DFS_WEBHDFS_OAUTH_ENABLED_KEY = "dfs.webhdfs.oauth2.enabled";
+  boolean DFS_WEBHDFS_OAUTH_ENABLED_DEFAULT = false;
+
+  String OAUTH_CLIENT_ID_KEY = "dfs.webhdfs.oauth2.client.id";
+  String OAUTH_REFRESH_URL_KEY = "dfs.webhdfs.oauth2.refresh.url";
+
+  String ACCESS_TOKEN_PROVIDER_KEY = 
"dfs.webhdfs.oauth2.access.token.provider";
+
   String PREFIX = "dfs.client.";
   String  DFS_NAMESERVICES = "dfs.nameservices";
   int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/837fb75e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
index a5e02f2..4c23241 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
@@ -31,6 +31,7 @@ import javax.net.ssl.SSLSocketFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import 
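
Taken together, the keys above suggest the client-side wiring; a sketch in which the provider class name comes from the diffstat and the endpoint and client id are purely illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class WebHdfsOAuth2ConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.webhdfs.oauth2.enabled", true);
        // Illustrative values; the refresh URL and client id depend on the
        // OAuth2 authorization server fronting the cluster.
        conf.set("dfs.webhdfs.oauth2.refresh.url",
            "https://auth.example.com/oauth2/token");
        conf.set("dfs.webhdfs.oauth2.client.id", "hdfs-client");
        // Provider class name as listed in the diffstat above.
        conf.set("dfs.webhdfs.oauth2.access.token.provider",
            "org.apache.hadoop.hdfs.web.oauth2."
                + "ConfRefreshTokenBasedAccessTokenProvider");
      }
    }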

[44/50] [abbrv] hadoop git commit: HADOOP-12359. hadoop fs -getmerge doc is wrong. Contributed by Jagadesh Kiran N.

2015-09-01 Thread zhz
HADOOP-12359. hadoop fs -getmerge doc is wrong. Contributed by Jagadesh Kiran N.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e251a76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e251a76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e251a76

Branch: refs/heads/HDFS-7285
Commit: 2e251a767427a38ecb6c309ad979feecb29a09f4
Parents: faa38e1
Author: Akira Ajisaka 
Authored: Tue Sep 1 20:55:33 2015 +0900
Committer: Akira Ajisaka 
Committed: Tue Sep 1 20:55:33 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt|  3 +++
 .../hadoop-common/src/site/markdown/FileSystemShell.md | 13 +++--
 2 files changed, 14 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e251a76/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 14e6fda..4eef964 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1120,6 +1120,9 @@ Release 2.7.2 - UNRELEASED
 HADOOP-12061. Incorrect command in single cluster setup document.
 (Kengo Seki via aajisaka)
 
+HADOOP-12359. hadoop fs -getmerge doc is wrong.
+(Jagadesh Kiran N via aajisaka)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e251a76/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index fb89ca1..d6d00e4 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -368,9 +368,18 @@ Returns 0 on success and non-zero on error.
 getmerge
 
 
-Usage: `hadoop fs -getmerge <src> <localdst> [addnl]`
+Usage: `hadoop fs -getmerge [-nl] <src> <localdst>`
 
-Takes a source directory and a destination file as input and concatenates 
files in src into the destination local file. Optionally addnl can be set to 
enable adding a newline character at the end of each file.
+Takes a source directory and a destination file as input and concatenates 
files in src into the destination local file. Optionally -nl can be set to 
enable adding a newline character (LF) at the end of each file.
+
+Examples:
+
+* `hadoop fs -getmerge -nl  /src  /opt/output.txt`
+* `hadoop fs -getmerge -nl  /src/file1.txt /src/file2.txt  /output.txt`
+
+Exit Code:
+
+Returns 0 on success and non-zero on error.
 
 help
 



[40/50] [abbrv] hadoop git commit: HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.

2015-09-01 Thread zhz
HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ad3556e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ad3556e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ad3556e

Branch: refs/heads/HDFS-7285
Commit: 7ad3556ed38560585579172aa68356f37b2288c8
Parents: 24f6a7c
Author: Andrew Wang 
Authored: Mon Aug 31 18:17:14 2015 -0700
Committer: Andrew Wang 
Committed: Mon Aug 31 18:17:14 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java  | 2 +-
 .../src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad3556e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 95eb677..0f52d22 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -756,6 +756,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12325. RPC Metrics : Add the ability track and log slow RPCs.
 (Anu Engineer via xyao)
 
+HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.
+(wang)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad3556e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index 7fad990..ea4d9b1 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -77,7 +77,7 @@ import org.junit.Test;
  * 
  */
 
-public class ViewFileSystemBaseTest {
+abstract public class ViewFileSystemBaseTest {
   FileSystem fsView;  // the view file system - the mounts are here
   FileSystem fsTarget;  // the target file system - the mount will point here
   Path targetTestRoot;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad3556e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
index d8ab539..ceebb26 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
@@ -76,7 +76,7 @@ import org.mockito.Mockito;
  * @AfterClass   public static void ClusterShutdownAtEnd()
  * 
  */
-public class ViewFsBaseTest {
+abstract public class ViewFsBaseTest {
   FileContext fcView; // the view file system - the mounts are here
   FileContext fcTarget; // the target file system - the mount will point here
   Path targetTestRoot;
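
Marking the base classes abstract stops JUnit from instantiating and running them directly; each concrete per-filesystem subclass supplies the fixture, roughly like this hypothetical sketch (placed in the same package because the fixture fields are package-private, and assuming the base exposes a setUp() to override per the JUnit 4 convention here):

    package org.apache.hadoop.fs.viewfs;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.junit.Before;

    // Hypothetical concrete subclass: only the target filesystem is provided
    // here; every @Test method is inherited from the abstract base.
    public class TestViewFileSystemLocalSketch extends ViewFileSystemBaseTest {
      @Override
      @Before
      public void setUp() throws Exception {
        fsTarget = FileSystem.getLocal(new Configuration());
        super.setUp(); // the base builds the mount table over fsTarget
      }
    }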



[37/50] [abbrv] hadoop git commit: Move YARN-4092 to 2.7.2

2015-09-01 Thread zhz
Move YARN-4092 to 2.7.2


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4eaa7fd3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4eaa7fd3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4eaa7fd3

Branch: refs/heads/HDFS-7285
Commit: 4eaa7fd3eae4412ac0b964c617b1bbb17a39d8be
Parents: a3fd2cc
Author: Jian He 
Authored: Mon Aug 31 17:43:36 2015 -0700
Committer: Jian He 
Committed: Mon Aug 31 17:43:36 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4eaa7fd3/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 19c1082..80cf793 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -422,9 +422,6 @@ Release 2.8.0 - UNRELEASED
 YARN-1556. NPE getting application report with a null appId. (Weiwei Yang 
via 
 junping_du)
 
-YARN-4092. Fixed UI redirection to print useful messages when both RMs are
-in standby mode. (Xuan Gong via jianhe)
-
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not
@@ -824,6 +821,8 @@ Release 2.7.2 - UNRELEASED
 YARN-3978. Configurably turn off the saving of container info in Generic 
AHS
 (Eric Payne via jeagles)
 
+YARN-4092. Fixed UI redirection to print useful messages when both RMs are
+in standby mode. (Xuan Gong via jianhe)
 
   OPTIMIZATIONS
 



[33/50] [abbrv] hadoop git commit: HDFS-8980. Remove unnecessary block replacement in INodeFile. Contributed by Jing Zhao.

2015-09-01 Thread zhz
HDFS-8980. Remove unnecessary block replacement in INodeFile. Contributed by 
Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/caa04de1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/caa04de1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/caa04de1

Branch: refs/heads/HDFS-7285
Commit: caa04de149030691b7bc952b534c6128db217ed2
Parents: cf83156
Author: Jing Zhao 
Authored: Mon Aug 31 11:48:09 2015 -0700
Committer: Jing Zhao 
Committed: Mon Aug 31 11:48:09 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../hdfs/server/blockmanagement/BlockInfo.java  | 19 +--
 .../blockmanagement/BlockInfoContiguous.java| 15 -
 .../server/blockmanagement/BlockManager.java| 58 +++-
 .../hdfs/server/blockmanagement/BlocksMap.java  | 16 --
 .../hdfs/server/namenode/FSEditLogLoader.java   |  8 +--
 6 files changed, 29 insertions(+), 89 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/caa04de1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3382f81..7b5979e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -865,6 +865,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8983. NameNode support for protected directories. (Arpit Agarwal)
 
+HDFS-8980. Remove unnecessary block replacement in INodeFile. (jing9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/caa04de1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 706cbcd..810784d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -36,7 +36,7 @@ import static 
org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
  * the block are stored.
  */
 @InterfaceAudience.Private
-public abstract class  BlockInfo extends Block
+public abstract class BlockInfo extends Block
 implements LightWeightGSet.LinkedElement {
 
   public static final BlockInfo[] EMPTY_ARRAY = {};
@@ -207,12 +207,6 @@ public abstract class  BlockInfo extends Block
   abstract boolean removeStorage(DatanodeStorageInfo storage);
 
   /**
-   * Replace the current BlockInfo with the new one in corresponding
-   * DatanodeStorageInfo's linked list
-   */
-  abstract void replaceBlock(BlockInfo newBlock);
-
-  /**
* Find specified DatanodeStorageInfo.
* @return DatanodeStorageInfo or null if not found.
*/
@@ -372,19 +366,12 @@ public abstract class  BlockInfo extends Block
   }
 
   /**
-   * Convert an under construction block to a complete block.
-   *
-   * @return BlockInfo - a complete block.
-   * @throws IOException if the state of the block
-   * (the generation stamp and the length) has not been committed by
-   * the client or it does not have at least a minimal number of replicas
-   * reported from data-nodes.
+   * Convert an under construction block to complete.
*/
-  BlockInfo convertToCompleteBlock() throws IOException {
+  void convertToCompleteBlock() {
 assert getBlockUCState() != BlockUCState.COMPLETE :
 "Trying to convert a COMPLETE block";
 uc = null;
-return this;
   }
 
   /**

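Since convertToCompleteBlock() now mutates the block in place, callers no longer need the replace-in-maps round trip that the removed replaceBlock() supported. A hedged sketch of the simplified calling side (the wrapper class and method are illustrative, not the committed code; the real method is package-private):

package org.apache.hadoop.hdfs.server.blockmanagement;

class CompleteBlockSketch {
  // Hedged sketch: completion no longer swaps objects in the blocksMap.
  static void completeInPlace(BlockInfo curBlock) {
    // The same BlockInfo instance stays in the blocksMap and in the file's
    // block list; completing just clears the under-construction state.
    curBlock.convertToCompleteBlock();
    // formerly: BlockInfo b = curBlock.convertToCompleteBlock();
    //           blocksMap.replaceBlock(b);
  }
}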
http://git-wip-us.apache.org/repos/asf/hadoop/blob/caa04de1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 42934c3..94fb222 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -104,19 +104,4 @@ public class BlockInfoContiguous extends BlockInfo {
 }
 return 0;
   }

[41/50] [abbrv] hadoop git commit: HADOOP-12367. Move TestFileUtil's test resources to resources folder. (wang via yliu)

2015-09-01 Thread zhz
HADOOP-12367. Move TestFileUtil's test resources to resources folder. (wang via 
yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4d96be6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4d96be6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4d96be6

Branch: refs/heads/HDFS-7285
Commit: f4d96be6c637ff54903615cff04b365e25bb3229
Parents: 7ad3556
Author: yliu 
Authored: Tue Sep 1 16:20:56 2015 +0800
Committer: yliu 
Committed: Tue Sep 1 16:20:56 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +++
 hadoop-common-project/hadoop-common/pom.xml |  19 +--
 .../java/org/apache/hadoop/fs/test-untar.tar| Bin 20480 -> 0 bytes
 .../java/org/apache/hadoop/fs/test-untar.tgz| Bin 2024 -> 0 bytes
 .../src/test/resources/test-untar.tar   | Bin 0 -> 20480 bytes
 .../src/test/resources/test-untar.tgz   | Bin 0 -> 2024 bytes
 6 files changed, 4 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0f52d22..14e6fda 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -759,6 +759,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.
 (wang)
 
+HADOOP-12367. Move TestFileUtil's test resources to resources folder.
+(wang via yliu)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 282735d..3ae09a0 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -454,23 +454,6 @@
 
   
   
-copy-test-tarballs
-process-test-resources
-
-  run
-
-
-  
-
-  
-
-
-  
-
-  
-
-  
-  
 pre-site
 
   run
@@ -505,7 +488,7 @@
 
src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h
 
src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c
 
src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h
-
src/test/java/org/apache/hadoop/fs/test-untar.tgz
+src/test/resources/test-untar.tgz
 src/test/resources/test.har/_SUCCESS
 src/test/resources/test.har/_index
 src/test/resources/test.har/_masterindex

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
deleted file mode 100644
index 949e985..000
Binary files 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
deleted file mode 100644
index 9e9ef40..000
Binary files 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar 
b/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar
new file mode 100644
index 000..949e985
Binary files /dev/null and 
b/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar differ


[42/50] [abbrv] hadoop git commit: YARN-4082. Container shouldn't be killed when node's label updated. Contributed by Wangda Tan.

2015-09-01 Thread zhz
YARN-4082. Container shouldn't be killed when node's label updated. Contributed 
by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf669b6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf669b6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf669b6d

Branch: refs/heads/HDFS-7285
Commit: bf669b6d9f8ba165e30b8823218d625a49958925
Parents: f4d96be
Author: Varun Vasudev 
Authored: Tue Sep 1 14:19:11 2015 +0530
Committer: Varun Vasudev 
Committed: Tue Sep 1 14:19:11 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/capacity/AbstractCSQueue.java |  27 ++
 .../scheduler/capacity/CSQueue.java |  26 ++
 .../scheduler/capacity/CapacityScheduler.java   |  40 +--
 .../scheduler/capacity/LeafQueue.java   |  16 ++
 .../scheduler/common/fica/FiCaSchedulerApp.java |   9 +
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 249 ---
 7 files changed, 314 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf669b6d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 80cf793..999654d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -804,6 +804,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3896. RMNode transitioned from RUNNING to REBOOTED because its 
response id 
 has not been reset synchronously. (Jun Gong via rohithsharmaks)
 
+YARN-4082. Container shouldn't be killed when node's label updated.
+(Wangda Tan via vvasudev)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf669b6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 792c25c..0ae4d1a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsMana
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -543,6 +544,32 @@ public abstract class AbstractCSQueue implements CSQueue {
 }
   }
   
+  @Override
+  public void incUsedResource(String nodeLabel, Resource resourceToInc,
+  SchedulerApplicationAttempt application) {
+if (nodeLabel == null) {
+  nodeLabel = RMNodeLabelsManager.NO_LABEL;
+}
+// ResourceUsage has its own lock, no addition lock needs here.
+queueUsage.incUsed(nodeLabel, resourceToInc);
+if (null != parent) {
+  parent.incUsedResource(nodeLabel, resourceToInc, null);
+}
+  }
+
+  @Override
+  public void decUsedResource(String nodeLabel, Resource resourceToDec,
+  SchedulerApplicationAttempt application) {
+if (nodeLabel == null) {
+  nodeLabel = RMNodeLabelsManager.NO_LABEL;
+}
+// ResourceUsage has its own lock, no addition lock needs here.
+queueUsage.decUsed(nodeLabel, resourceToDec);
+if (null != parent) {
+  parent.decUsedResource(nodeLabel, resourceToDec, null);
+}
+  }
+
   /**
* Return if the queue has pending resource on given nodePartition and
* schedulingMode. 


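The new incUsedResource/decUsedResource pair recurses to the parent queue, which is what lets a node-label change move a running container's accounting from the old partition to the new one instead of killing the container. A hedged sketch of the call pattern (class and method names below are illustrative, not lifted from the patch):

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;

class LabelMoveSketch {
  // Hedged sketch: shift a running container's usage between partitions
  // when its node's label changes, instead of killing the container.
  static void moveContainerAccounting(CSQueue leaf, FiCaSchedulerApp app,
      String oldPartition, String newPartition, Resource containerResource) {
    leaf.decUsedResource(oldPartition, containerResource, app); // recurses to root
    leaf.incUsedResource(newPartition, containerResource, app); // recurses to root
  }
}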
[16/50] [abbrv] hadoop git commit: YARN-3250. Support admin cli interface in for Application Priority. Contributed by Rohith Sharma K S

2015-09-01 Thread zhz
YARN-3250. Support admin cli interface in for Application Priority. Contributed 
by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9c8ea71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9c8ea71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9c8ea71

Branch: refs/heads/HDFS-7285
Commit: a9c8ea71aa427ff5f25caec98be15bc880e578a7
Parents: f97a0f8
Author: Jian He 
Authored: Thu Aug 27 13:25:48 2015 -0700
Committer: Jian He 
Committed: Thu Aug 27 13:25:53 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../ResourceManagerAdministrationProtocol.java  |  8 +++
 .../RefreshClusterMaxPriorityRequest.java   | 35 +
 .../RefreshClusterMaxPriorityResponse.java  | 36 ++
 ...esourcemanager_administration_protocol.proto |  1 +
 ..._server_resourcemanager_service_protos.proto |  5 ++
 .../hadoop/yarn/client/cli/RMAdminCLI.java  | 15 
 .../hadoop/yarn/client/cli/TestRMAdminCLI.java  |  9 +++
 ...nagerAdministrationProtocolPBClientImpl.java | 20 ++
 ...agerAdministrationProtocolPBServiceImpl.java | 23 ++
 .../RefreshClusterMaxPriorityRequestPBImpl.java | 74 
 ...RefreshClusterMaxPriorityResponsePBImpl.java | 73 +++
 .../server/resourcemanager/AdminService.java| 28 
 .../scheduler/AbstractYarnScheduler.java| 25 +++
 .../scheduler/YarnScheduler.java|  9 +++
 .../scheduler/capacity/CapacityScheduler.java   |  9 +--
 .../resourcemanager/TestRMAdminService.java | 34 +
 17 files changed, 399 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9c8ea71/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1190619..51715cf 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -184,6 +184,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4014. Support user cli interface in for Application Priority.
 (Rohith Sharma K S via jianhe)
 
+YARN-3250. Support admin cli interface in for Application Priority.
+(Rohith Sharma K S via jianhe)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9c8ea71/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
index 36dfbc0..08a258c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
@@ -33,6 +33,8 @@ import 
org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioning
 import 
org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
 import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
 import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest;
@@ -128,4 +130,10 @@ public interface ResourceManagerAdministrationProtocol 
extends GetUserMappingsPr
   public CheckForDecommissioningNodesResponse checkForDecommissioningNodes(
   CheckForDecommissioningNodesRequest checkForDecommissioningNodesRequest)
   throws YarnException, IOException;
+
+  @Private
+  @Idempotent
+  public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority(
+  RefreshClusterMaxPriorityRequest request) throws YarnException,
+  IOException;
 }
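The CLI wiring for this RPC is summarized in the file list but not reproduced here. A hedged sketch of driving it through RMAdminCLI, assuming the -refreshClusterMaxPriority option this patch adds and the cluster-level max-priority config key (key name assumed):

import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.client.cli.RMAdminCLI;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RefreshMaxPrioritySketch {
  public static void main(String[] args) throws Exception {
    // Asks the active RM to re-read yarn.cluster.max-application-priority
    // (config key name assumed) without a restart.
    int rc = ToolRunner.run(new RMAdminCLI(new YarnConfiguration()),
        new String[] {"-refreshClusterMaxPriority"});
    System.exit(rc);
  }
}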

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9c8ea71/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshClusterMaxPriorityRequest.java

hadoop git commit: HDFS-8388. Time and Date format need to be in sync in NameNode UI page. Contributed by Surendra Singh Lilhore.

2015-09-01 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a61c0f5ca -> 25efccba0


HDFS-8388. Time and Date format need to be in sync in NameNode UI page. 
Contributed by Surendra Singh Lilhore.

(cherry picked from commit 65ccf2b1252a5c83755fa24a93cf1d30ee59b2c3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25efccba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25efccba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25efccba

Branch: refs/heads/branch-2
Commit: 25efccba01d02bedebfb7c92e6d2e9ecc2e26926
Parents: a61c0f5
Author: Akira Ajisaka 
Authored: Wed Sep 2 14:28:38 2015 +0900
Committer: Akira Ajisaka 
Committed: Wed Sep 2 14:30:23 2015 +0900

--
 .../hadoop-common/src/site/markdown/Metrics.md   | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 5 +
 .../apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java   | 6 ++
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html | 5 ++---
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js   | 6 +++---
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html  | 1 +
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.js| 2 +-
 .../hadoop-hdfs/src/main/webapps/static/dfs-dust.js  | 8 +++-
 9 files changed, 30 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25efccba/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 2e9a4f7..75dfc9f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -191,6 +191,8 @@ Each metrics record contains tags such as ProcessName, 
SessionId, and Hostname a
 | `GetImageAvgTime` | Average fsimage download time in milliseconds |
 | `PutImageNumOps` | Total number of fsimage uploads to SecondaryNameNode |
 | `PutImageAvgTime` | Average fsimage upload time in milliseconds |
+| `NNStarted`| NameNode start time |
+| `NNStartedTimeInMillis`| NameNode start time in milliseconds |
 
 FSNamesystem
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25efccba/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 73a93b2..2b3f264 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -926,6 +926,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8950. NameNode refresh doesn't remove DataNodes that are no longer in
 the allowed list (Daniel Templeton)
 
+HDFS-8388. Time and Date format need to be in sync in NameNode UI page.
+(Surendra Singh Lilhore via aajisaka)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25efccba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 433445b..84bb82c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -6126,6 +6126,11 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return getStartTime().toString();
   }
 
+  @Override // NameNodeMXBean
+  public long getNNStartedTimeInMillis() {
+return startTime;
+  }
+
   @Override  // NameNodeMXBean
   public String getCompileInfo() {
 return VersionInfo.getDate() + " by " + VersionInfo.getUser() +

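Exposing the raw start time in milliseconds lets UI code format dates client-side with one consistent convention. A hedged sketch of reading the new attribute over JMX — the bean name follows the NameNode's standard NameNodeInfo registration; error handling is elided:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class NNStartTimeProbe {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName nnInfo =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    // Attribute name derives from the getNNStartedTimeInMillis() getter.
    long startedMs = (Long) mbs.getAttribute(nnInfo, "NNStartedTimeInMillis");
    System.out.println("NameNode started at epoch-ms: " + startedMs);
  }
}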
http://git-wip-us.apache.org/repos/asf/hadoop/blob/25efccba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
index 0e4d445..00c1abe 100644
--- 

[36/50] [abbrv] hadoop git commit: YARN-4092. Fixed UI redirection to print useful messages when both RMs are in standby mode. Contributed by Xuan Gong

2015-09-01 Thread zhz
YARN-4092. Fixed UI redirection to print useful messages when both RMs are in 
standby mode. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3fd2ccc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3fd2ccc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3fd2ccc

Branch: refs/heads/HDFS-7285
Commit: a3fd2ccc869dfc1f04d1cf0a8678d4d90a43a80f
Parents: 826ae1c
Author: Jian He 
Authored: Mon Aug 31 17:33:24 2015 -0700
Committer: Jian He 
Committed: Mon Aug 31 17:33:24 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../hadoop/yarn/client/TestRMFailover.java  | 27 ++
 .../hadoop/yarn/webapp/YarnWebParams.java   |  1 +
 .../resourcemanager/webapp/RMWebAppFilter.java  | 90 +++-
 4 files changed, 117 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3fd2ccc/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4201b4f..19c1082 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -422,6 +422,9 @@ Release 2.8.0 - UNRELEASED
 YARN-1556. NPE getting application report with a null appId. (Weiwei Yang 
via 
 junping_du)
 
+YARN-4092. Fixed UI redirection to print useful messages when both RMs are
+in standby mode. (Xuan Gong via jianhe)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3fd2ccc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index 0d03fd4..cbc220a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -27,6 +27,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
+
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
@@ -45,6 +46,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.AdminService;
 import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -265,6 +267,7 @@ public class TestRMFailover extends ClientBaseWithFixes {
 getAdminService(0).transitionToActive(req);
String rm1Url = "http://0.0.0.0:18088";
String rm2Url = "http://0.0.0.0:28088";
+
 String redirectURL = getRedirectURL(rm2Url);
 // if uri is null, RMWebAppFilter will append a slash at the trail of the 
redirection url
 assertEquals(redirectURL,rm1Url+"/");
@@ -304,6 +307,17 @@ public class TestRMFailover extends ClientBaseWithFixes {
 
 redirectURL = getRedirectURL(rm2Url + "/proxy/" + fakeAppId);
 assertNull(redirectURL);
+
+// transit the active RM to standby
+// Both of RMs are in standby mode
+getAdminService(0).transitionToStandby(req);
+// RM2 is expected to send the httpRequest to itself.
+// The Header Field: Refresh is expected to be set.
+redirectURL = getRefreshURL(rm2Url);
+assertTrue(redirectURL != null
+&& redirectURL.contains(YarnWebParams.NEXT_REFRESH_INTERVAL)
+&& redirectURL.contains(rm2Url));
+
   }
 
   // set up http connection with the given url and get the redirection url 
from the response
@@ -323,4 +337,17 @@ public class TestRMFailover extends ClientBaseWithFixes {
 return redirectUrl;
   }
 
+  static String getRefreshURL(String url) {
+String redirectUrl = null;
+try {
+  HttpURLConnection conn = (HttpURLConnection) new 
URL(url).openConnection();
+  // do not automatically follow the redirection
+  // otherwise we get too many redirections exception
+  conn.setInstanceFollowRedirects(false);
+  redirectUrl = conn.getHeaderField("Refresh");
+} catch (Exception e) {
+  // throw new RuntimeException(e);
+}
+return redirectUrl;
+  }
 }
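The RMWebAppFilter half of this patch appears in the file list but not in this excerpt; the test above pins down its contract: when no RM is active, the filter answers the request itself and sets an HTTP Refresh header pointing back at the same RM, carrying a next-refresh-interval parameter so the page retries with back-off. A hedged sketch of that behavior — helper names, the parameter's string value, and the interval policy are illustrative, not the committed code:

import java.io.IOException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

public final class StandbyRefreshSketch {
  // YarnWebParams.NEXT_REFRESH_INTERVAL in the patch; string value assumed.
  static final String NEXT_REFRESH_INTERVAL = "next.refresh.interval";

  static void answerFromStandby(HttpServletRequest req,
      HttpServletResponse resp, int nextIntervalSecs) throws IOException {
    String self = req.getRequestURL().toString()
        + "?" + NEXT_REFRESH_INTERVAL + "=" + (nextIntervalSecs + 1);
    // "Refresh: <secs>; url=<self>" makes the browser re-poll this same RM.
    resp.setHeader("Refresh", nextIntervalSecs + "; url=" + self);
    resp.getWriter().println("This is a standby RM; no RM is active. "
        + "Retrying via: " + self);
  }
}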


[27/50] [abbrv] hadoop git commit: HDFS-8925. Move BlockReaderLocal to hdfs-client. Contributed by Mingliang Liu.

2015-09-01 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2c9b288/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java
new file mode 100644
index 000..55aa741
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.LinkedListMultimap;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.util.IOUtilsClient;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A cache of input stream sockets to Data Node.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Private
+@VisibleForTesting
+public class PeerCache {
+  private static final Logger LOG = LoggerFactory.getLogger(PeerCache.class);
+  
+  private static class Key {
+final DatanodeID dnID;
+final boolean isDomain;
+
+Key(DatanodeID dnID, boolean isDomain) {
+  this.dnID = dnID;
+  this.isDomain = isDomain;
+}
+
+@Override
+public boolean equals(Object o) {
+  if (!(o instanceof Key)) {
+return false;
+  }
+  Key other = (Key)o;
+  return dnID.equals(other.dnID) && isDomain == other.isDomain;
+}
+
+@Override
+public int hashCode() {
+  return dnID.hashCode() ^ (isDomain ? 1 : 0);
+}
+  }
+  
+  private static class Value {
+private final Peer peer;
+private final long time;
+
+Value(Peer peer, long time) {
+  this.peer = peer;
+  this.time = time;
+}
+
+Peer getPeer() {
+  return peer;
+}
+
+long getTime() {
+  return time;
+}
+  }
+
+  private Daemon daemon;
+  /** A map for per user per datanode. */
+  private final LinkedListMultimap<Key, Value> multimap =
+LinkedListMultimap.create();
+  private final int capacity;
+  private final long expiryPeriod;
+  
+  public PeerCache(int c, long e) {
+this.capacity = c;
+this.expiryPeriod = e;
+
+if (capacity == 0 ) {
+  LOG.info("SocketCache disabled.");
+} else if (expiryPeriod == 0) {
+  throw new IllegalStateException("Cannot initialize expiryPeriod to " +
+ expiryPeriod + " when cache is enabled.");
+}
+  }
+ 
+  private boolean isDaemonStarted() {
+return (daemon == null)? false: true;
+  }
+
+  private synchronized void startExpiryDaemon() {
+// start daemon only if not already started
+if (isDaemonStarted() == true) {
+  return;
+}
+
+daemon = new Daemon(new Runnable() {
+  @Override
+  public void run() {
+try {
+  PeerCache.this.run();
+} catch(InterruptedException e) {
+  //noop
+} finally {
+  PeerCache.this.clear();
+}
+  }
+
+  @Override
+  public String toString() {
+return String.valueOf(PeerCache.this);
+  }
+});
+daemon.start();
+  }
+
+  /**
+   * Get a cached peer connected to the given DataNode.
+   * @param dnId The DataNode to get a Peer for.
+   * @param isDomain Whether to retrieve a DomainPeer or not.
+   *
+   * @return An open Peer connected to the DN, or null if none
+   * was found. 
+   */
+  public Peer get(DatanodeID dnId, boolean isDomain) {
+
+if (capacity <= 0) { // disabled
+  return null;
+}
+return getInternal(dnId, isDomain);
+  }
+
+  
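A hedged usage sketch for the cache above; dialDataNode is a hypothetical stand-in for whatever opens a fresh connection, and the put() method referenced in the comment sits in the portion of the diff elided here:

import java.io.IOException;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;

class PeerCacheUsage {
  Peer getOrDial(PeerCache cache, DatanodeID dn) throws IOException {
    Peer p = cache.get(dn, /* isDomain= */ false); // reuse a pooled TCP peer
    if (p != null) {
      return p;
    }
    Peer fresh = dialDataNode(dn); // hypothetical: open a new socket to the DN
    // When finished reading, the caller can return it: cache.put(dn, fresh);
    return fresh;
  }

  private Peer dialDataNode(DatanodeID dn) throws IOException {
    throw new UnsupportedOperationException("illustrative only");
  }
}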

[48/50] [abbrv] hadoop git commit: Merge remote-tracking branch 'apache/trunk' into HDFS-7285

2015-09-01 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/53358fe6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6c6d758,1346ab3..8232ab9
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@@ -674,8 -648,8 +674,8 @@@ public class BlockManager implements Bl
return false; // already completed (e.g. by syncBlock)
  
  final boolean b = commitBlock(lastBlock, commitBlock);
- if (hasMinStorage(lastBlock)) {
-   completeBlock(bc, bc.numBlocks() - 1, false);
 -if (countNodes(lastBlock).liveReplicas() >= minReplication) {
++  if (hasMinStorage(lastBlock)) {
+   completeBlock(lastBlock, false);
  }
  return b;
}
@@@ -698,9 -666,9 +692,9 @@@
  }
  
  int numNodes = curBlock.numNodes();
 -if (!force && numNodes < minReplication) {
 +if (!force && !hasMinStorage(curBlock, numNodes)) {
-   throw new IOException("Cannot complete block: " +
-   "block does not satisfy minimal replication requirement.");
+   throw new IOException("Cannot complete block: "
+   + "block does not satisfy minimal replication requirement.");
  }
  if (!force && curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
throw new IOException(
@@@ -718,26 -683,10 +709,12 @@@
  // a "forced" completion when a file is getting closed by an
  // OP_CLOSE edit on the standby).
  namesystem.adjustSafeModeBlockTotals(0, 1);
 +final int minStorage = curBlock.isStriped() ?
 +((BlockInfoStriped) curBlock).getRealDataBlockNum() : minReplication;
  namesystem.incrementSafeBlockCount(
 -Math.min(numNodes, minReplication));
 +Math.min(numNodes, minStorage), curBlock);
- 
- // replace block in the blocksMap
- return blocksMap.replaceBlock(completeBlock);
}
  
-   private BlockInfo completeBlock(final BlockCollection bc,
-   final BlockInfo block, boolean force) throws IOException {
- BlockInfo[] fileBlocks = bc.getBlocks();
- for (int idx = 0; idx < fileBlocks.length; idx++) {
-   if (fileBlocks[idx] == block) {
- return completeBlock(bc, idx, force);
-   }
- }
- return block;
-   }
-   
/**
 * Force the given block in the given file to be marked as complete,
 * regardless of whether enough replicas are present. This is necessary
@@@ -1270,37 -1162,29 +1245,36 @@@
private void markBlockAsCorrupt(BlockToMarkCorrupt b,
DatanodeStorageInfo storageInfo,
DatanodeDescriptor node) throws IOException {
--
- if (b.stored.isDeleted()) {
 -if (b.getCorrupted().isDeleted()) {
++if (b.getStored().isDeleted()) {
blockLog.debug("BLOCK markBlockAsCorrupt: {} cannot be marked as" +
" corrupt as it does not belong to any file", b);
-   addToInvalidates(b.corrupted, node);
+   addToInvalidates(b.getCorrupted(), node);
return;
 -} 
 -short expectedReplicas = b.getCorrupted().getReplication();
 +}
 +short expectedReplicas =
- getExpectedReplicaNum(b.stored);
++getExpectedReplicaNum(b.getStored());
  
  // Add replica to the data-node if it is not already there
  if (storageInfo != null) {
-   storageInfo.addBlock(b.stored, b.corrupted);
 -  storageInfo.addBlock(b.getStored());
++  storageInfo.addBlock(b.getStored(), b.getCorrupted());
  }
  
 -// Add this replica to corruptReplicas Map
 -corruptReplicas.addToCorruptReplicasMap(b.getCorrupted(), node,
 -b.getReason(), b.getReasonCode());
 +// Add this replica to corruptReplicas Map. For striped blocks, we always
 +// use the id of whole striped block group when adding to corruptReplicas
- Block corrupted = new Block(b.corrupted);
- if (b.stored.isStriped()) {
-   corrupted.setBlockId(b.stored.getBlockId());
++Block corrupted = new Block(b.getCorrupted());
++if (b.getStored().isStriped()) {
++  corrupted.setBlockId(b.getStored().getBlockId());
 +}
- corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.reason,
- b.reasonCode);
++corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.getReason(),
++b.getReasonCode());
  
- NumberReplicas numberOfReplicas = countNodes(b.stored);
+ NumberReplicas numberOfReplicas = countNodes(b.getStored());
  boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=
  expectedReplicas;
 -boolean minReplicationSatisfied =
 -numberOfReplicas.liveReplicas() >= minReplication;
 +
- boolean minReplicationSatisfied = 

[01/50] [abbrv] hadoop git commit: YARN-4014. Support user cli interface in for Application Priority. Contributed by Rohith Sharma K S

2015-09-01 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 164cbe643 -> 53358fe68


YARN-4014. Support user cli interface in for Application Priority. Contributed 
by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57c7ae1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57c7ae1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57c7ae1a

Branch: refs/heads/HDFS-7285
Commit: 57c7ae1affb2e1821fbdc3f47738d7e6fd83c7c1
Parents: 3b00eae
Author: Jian He 
Authored: Mon Aug 24 20:36:08 2015 -0700
Committer: Jian He 
Committed: Mon Aug 24 20:36:44 2015 -0700

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |   7 +
 .../hadoop/mapred/TestClientRedirect.java   |   9 +
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/api/ApplicationClientProtocol.java |  18 ++
 .../UpdateApplicationPriorityRequest.java   |  80 +
 .../UpdateApplicationPriorityResponse.java  |  47 +
 .../main/proto/applicationclient_protocol.proto |   1 +
 .../src/main/proto/yarn_service_protos.proto|   8 +
 .../hadoop/yarn/client/api/YarnClient.java  |  17 ++
 .../yarn/client/api/impl/YarnClientImpl.java|  11 ++
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  29 
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  29 
 .../ApplicationClientProtocolPBClientImpl.java  |  20 +++
 .../ApplicationClientProtocolPBServiceImpl.java |  22 +++
 .../UpdateApplicationPriorityRequestPBImpl.java | 171 +++
 ...UpdateApplicationPriorityResponsePBImpl.java |  69 
 .../server/resourcemanager/ClientRMService.java |  73 
 .../server/resourcemanager/RMAuditLogger.java   |   2 +
 .../resourcemanager/recovery/RMStateStore.java  |  12 +-
 .../recovery/RMStateUpdateAppEvent.java |  13 ++
 .../scheduler/capacity/CapacityScheduler.java   |  16 +-
 .../resourcemanager/TestClientRMService.java|  63 +++
 22 files changed, 713 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57c7ae1a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 90f6876..91c3086 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
@@ -466,4 +467,10 @@ public class ResourceMgrDelegate extends YarnClient {
   throws YarnException, IOException {
 return client.getClusterNodeLabels();
   }
+
+  @Override
+  public void updateApplicationPriority(ApplicationId applicationId,
+  Priority priority) throws YarnException, IOException {
+client.updateApplicationPriority(applicationId, priority);
+  }
 }
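A hedged sketch of the client-side call this delegate forwards to; the application id and priority values below are placeholders:

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class UpdatePrioritySketch {
  public static void main(String[] args) throws Exception {
    YarnClient yarn = YarnClient.createYarnClient();
    yarn.init(new YarnConfiguration());
    yarn.start();
    try {
      // Placeholder id: cluster timestamp + sequence number.
      ApplicationId appId = ApplicationId.newInstance(1441000000000L, 1);
      yarn.updateApplicationPriority(appId, Priority.newInstance(10));
    } finally {
      yarn.stop();
    }
  }
}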

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57c7ae1a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index bb00b19..1bf1408 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ -114,6 +114,8 @@ import 

[15/50] [abbrv] hadoop git commit: HDFS-8969. Clean up findbugs warnings for HDFS-8823 and HDFS-8932. Contributed by Anu Engineer.

2015-09-01 Thread zhz
HDFS-8969. Clean up findbugs warnings for HDFS-8823 and HDFS-8932. Contributed 
by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f97a0f8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f97a0f8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f97a0f8c

Branch: refs/heads/HDFS-7285
Commit: f97a0f8c2cdad0668a3892319f6969fafc2f04cd
Parents: 90fe7bc
Author: Haohui Mai 
Authored: Thu Aug 27 13:03:16 2015 -0700
Committer: Haohui Mai 
Committed: Thu Aug 27 13:03:16 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java   | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java | 4 +---
 3 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f97a0f8c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e779d37..9cc3326 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1240,6 +1240,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8961. Investigate lock issue in o.a.h.hdfs.shortcircuit.
 DfsClientShmManager.EndpointShmManager. (Mingliang Liu via wheat9)
 
+HDFS-8969. Clean up findbugs warnings for HDFS-8823 and HDFS-8932.
+(Anu Engineer via wheat9)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f97a0f8c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index faaea63..edf88c9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -1695,7 +1695,7 @@ public class FSEditLog implements LogsPurgeable {
* Return total number of syncs happened on this edit log.
* @return long - count
*/
-  public long getTotalSyncCount() {
+  public synchronized long getTotalSyncCount() {
 if (editLogStream != null) {
   return editLogStream.getNumSync();
 } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f97a0f8c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index cf6fd44..7ebe859 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -254,9 +254,7 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   NumberReplicas numberReplicas= bm.countNodes(blockInfo);
   out.println("Block Id: " + blockId);
   out.println("Block belongs to: "+iNode.getFullPathName());
-  if (blockInfo != null) {
-out.println("No. of Expected Replica: " + blockInfo.getReplication());
-  }
+  out.println("No. of Expected Replica: " + blockInfo.getReplication());
   out.println("No. of live Replica: " + numberReplicas.liveReplicas());
   out.println("No. of excess Replica: " + numberReplicas.excessReplicas());
   out.println("No. of stale Replica: " +



[09/50] [abbrv] hadoop git commit: HDFS-8682. Should not remove decommissioned node, while calculating the number of live/dead decommissioned node. (Contributed by J. Andreina)

2015-09-01 Thread zhz
HDFS-8682. Should not remove decommissioned node, while calculating the number
of live/dead decommissioned node. (Contributed by J. Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdb56f74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdb56f74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdb56f74

Branch: refs/heads/HDFS-7285
Commit: fdb56f74f38cabb0f94e0781fcedb1594904c026
Parents: 4cbbfa2
Author: Vinayakumar B 
Authored: Thu Aug 27 13:06:43 2015 +0530
Committer: Vinayakumar B 
Committed: Thu Aug 27 13:06:43 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdb56f74/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e432da0..42eed14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1228,6 +1228,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8932. NPE thrown in NameNode when try to get TotalSyncCount metric
 before editLogStream initialization. (Surendra Singh Lilhore via xyao)
 
+HDFS-8682. Should not remove decommissioned node,while calculating the
+number of live/dead decommissioned node. (J. Andreina via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdb56f74/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f1738bb..f4952f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5070,7 +5070,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   @Override // FSNamesystemMBean
   public int getNumDecomLiveDataNodes() {
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
+getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
 int liveDecommissioned = 0;
 for (DatanodeDescriptor node : live) {
   liveDecommissioned += node.isDecommissioned() ? 1 : 0;
@@ -5081,7 +5081,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   @Override // FSNamesystemMBean
   public int getNumDecomDeadDataNodes() {
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, true);
+getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, false);
 int deadDecommissioned = 0;
 for (DatanodeDescriptor node : dead) {
   deadDecommissioned += node.isDecommissioned() ? 1 : 0;

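The flipped boolean is fetchDatanodes' third argument, which (when true) strips decommissioned nodes out of the returned lists — so the decommissioned counters above could never see them. A hedged sketch of the corrected call, with the parameter name taken from the DatanodeManager signature and the wrapper class purely illustrative:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;

class DecomCountSketch {
  // Hedged sketch: keep decommissioned nodes in the result so they count.
  static int countLiveDecommissioned(DatanodeManager dm) {
    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    dm.fetchDatanodes(live, null, /* removeDecommissionNode= */ false);
    int n = 0;
    for (DatanodeDescriptor node : live) {
      n += node.isDecommissioned() ? 1 : 0;
    }
    return n;
  }
}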


[22/50] [abbrv] hadoop git commit: MAPREDUCE-6452. NPE when intermediate encrypt enabled for LocalRunner. Contributed by Zhihai Xu

2015-09-01 Thread zhz
MAPREDUCE-6452. NPE when intermediate encrypt enabled for LocalRunner. 
Contributed by Zhihai Xu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbb24953
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbb24953
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbb24953

Branch: refs/heads/HDFS-7285
Commit: cbb249534aa72ff6c290c4f99766415aeea9d6f5
Parents: b6ceee9
Author: Zhihai Xu 
Authored: Fri Aug 28 12:13:23 2015 -0700
Committer: Zhihai Xu 
Committed: Fri Aug 28 12:13:23 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  7 +
 .../apache/hadoop/mapred/LocalJobRunner.java| 27 
 .../hadoop/mapred/TestLocalJobSubmission.java   | 25 ++
 3 files changed, 59 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb24953/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 361a19b..27af9f9 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -559,6 +559,13 @@ Release 2.8.0 - UNRELEASED
 committing is not utilized when input path is absolute.
 (Dustin Cote via aajisaka)
 
+MAPREDUCE-6357. MultipleOutputs.write() API should document that output
+committing is not utilized when input path is absolute.
+(Dustin Cote via aajisaka)
+
+MAPREDUCE-6452. NPE when intermediate encrypt enabled for LocalRunner.
+(Zhihai Xu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb24953/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
index b685502..45d3cc5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
@@ -24,6 +24,7 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -36,6 +37,8 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import javax.crypto.KeyGenerator;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -47,7 +50,9 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
 import org.apache.hadoop.mapreduce.ClusterMetrics;
+import org.apache.hadoop.mapreduce.CryptoUtils;
 import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.mapreduce.QueueInfo;
 import org.apache.hadoop.mapreduce.TaskCompletionEvent;
@@ -55,6 +60,7 @@ import org.apache.hadoop.mapreduce.TaskTrackerInfo;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.checkpoint.TaskCheckpointID;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.hadoop.mapreduce.security.TokenCache;
 import 
org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
@@ -84,6 +90,8 @@ public class LocalJobRunner implements ClientProtocol {
   public static final String LOCAL_MAX_REDUCES =
 "mapreduce.local.reduce.tasks.maximum";
 
+  public static final String INTERMEDIATE_DATA_ENCRYPTION_ALGO = "HmacSHA1";
+
   private FileSystem fs;
private HashMap<JobID, Job> jobs = new HashMap<JobID, Job>();
   private JobConf conf;
@@ -188,6 +196,25 @@ public class LocalJobRunner implements ClientProtocol {
 
   jobs.put(id, this);
 
+  if (CryptoUtils.isEncryptedSpillEnabled(job)) {
+try {
+  int keyLen = conf.getInt(
+  
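The block is cut off above; a hedged reconstruction of its intent (the algorithm constant is from this patch, but the exact config keys and surrounding shape are assumed): generate an HmacSHA1 key of the configured length and stash it in the job credentials so local tasks can encrypt and decrypt their intermediate spills.

import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import javax.crypto.KeyGenerator;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.security.TokenCache;

class SpillKeySketch {
  static void installSpillKey(JobConf job, int keyLenBits) throws IOException {
    try {
      KeyGenerator keyGen =
          KeyGenerator.getInstance("HmacSHA1"); // INTERMEDIATE_DATA_ENCRYPTION_ALGO
      keyGen.init(keyLenBits);
      // Tasks later read this key back to encrypt/decrypt spill files.
      TokenCache.setEncryptedSpillKey(
          keyGen.generateKey().getEncoded(), job.getCredentials());
    } catch (NoSuchAlgorithmException e) {
      throw new IOException("Unable to generate encrypted spill key", e);
    }
  }
}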

[04/50] [abbrv] hadoop git commit: HDFS-8846. Add a unit test for INotify functionality across a layout version upgrade (Zhe Zhang via Colin P. McCabe)

2015-09-01 Thread zhz
HDFS-8846. Add a unit test for INotify functionality across a layout version 
upgrade (Zhe Zhang via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4d9acc5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4d9acc5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4d9acc5

Branch: refs/heads/HDFS-7285
Commit: a4d9acc51d1a977bc333da17780c00c72e8546f1
Parents: eee0d45
Author: Colin Patrick Mccabe 
Authored: Tue Aug 25 14:09:13 2015 -0700
Committer: Colin Patrick Mccabe 
Committed: Tue Aug 25 14:29:53 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/TestDFSInotifyEventInputStream.java|   2 +-
 .../org/apache/hadoop/hdfs/TestDFSUpgrade.java  |  78 +-
 .../hadoop/hdfs/TestDFSUpgradeFromImage.java| 107 ++-
 .../src/test/resources/hadoop-252-dfs-dir.tgz   | Bin 0 -> 14112 bytes
 5 files changed, 108 insertions(+), 82 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d9acc5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2c47b50..fd91744 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -835,6 +835,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
 
+HDFS-8846. Add a unit test for INotify functionality across a layout
+version upgrade (Zhe Zhang via Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d9acc5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
index e7bbcac..97f34f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
@@ -51,7 +51,7 @@ public class TestDFSInotifyEventInputStream {
   private static final Log LOG = LogFactory.getLog(
   TestDFSInotifyEventInputStream.class);
 
-  private static EventBatch waitForNextEvents(DFSInotifyEventInputStream eis)
+  public static EventBatch waitForNextEvents(DFSInotifyEventInputStream eis)
 throws IOException, MissingEventsException {
 EventBatch batch = null;
 while ((batch = eis.poll()) == null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d9acc5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index 8cc47c3..fe1ede0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -28,18 +28,12 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.File;
-import java.io.FilenameFilter;
 import java.io.IOException;
-import java.nio.file.Files;
-import java.util.List;
 import java.util.regex.Pattern;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.inotify.Event;
-import org.apache.hadoop.hdfs.inotify.EventBatch;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -48,21 +42,13 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;

[10/50] [abbrv] hadoop git commit: HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value. Contributed by Gautam Gopalakrishnan.

2015-09-01 Thread zhz
HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value. 
Contributed by Gautam Gopalakrishnan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bf28541
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bf28541
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bf28541

Branch: refs/heads/HDFS-7285
Commit: 0bf285413f8fcaadbb2d5817fe8090f5fb0d37d9
Parents: fdb56f7
Author: Harsh J 
Authored: Thu Aug 27 16:22:48 2015 +0530
Committer: Harsh J 
Committed: Thu Aug 27 16:22:48 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 5 +++++
 .../java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java | 7 +++++++
 3 files changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bf28541/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 42eed14..29ecf7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -359,6 +359,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value
+(Gautam Gopalakrishnan via harsh)
+
 HDFS-8821. Explain message "Operation category X is not supported
 in state standby" (Gautam Gopalakrishnan via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bf28541/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 014637b..298d55e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -851,6 +851,11 @@ public class DFSAdmin extends FsShell {
   return exitCode;
 }
 
+if (bandwidth < 0) {
+  System.err.println("Bandwidth should be a non-negative integer");
+  return exitCode;
+}
+
 FileSystem fs = getFS();
 if (!(fs instanceof DistributedFileSystem)) {
   System.err.println("FileSystem is " + fs.getUri());

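The guard is a plain range check placed after the numeric parse. A self-contained
sketch of the same logic outside DFSAdmin (class and method names here are
illustrative only, not the actual DFSAdmin code):

  // Illustrative standalone version of the added check: parse the argument,
  // then reject negative values before any RPC would be attempted.
  public class SetBalancerBandwidthCheck {
    public static void main(String[] args) {
      String arg = args.length > 0 ? args[0] : "";
      long bandwidth;
      try {
        bandwidth = Long.parseLong(arg);
      } catch (NumberFormatException nfe) {
        System.err.println("Bandwidth value is not a number: " + arg);
        System.exit(-1);
        return;
      }
      if (bandwidth < 0) {
        System.err.println("Bandwidth should be a non-negative integer");
        System.exit(-1);
        return;
      }
      System.out.println("Would set balancer bandwidth to " + bandwidth);
    }
  }

Run with "-10" this exits non-zero without touching the cluster, which is what the
new testSetNegativeBalancerBandwidth case below asserts via the command's exit code.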
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bf28541/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index 6859e43..a6c0924 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -193,6 +193,13 @@ public class TestDFSAdminWithHA {
   }
 
  @Test (timeout = 30000)
+  public void testSetNegativeBalancerBandwidth() throws Exception {
+setUpHaCluster(false);
+int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "-10"});
+assertEquals("Negative bandwidth value must fail the command", -1, 
exitCode);
+  }
+
+  @Test (timeout = 30000)
   public void testMetaSave() throws Exception {
 setUpHaCluster(false);
 int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});



[20/50] [abbrv] hadoop git commit: YARN-1556. NPE getting application report with a null appId. Contributed by Weiwei Yang.

2015-09-01 Thread zhz
YARN-1556. NPE getting application report with a null appId. Contributed by 
Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/beb65c94
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/beb65c94
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/beb65c94

Branch: refs/heads/HDFS-7285
Commit: beb65c9465806114237aa271b07b31ff3c1f4404
Parents: e166c03
Author: Junping Du 
Authored: Fri Aug 28 05:57:34 2015 -0700
Committer: Junping Du 
Committed: Fri Aug 28 05:57:34 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  7 +++++--
 .../yarn/server/resourcemanager/ClientRMService.java|  3 +++
 .../server/resourcemanager/TestClientRMService.java | 12 ++++++++++++
 3 files changed, 20 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/beb65c94/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 51715cf..0b733a4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -413,12 +413,15 @@ Release 2.8.0 - UNRELEASED
 YARN-4026. Refactored ContainerAllocator to accept a list of priorites
 rather than a single priority. (Wangda Tan via jianhe)
 
-   YARN-4031. Add JvmPauseMonitor to ApplicationHistoryServer and
-   WebAppProxyServer (djp via rkanter)
+YARN-4031. Add JvmPauseMonitor to ApplicationHistoryServer and
+WebAppProxyServer (djp via rkanter)
 
 YARN-4057. If ContainersMonitor is not enabled, only print
 related log info one time. (Jun Gong via zxu)
 
+YARN-1556. NPE getting application report with a null appId. (Weiwei Yang
+via junping_du)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/beb65c94/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 3e16165..cce0fe5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -315,6 +315,9 @@ public class ClientRMService extends AbstractService implements
   public GetApplicationReportResponse getApplicationReport(
   GetApplicationReportRequest request) throws YarnException {
 ApplicationId applicationId = request.getApplicationId();
+if (applicationId == null) {
+  throw new ApplicationNotFoundException("Invalid application id: null");
+}
 
 UserGroupInformation callerUGI;
 try {

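With the guard in place, a report request whose ApplicationId was never set fails
fast with ApplicationNotFoundException instead of an NPE deep inside the RM. A
hedged sketch in the style of the test below ("rmService" and the use of
Records.newRecord are assumptions, not patch code):

  // Sketch: build a request without setting an application id and verify the
  // new fast-fail path. The enclosing test method is assumed to declare
  // "throws Exception".
  GetApplicationReportRequest badRequest =
      Records.newRecord(GetApplicationReportRequest.class); // appId stays null
  try {
    rmService.getApplicationReport(badRequest);
    Assert.fail("Expected ApplicationNotFoundException for a null appId");
  } catch (ApplicationNotFoundException e) {
    Assert.assertEquals("Invalid application id: null", e.getMessage());
  }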
http://git-wip-us.apache.org/repos/asf/hadoop/blob/beb65c94/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 8031759..6a0b99c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -333,6 +333,18 @@ public class TestClientRMService {
   report.getApplicationResourceUsageReport();
   Assert.assertEquals(10, usageReport.getMemorySeconds());
   Assert.assertEquals(3, usageReport.getVcoreSeconds());
+
+  // if application id is null
+  GetApplicationReportRequest invalidRequest = recordFactory
+  

  1   2   >