hadoop git commit: HDFS-7224. Allow reuse of NN connections via webhdfs. Contributed by Eric Payne (cherry picked from commit 2b0fa20f69417326a92beac10ffa072db2616e73)

2015-01-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2551cc6e6 -> c081fa6ae


 HDFS-7224. Allow reuse of NN connections via webhdfs. Contributed by Eric Payne
(cherry picked from commit 2b0fa20f69417326a92beac10ffa072db2616e73)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c081fa6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c081fa6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c081fa6a

Branch: refs/heads/branch-2
Commit: c081fa6ae8fbd38eb3b6fb2ba0aa3ba1ac779240
Parents: 2551cc6
Author: Kihwal Lee kih...@apache.org
Authored: Mon Jan 26 08:15:37 2015 -0600
Committer: Kihwal Lee kih...@apache.org
Committed: Mon Jan 26 08:15:37 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 20 
 .../hdfs/web/TestFSMainOperationsWebHdfs.java   | 49 
 3 files changed, 64 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c081fa6a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 82d933c..1220678 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -265,6 +265,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7623. Add htrace configuration properties to core-default.xml and
 update user doc about how to enable htrace. (yliu)
 
+HDFS-7224. Allow reuse of NN connections via webhdfs (Eric Payne via
+kihwal)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c081fa6a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 7284b55..2a3faf7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -313,16 +313,20 @@ public class WebHdfsFileSystem extends FileSystem
     if (in == null) {
       throw new IOException("The " + (useErrorStream? "error": "input")
           + " stream is null.");
     }
-    final String contentType = c.getContentType();
-    if (contentType != null) {
-      final MediaType parsed = MediaType.valueOf(contentType);
-      if (!MediaType.APPLICATION_JSON_TYPE.isCompatible(parsed)) {
-        throw new IOException("Content-Type \"" + contentType
-            + "\" is incompatible with \"" + MediaType.APPLICATION_JSON
-            + "\" (parsed=\"" + parsed + "\")");
+    try {
+      final String contentType = c.getContentType();
+      if (contentType != null) {
+        final MediaType parsed = MediaType.valueOf(contentType);
+        if (!MediaType.APPLICATION_JSON_TYPE.isCompatible(parsed)) {
+          throw new IOException("Content-Type \"" + contentType
+              + "\" is incompatible with \"" + MediaType.APPLICATION_JSON
+              + "\" (parsed=\"" + parsed + "\")");
+        }
       }
+      return (Map<?, ?>)JSON.parse(new InputStreamReader(in, Charsets.UTF_8));
+    } finally {
+      in.close();
     }
-    return (Map<?, ?>)JSON.parse(new InputStreamReader(in, Charsets.UTF_8));
   }
 
   private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
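
Context for the change above: java.net.HttpURLConnection only returns its socket to the JDK keep-alive pool once the response entity has been fully consumed and closed, so a stream left open after JSON.parse() forces a fresh NN connection for each request. A minimal standalone sketch of the read-then-close discipline (illustrative names, not WebHDFS code):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class KeepAliveSketch {
  // Fully read and close the response body so the underlying socket can be
  // returned to the JVM's keep-alive pool and reused by the next request.
  static byte[] fetch(URL url) throws IOException {
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    InputStream in = conn.getInputStream();
    try {
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      byte[] buf = new byte[4096];
      for (int n = in.read(buf); n != -1; n = in.read(buf)) {
        out.write(buf, 0, n);
      }
      return out.toByteArray();
    } finally {
      in.close(); // mirrors the finally block in the patch above
    }
  }
}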

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c081fa6a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
index a5bb41d..295aff0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
@@ -17,8 +17,14 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.doReturn;
+
 import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
 import java.net.URI;
+import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -32,6 +38,8 

hadoop git commit: HDFS-7224. Allow reuse of NN connections via webhdfs. Contributed by Eric Payne

2015-01-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7b82c4ab4 -> 2b0fa20f6


 HDFS-7224. Allow reuse of NN connections via webhdfs. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b0fa20f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b0fa20f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b0fa20f

Branch: refs/heads/trunk
Commit: 2b0fa20f69417326a92beac10ffa072db2616e73
Parents: 7b82c4a
Author: Kihwal Lee kih...@apache.org
Authored: Mon Jan 26 08:14:30 2015 -0600
Committer: Kihwal Lee kih...@apache.org
Committed: Mon Jan 26 08:14:30 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 20 
 .../hdfs/web/TestFSMainOperationsWebHdfs.java   | 49 
 3 files changed, 64 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b0fa20f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a6cbf8f..39453d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -546,6 +546,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7623. Add htrace configuration properties to core-default.xml and
 update user doc about how to enable htrace. (yliu)
 
+HDFS-7224. Allow reuse of NN connections via webhdfs (Eric Payne via
+kihwal)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b0fa20f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 559efdb..460e78b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -312,16 +312,20 @@ public class WebHdfsFileSystem extends FileSystem
     if (in == null) {
       throw new IOException("The " + (useErrorStream? "error": "input")
           + " stream is null.");
     }
-    final String contentType = c.getContentType();
-    if (contentType != null) {
-      final MediaType parsed = MediaType.valueOf(contentType);
-      if (!MediaType.APPLICATION_JSON_TYPE.isCompatible(parsed)) {
-        throw new IOException("Content-Type \"" + contentType
-            + "\" is incompatible with \"" + MediaType.APPLICATION_JSON
-            + "\" (parsed=\"" + parsed + "\")");
+    try {
+      final String contentType = c.getContentType();
+      if (contentType != null) {
+        final MediaType parsed = MediaType.valueOf(contentType);
+        if (!MediaType.APPLICATION_JSON_TYPE.isCompatible(parsed)) {
+          throw new IOException("Content-Type \"" + contentType
+              + "\" is incompatible with \"" + MediaType.APPLICATION_JSON
+              + "\" (parsed=\"" + parsed + "\")");
+        }
       }
+      return (Map<?, ?>)JSON.parse(new InputStreamReader(in, Charsets.UTF_8));
+    } finally {
+      in.close();
     }
-    return (Map<?, ?>)JSON.parse(new InputStreamReader(in, Charsets.UTF_8));
   }
 
   private static Map<?, ?> validateResponse(final HttpOpParam.Op op,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b0fa20f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
index b4216f0..4975a87 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
@@ -17,8 +17,14 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.doReturn;
+
 import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
 import java.net.URI;
+import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -32,6 +38,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import 

hadoop git commit: MAPREDUCE-6141. History server leveldb recovery store. Contributed by Jason Lowe (cherry picked from commit 56b7ec71a69820ae12b4b9e2eb04b7368f721dbf)

2015-01-26 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 07fe6a36c -> 2cf58ca5a


MAPREDUCE-6141. History server leveldb recovery store. Contributed by Jason Lowe
(cherry picked from commit 56b7ec71a69820ae12b4b9e2eb04b7368f721dbf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cf58ca5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cf58ca5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cf58ca5

Branch: refs/heads/branch-2
Commit: 2cf58ca5a698b5c9aef601671f72b821185333d9
Parents: 07fe6a3
Author: Jason Lowe jl...@apache.org
Authored: Mon Jan 26 16:28:55 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Mon Jan 26 16:30:10 2015 +

--
 hadoop-mapreduce-project/CHANGES.txt|   2 +
 .../mapreduce/v2/jobhistory/JHAdminConfig.java  |   7 +
 .../src/main/resources/mapred-default.xml   |   8 +
 .../hadoop-mapreduce-client-hs/pom.xml  |   4 +
 .../HistoryServerLeveldbStateStoreService.java  | 379 +++
 ...stHistoryServerLeveldbStateStoreService.java | 207 ++
 6 files changed, 607 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cf58ca5/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 56a7b22..4173712 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -30,6 +30,8 @@ Release 2.7.0 - UNRELEASED
 cache with enabling wired encryption at the same time. 
 (Junping Du via xgong)
 
+MAPREDUCE-6141. History server leveldb recovery store (jlowe)
+
   OPTIMIZATIONS
 
 MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cf58ca5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
index e5a49b5..f7cba9f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
@@ -197,6 +197,13 @@ public class JHAdminConfig {
   public static final String MR_HS_FS_STATE_STORE_URI =
       MR_HISTORY_PREFIX + "recovery.store.fs.uri";
 
+  /**
+   * The local path where server state will be stored when
+   * HistoryServerLeveldbStateStoreService is configured as the state store
+   */
+  public static final String MR_HS_LEVELDB_STATE_STORE_PATH =
+      MR_HISTORY_PREFIX + "recovery.store.leveldb.path";
+
   /** Whether to use fixed ports with the minicluster. */
   public static final String MR_HISTORY_MINICLUSTER_FIXED_PORTS = MR_HISTORY_PREFIX
       + "minicluster.fixed.ports";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cf58ca5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 85aa0e5..a14d7ed 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1987,6 +1987,14 @@
 </property>
 
 <property>
+  <name>mapreduce.jobhistory.recovery.store.leveldb.path</name>
+  <value>${hadoop.tmp.dir}/mapred/history/recoverystore</value>
+  <description>The URI where history server state will be stored if
+  HistoryServerLeveldbStateStoreService is configured as the recovery
+  storage class.</description>
+</property>
+
+<property>
   <name>mapreduce.jobhistory.http.policy</name>
   <value>HTTP_ONLY</value>
   <description>
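
A usage sketch for the new property, assuming the companion keys mapreduce.jobhistory.recovery.enable and mapreduce.jobhistory.recovery.store.class and the org.apache.hadoop.mapreduce.v2.hs package for the store class (verify both against your release):

import org.apache.hadoop.conf.Configuration;

public class HistoryRecoveryConfigSketch {
  // Point the JobHistoryServer at the leveldb recovery store; every key
  // except recovery.store.leveldb.path is an assumption noted above.
  public static Configuration configure() {
    Configuration conf = new Configuration();
    conf.setBoolean("mapreduce.jobhistory.recovery.enable", true);
    conf.set("mapreduce.jobhistory.recovery.store.class",
        "org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService");
    conf.set("mapreduce.jobhistory.recovery.store.leveldb.path",
        "/var/lib/hadoop-mapreduce/history/recoverystore");
    return conf;
  }
}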

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cf58ca5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
--

hadoop git commit: MAPREDUCE-6141. History server leveldb recovery store. Contributed by Jason Lowe

2015-01-26 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 902c6ea7e -> 56b7ec71a


MAPREDUCE-6141. History server leveldb recovery store. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56b7ec71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56b7ec71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56b7ec71

Branch: refs/heads/trunk
Commit: 56b7ec71a69820ae12b4b9e2eb04b7368f721dbf
Parents: 902c6ea
Author: Jason Lowe jl...@apache.org
Authored: Mon Jan 26 16:28:55 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Mon Jan 26 16:28:55 2015 +

--
 hadoop-mapreduce-project/CHANGES.txt|   2 +
 .../mapreduce/v2/jobhistory/JHAdminConfig.java  |   7 +
 .../src/main/resources/mapred-default.xml   |   8 +
 .../hadoop-mapreduce-client-hs/pom.xml  |   4 +
 .../HistoryServerLeveldbStateStoreService.java  | 379 +++
 ...stHistoryServerLeveldbStateStoreService.java | 207 ++
 6 files changed, 607 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b7ec71/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index b28fc65..35ceb2e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -262,6 +262,8 @@ Release 2.7.0 - UNRELEASED
 cache with enabling wired encryption at the same time. 
 (Junping Du via xgong)
 
+MAPREDUCE-6141. History server leveldb recovery store (jlowe)
+
   OPTIMIZATIONS
 
 MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b7ec71/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
index e5a49b5..f7cba9f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
@@ -197,6 +197,13 @@ public class JHAdminConfig {
   public static final String MR_HS_FS_STATE_STORE_URI =
       MR_HISTORY_PREFIX + "recovery.store.fs.uri";
 
+  /**
+   * The local path where server state will be stored when
+   * HistoryServerLeveldbStateStoreService is configured as the state store
+   */
+  public static final String MR_HS_LEVELDB_STATE_STORE_PATH =
+      MR_HISTORY_PREFIX + "recovery.store.leveldb.path";
+
   /** Whether to use fixed ports with the minicluster. */
   public static final String MR_HISTORY_MINICLUSTER_FIXED_PORTS = MR_HISTORY_PREFIX
       + "minicluster.fixed.ports";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b7ec71/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 57a17a8..4535137 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1569,6 +1569,14 @@
 </property>
 
 <property>
+  <name>mapreduce.jobhistory.recovery.store.leveldb.path</name>
+  <value>${hadoop.tmp.dir}/mapred/history/recoverystore</value>
+  <description>The URI where history server state will be stored if
+  HistoryServerLeveldbStateStoreService is configured as the recovery
+  storage class.</description>
+</property>
+
+<property>
   <name>mapreduce.jobhistory.http.policy</name>
   <value>HTTP_ONLY</value>
   <description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b7ec71/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
--
diff --git 

hadoop git commit: YARN-3088. LinuxContainerExecutor.deleteAsUser can throw NPE if native executor returns an error. Contributed by Eric Payne

2015-01-26 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2b0fa20f6 -> 902c6ea7e


YARN-3088. LinuxContainerExecutor.deleteAsUser can throw NPE if native executor 
returns an error. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/902c6ea7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/902c6ea7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/902c6ea7

Branch: refs/heads/trunk
Commit: 902c6ea7e4d3b49e49d9ce51ae9d12694ecfcf89
Parents: 2b0fa20
Author: Jason Lowe jl...@apache.org
Authored: Mon Jan 26 15:40:21 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Mon Jan 26 15:40:21 2015 +

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../nodemanager/LinuxContainerExecutor.java |  9 +++-
 .../TestLinuxContainerExecutorWithMocks.java| 49 +++-
 3 files changed, 58 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/902c6ea7/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0808678..872f16e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -409,6 +409,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3082. Non thread safe access to systemCredentials in 
NodeHeartbeatResponse
 processing. (Anubhav Dhoot via ozawa)
 
+YARN-3088. LinuxContainerExecutor.deleteAsUser can throw NPE if native
+executor returns an error (Eric Payne via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902c6ea7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 4606f0c..d6e6894 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -392,18 +392,23 @@ public class LinuxContainerExecutor extends ContainerExecutor {
     verifyUsernamePattern(user);
     String runAsUser = getRunAsUser(user);
 
+    String dirString = dir == null ? "" : dir.toUri().getPath();
+
     List<String> command = new ArrayList<String>(
         Arrays.asList(containerExecutorExe,
                     runAsUser,
                     user,
                     Integer.toString(Commands.DELETE_AS_USER.getValue()),
-                    dir == null ? "" : dir.toUri().getPath()));
+                    dirString));
+    List<String> pathsToDelete = new ArrayList<String>();
     if (baseDirs == null || baseDirs.length == 0) {
       LOG.info("Deleting absolute path : " + dir);
+      pathsToDelete.add(dirString);
     } else {
       for (Path baseDir : baseDirs) {
         Path del = dir == null ? baseDir : new Path(baseDir, dir);
         LOG.info("Deleting path : " + del);
+        pathsToDelete.add(del.toString());
         command.add(baseDir.toUri().getPath());
       }
     }
@@ -419,7 +424,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
       }
     } catch (IOException e) {
       int exitCode = shExec.getExitCode();
-      LOG.error("DeleteAsUser for " + dir.toUri().getPath()
+      LOG.error("DeleteAsUser for " + StringUtils.join(" ", pathsToDelete)
           + " returned with exit code: " + exitCode, e);
       LOG.error("Output from LinuxContainerExecutor's deleteAsUser follows:");
       logOutput(shExec.getOutput());
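
The underlying bug is a plain check-then-use error: dir may legitimately be null (meaning the base dirs themselves are deleted), yet the old error path called dir.toUri() unconditionally. A standalone sketch of the guarded pattern the patch adopts (names illustrative, not the NodeManager code):

import java.util.ArrayList;
import java.util.List;

public class NullSafeDeleteSketch {
  // Compute the nullable value once, up front, and reuse the safe string
  // everywhere: command line, bookkeeping, and error logging.
  static List<String> pathsToDelete(String dir, String[] baseDirs) {
    String dirString = dir == null ? "" : dir;
    List<String> paths = new ArrayList<String>();
    if (baseDirs == null || baseDirs.length == 0) {
      paths.add(dirString);
    } else {
      for (String baseDir : baseDirs) {
        paths.add(dir == null ? baseDir : baseDir + "/" + dir);
      }
    }
    return paths; // safe to join and log even when dir was null
  }
}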

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902c6ea7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
 

[Hadoop Wiki] Trivial Update of ContributorsGroup by SteveLoughran

2015-01-26 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The ContributorsGroup page has been changed by SteveLoughran:
https://wiki.apache.org/hadoop/ContributorsGroup?action=diff&rev1=96&rev2=97

Comment:
add DavidTing

   * DavidLaPalomento
   * DavidMarin
   * DavidMenestrina
+  * DavidTing
   * DavidWang
   * Denis
   * DevopamMittra


hadoop git commit: YARN-3088. LinuxContainerExecutor.deleteAsUser can throw NPE if native executor returns an error. Contributed by Eric Payne (cherry picked from commit 902c6ea7e4d3b49e49d9ce51ae9d12

2015-01-26 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c081fa6ae -> 07fe6a36c


YARN-3088. LinuxContainerExecutor.deleteAsUser can throw NPE if native executor 
returns an error. Contributed by Eric Payne
(cherry picked from commit 902c6ea7e4d3b49e49d9ce51ae9d12694ecfcf89)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07fe6a36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07fe6a36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07fe6a36

Branch: refs/heads/branch-2
Commit: 07fe6a36cb0901b9540c9c278fb2593ffa27427f
Parents: c081fa6
Author: Jason Lowe jl...@apache.org
Authored: Mon Jan 26 15:40:21 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Mon Jan 26 15:41:23 2015 +

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../nodemanager/LinuxContainerExecutor.java |  9 +++-
 .../TestLinuxContainerExecutorWithMocks.java| 49 +++-
 3 files changed, 58 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07fe6a36/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 503b491..b811023 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -375,6 +375,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3082. Non thread safe access to systemCredentials in 
NodeHeartbeatResponse
 processing. (Anubhav Dhoot via ozawa)
 
+YARN-3088. LinuxContainerExecutor.deleteAsUser can throw NPE if native
+executor returns an error (Eric Payne via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/07fe6a36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 7135805..59b35ce 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -390,18 +390,23 @@ public class LinuxContainerExecutor extends ContainerExecutor {
     verifyUsernamePattern(user);
     String runAsUser = getRunAsUser(user);
 
+    String dirString = dir == null ? "" : dir.toUri().getPath();
+
     List<String> command = new ArrayList<String>(
         Arrays.asList(containerExecutorExe,
                     runAsUser,
                     user,
                     Integer.toString(Commands.DELETE_AS_USER.getValue()),
-                    dir == null ? "" : dir.toUri().getPath()));
+                    dirString));
+    List<String> pathsToDelete = new ArrayList<String>();
     if (baseDirs == null || baseDirs.length == 0) {
       LOG.info("Deleting absolute path : " + dir);
+      pathsToDelete.add(dirString);
     } else {
       for (Path baseDir : baseDirs) {
         Path del = dir == null ? baseDir : new Path(baseDir, dir);
         LOG.info("Deleting path : " + del);
+        pathsToDelete.add(del.toString());
         command.add(baseDir.toUri().getPath());
       }
     }
@@ -417,7 +422,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
       }
     } catch (IOException e) {
       int exitCode = shExec.getExitCode();
-      LOG.error("DeleteAsUser for " + dir.toUri().getPath()
+      LOG.error("DeleteAsUser for " + StringUtils.join(" ", pathsToDelete)
          + " returned with exit code: " + exitCode, e);
       LOG.error("Output from LinuxContainerExecutor's deleteAsUser follows:");
       logOutput(shExec.getOutput());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/07fe6a36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
 

hadoop git commit: HADOOP-11466: move to 2.6.1

2015-01-26 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a389056af -> 7f1441291


HADOOP-11466: move to 2.6.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f144129
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f144129
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f144129

Branch: refs/heads/branch-2
Commit: 7f1441291d85d76c9b9f6cc04340b60a81b907f0
Parents: a389056
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Jan 26 11:28:02 2015 -0800
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Mon Jan 26 11:28:02 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 20 
 1 file changed, 16 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f144129/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0bfd02a..e37bffe 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -386,10 +386,6 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11327. BloomFilter#not() omits the last bit, resulting in an
 incorrect filter (Eric Payne via jlowe)
 
-HADOOP-11466. FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC
-architecture because it is slower there (Suman Somasundar via Colin P.
-McCabe)
-
 HADOOP-11209. Configuration#updatingResource/finalParameters are not
 thread-safe. (Varun Saxena via ozawa)
 
@@ -411,6 +407,22 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11499. Check of executorThreadsStarted in
 ValueQueue#submitRefillTask() evades lock acquisition (Ted Yu via jlowe)
 
+Release 2.6.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+HADOOP-11466. FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC
+architecture because it is slower there (Suman Somasundar via Colin P.
+McCabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



hadoop git commit: HADOOP-11466. FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC architecture because it is slower there (Suman Somasundar via Colin P. McCabe)

2015-01-26 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 4f5d33062 -> ebb515f5e


HADOOP-11466. FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC 
architecture because it is slower there (Suman Somasundar via Colin P.  McCabe)

(cherry picked from commit ee7d22e90ce67de3e7ee92f309c048a1d4be0bbe)
(cherry picked from commit 2c69f8cf81aed731762524e821088fd779210601)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebb515f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebb515f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebb515f5

Branch: refs/heads/branch-2.6
Commit: ebb515f5e765c9a4c103dd8ba07956f1928c5413
Parents: 4f5d330
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Jan 21 16:33:02 2015 -0800
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Mon Jan 26 11:19:24 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 16 +++
 .../apache/hadoop/io/FastByteComparisons.java   | 21 +++-
 2 files changed, 36 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebb515f5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 626ed8b..583f6ae 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1,5 +1,21 @@
 Hadoop Change Log
 
+Release 2.6.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+HADOOP-11466: FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC
+architecture because it is slower there (Suman Somasundar via Colin P.
+McCabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebb515f5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
index 3f5881b..a3fea31 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
@@ -24,6 +24,9 @@ import java.security.PrivilegedAction;
 
 import sun.misc.Unsafe;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 import com.google.common.primitives.Longs;
 import com.google.common.primitives.UnsignedBytes;
 
@@ -33,6 +36,7 @@ import com.google.common.primitives.UnsignedBytes;
  * class to be able to compare arrays that start at non-zero offsets.
  */
 abstract class FastByteComparisons {
+  static final Log LOG = LogFactory.getLog(FastByteComparisons.class);
 
   /**
    * Lexicographically compare two byte arrays.
@@ -71,6 +75,13 @@ abstract class FastByteComparisons {
      * implementation if unable to do so.
      */
     static Comparer<byte[]> getBestComparer() {
+      if (System.getProperty("os.arch").equals("sparc")) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Lexicographical comparer selected for "
+              + "byte aligned system architecture");
+        }
+        return lexicographicalComparerJavaImpl();
+      }
       try {
         Class<?> theClass = Class.forName(UNSAFE_COMPARER_NAME);
 
@@ -78,8 +89,16 @@ abstract class FastByteComparisons {
         @SuppressWarnings("unchecked")
         Comparer<byte[]> comparer =
           (Comparer<byte[]>) theClass.getEnumConstants()[0];
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Unsafe comparer selected for "
+              + "byte unaligned system architecture");
+        }
         return comparer;
       } catch (Throwable t) { // ensure we really catch *everything*
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(t.getMessage());
+          LOG.trace("Lexicographical comparer selected");
+        }
         return lexicographicalComparerJavaImpl();
       }
     }
@@ -234,4 +253,4 @@ abstract class FastByteComparisons {
       }
     }
   }
-}
\ No newline at end of file
+}
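
The selection logic above is a probe-and-fallback: check the platform first, then attempt to load the Unsafe-backed comparer, and fall back to the portable Java comparer on any failure. A reduced sketch of the same shape (the class name passed to Class.forName is hypothetical):

public final class ComparerProbeSketch {
  interface Comparer { int compare(byte[] a, byte[] b); }

  // Portable pure-Java comparer: unsigned lexicographic byte comparison.
  static final Comparer JAVA_IMPL = (a, b) -> {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) return d;
    }
    return a.length - b.length;
  };

  static Comparer pick() {
    // Platform probe first: unaligned word reads are slow or trap on SPARC.
    if ("sparc".equals(System.getProperty("os.arch"))) {
      return JAVA_IMPL;
    }
    try {
      // Hypothetical optimized implementation; stands in for
      // Class.forName(UNSAFE_COMPARER_NAME) in the real code.
      Class<?> c = Class.forName("com.example.UnsafeComparer");
      return (Comparer) c.getDeclaredConstructor().newInstance();
    } catch (Throwable t) { // mirror the patch: catch *everything*
      return JAVA_IMPL;
    }
  }
}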



hadoop git commit: HADOOP-11466: move to 2.6.1

2015-01-26 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7574df1bb -> 21d559906


HADOOP-11466: move to 2.6.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21d55990
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21d55990
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21d55990

Branch: refs/heads/trunk
Commit: 21d5599067adf14d589732a586c3b10aeb0936e9
Parents: 7574df1
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Jan 26 11:28:02 2015 -0800
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Mon Jan 26 11:28:31 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 20 
 1 file changed, 16 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21d55990/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 598f750..e0da851 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -742,10 +742,6 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11327. BloomFilter#not() omits the last bit, resulting in an
 incorrect filter (Eric Payne via jlowe)
 
-HADOOP-11466. FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC
-architecture because it is slower there (Suman Somasundar via Colin P.
-McCabe)
-
 HADOOP-11209. Configuration#updatingResource/finalParameters are not
 thread-safe. (Varun Saxena via ozawa)
 
@@ -767,6 +763,22 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11499. Check of executorThreadsStarted in
 ValueQueue#submitRefillTask() evades lock acquisition (Ted Yu via jlowe)
 
+Release 2.6.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+HADOOP-11466. FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC
+architecture because it is slower there (Suman Somasundar via Colin P.
+McCabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



hadoop git commit: HADOOP-11499. Check of executorThreadsStarted in ValueQueue#submitRefillTask() evades lock acquisition. Contributed by Ted Yu

2015-01-26 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 56b7ec71a -> 7574df1bb


HADOOP-11499. Check of executorThreadsStarted in ValueQueue#submitRefillTask() 
evades lock acquisition. Contributed by Ted Yu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7574df1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7574df1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7574df1b

Branch: refs/heads/trunk
Commit: 7574df1bba33919348d3009f2578d6a81b5818e6
Parents: 56b7ec7
Author: Jason Lowe jl...@apache.org
Authored: Mon Jan 26 16:56:14 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Mon Jan 26 16:56:14 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../org/apache/hadoop/crypto/key/kms/ValueQueue.java| 12 +++-
 2 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7574df1b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 662f580..598f750 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -764,6 +764,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11482. Use correct UGI when KMSClientProvider is called by a proxy
 user. Contributed by Arun Suresh.
 
+HADOOP-11499. Check of executorThreadsStarted in
+ValueQueue#submitRefillTask() evades lock acquisition (Ted Yu via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7574df1b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
index 8e67ecc..32451d8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
@@ -308,11 +308,13 @@ public class ValueQueue <E> {
       final Queue<E> keyQueue) throws InterruptedException {
     if (!executorThreadsStarted) {
       synchronized (this) {
-        // To ensure all requests are first queued, make coreThreads =
-        // maxThreads
-        // and pre-start all the Core Threads.
-        executor.prestartAllCoreThreads();
-        executorThreadsStarted = true;
+        if (!executorThreadsStarted) {
+          // To ensure all requests are first queued, make coreThreads =
+          // maxThreads
+          // and pre-start all the Core Threads.
+          executor.prestartAllCoreThreads();
+          executorThreadsStarted = true;
+        }
       }
     }
     // The submit/execute method of the ThreadPoolExecutor is bypassed and
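
The bug class here is half-finished double-checked locking: without re-checking the flag inside the synchronized block, two threads can both pass the outer check and pre-start the pool twice. A self-contained sketch of the corrected idiom, with the flag declared volatile so the unsynchronized fast-path read is also well-defined:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class LazyStartSketch {
  private final ThreadPoolExecutor executor = new ThreadPoolExecutor(
      4, 4, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
  private volatile boolean started; // volatile makes the fast path safe

  void ensureStarted() {
    if (!started) {                 // cheap unsynchronized fast path
      synchronized (this) {
        if (!started) {             // re-check under the lock (the fix)
          executor.prestartAllCoreThreads();
          started = true;
        }
      }
    }
  }
}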



[49/50] [abbrv] hadoop git commit: YARN-3088. LinuxContainerExecutor.deleteAsUser can throw NPE if native executor returns an error. Contributed by Eric Payne

2015-01-26 Thread zhz
YARN-3088. LinuxContainerExecutor.deleteAsUser can throw NPE if native executor 
returns an error. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b327379b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b327379b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b327379b

Branch: refs/heads/HDFS-EC
Commit: b327379b2aa5da9a48ff5258df1760d6523f04d7
Parents: 21f5c51
Author: Jason Lowe jl...@apache.org
Authored: Mon Jan 26 15:40:21 2015 +
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:31 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../nodemanager/LinuxContainerExecutor.java |  9 +++-
 .../TestLinuxContainerExecutorWithMocks.java| 49 +++-
 3 files changed, 58 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b327379b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0808678..872f16e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -409,6 +409,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3082. Non thread safe access to systemCredentials in 
NodeHeartbeatResponse
 processing. (Anubhav Dhoot via ozawa)
 
+YARN-3088. LinuxContainerExecutor.deleteAsUser can throw NPE if native
+executor returns an error (Eric Payne via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b327379b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 4606f0c..d6e6894 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -392,18 +392,23 @@ public class LinuxContainerExecutor extends ContainerExecutor {
     verifyUsernamePattern(user);
     String runAsUser = getRunAsUser(user);
 
+    String dirString = dir == null ? "" : dir.toUri().getPath();
+
     List<String> command = new ArrayList<String>(
         Arrays.asList(containerExecutorExe,
                     runAsUser,
                     user,
                     Integer.toString(Commands.DELETE_AS_USER.getValue()),
-                    dir == null ? "" : dir.toUri().getPath()));
+                    dirString));
+    List<String> pathsToDelete = new ArrayList<String>();
     if (baseDirs == null || baseDirs.length == 0) {
       LOG.info("Deleting absolute path : " + dir);
+      pathsToDelete.add(dirString);
     } else {
       for (Path baseDir : baseDirs) {
         Path del = dir == null ? baseDir : new Path(baseDir, dir);
         LOG.info("Deleting path : " + del);
+        pathsToDelete.add(del.toString());
         command.add(baseDir.toUri().getPath());
       }
     }
@@ -419,7 +424,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
       }
     } catch (IOException e) {
       int exitCode = shExec.getExitCode();
-      LOG.error("DeleteAsUser for " + dir.toUri().getPath()
+      LOG.error("DeleteAsUser for " + StringUtils.join(" ", pathsToDelete)
          + " returned with exit code: " + exitCode, e);
       LOG.error("Output from LinuxContainerExecutor's deleteAsUser follows:");
       logOutput(shExec.getOutput());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b327379b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
index d54367a..98ab8e0 100644
--- 

[28/50] [abbrv] hadoop git commit: HDFS-7575. Upgrade should generate a unique storage ID for each volume. (Contributed by Arpit Agarwal)

2015-01-26 Thread zhz
HDFS-7575. Upgrade should generate a unique storage ID for each volume. 
(Contributed by Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abb01154
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abb01154
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abb01154

Branch: refs/heads/HDFS-EC
Commit: abb011549865e845059e9b388377fba178b3b391
Parents: 3f90fca
Author: Arpit Agarwal a...@apache.org
Authored: Thu Jan 22 14:08:20 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:28 2015 -0800

--
 .../hdfs/server/datanode/DataStorage.java   |  35 +++--
 .../hdfs/server/protocol/DatanodeStorage.java   |  19 ++-
 .../hadoop/hdfs/TestDFSUpgradeFromImage.java|  19 ++-
 .../hadoop/hdfs/TestDatanodeLayoutUpgrade.java  |   2 +-
 ...estDatanodeStartupFixesLegacyStorageIDs.java | 139 +++
 .../apache/hadoop/hdfs/UpgradeUtilities.java|   2 +-
 .../server/datanode/SimulatedFSDataset.java |   2 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   |   2 +-
 .../testUpgradeFrom22FixesStorageIDs.tgz| Bin 0 - 3260 bytes
 .../testUpgradeFrom22FixesStorageIDs.txt|  25 
 .../testUpgradeFrom22via26FixesStorageIDs.tgz   | Bin 0 - 3635 bytes
 .../testUpgradeFrom22via26FixesStorageIDs.txt   |  25 
 .../testUpgradeFrom26PreservesStorageIDs.tgz| Bin 0 - 3852 bytes
 .../testUpgradeFrom26PreservesStorageIDs.txt|  25 
 14 files changed, 274 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/abb01154/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 3ea8ce3..0602dfb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -142,11 +143,20 @@ public class DataStorage extends Storage {
 this.datanodeUuid = newDatanodeUuid;
   }
 
-  /** Create an ID for this storage. */
-  public synchronized void createStorageID(StorageDirectory sd) {
-    if (sd.getStorageUuid() == null) {
+  /** Create an ID for this storage.
+   * @return true if a new storage ID was generated.
+   * */
+  public synchronized boolean createStorageID(
+      StorageDirectory sd, boolean regenerateStorageIds) {
+    final String oldStorageID = sd.getStorageUuid();
+    if (oldStorageID == null || regenerateStorageIds) {
       sd.setStorageUuid(DatanodeStorage.generateUuid());
+      LOG.info("Generated new storageID " + sd.getStorageUuid() +
+          " for directory " + sd.getRoot() +
+          (oldStorageID == null ? "" : (" to replace " + oldStorageID)));
+      return true;
     }
+    return false;
   }
 
   /**
@@ -677,20 +687,25 @@ public class DataStorage extends Storage {
           + sd.getRoot().getCanonicalPath() + ": namenode clusterID = "
           + nsInfo.getClusterID() + "; datanode clusterID = " + getClusterID());
     }
-
-    // After addition of the federation feature, ctime check is only 
-    // meaningful at BlockPoolSliceStorage level. 
 
-    // regular start up. 
+    // Clusters previously upgraded from layout versions earlier than
+    // ADD_DATANODE_AND_STORAGE_UUIDS failed to correctly generate a
+    // new storage ID. We check for that and fix it now.
+    boolean haveValidStorageId =
+        DataNodeLayoutVersion.supports(
+            LayoutVersion.Feature.ADD_DATANODE_AND_STORAGE_UUIDS, layoutVersion) &&
+        DatanodeStorage.isValidStorageId(sd.getStorageUuid());
+
+    // regular start up.
     if (this.layoutVersion == HdfsConstants.DATANODE_LAYOUT_VERSION) {
-      createStorageID(sd);
+      createStorageID(sd, !haveValidStorageId);
       return; // regular startup
     }
-
+
     // do upgrade
     if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION) {
       doUpgrade(datanode, sd, nsInfo);  // upgrade
-  
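
The core of the fix is an idempotent validate-or-regenerate step at startup: keep an existing ID only if it parses as a post-federation storage UUID, otherwise mint a new one. A hedged sketch of that shape, assuming the "DS-" + UUID convention for storage IDs (the real validity test lives in DatanodeStorage.isValidStorageId):

import java.util.UUID;

public class StorageIdSketch {
  // Accept only IDs of the form "DS-<uuid>"; anything else (including
  // pre-federation legacy IDs) is treated as invalid and regenerated.
  static boolean isValid(String id) {
    if (id == null || !id.startsWith("DS-")) {
      return false;
    }
    try {
      UUID.fromString(id.substring(3));
      return true;
    } catch (IllegalArgumentException e) {
      return false;
    }
  }

  static String validateOrRegenerate(String currentId) {
    return isValid(currentId) ? currentId : "DS-" + UUID.randomUUID();
  }
}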

[26/50] [abbrv] hadoop git commit: HADOOP-11500. InputStream is left unclosed in ApplicationClassLoader. Contributed by Ted Yu.

2015-01-26 Thread zhz
HADOOP-11500. InputStream is left unclosed in ApplicationClassLoader. 
Contributed by Ted Yu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f90fca1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f90fca1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f90fca1

Branch: refs/heads/HDFS-EC
Commit: 3f90fca19117ed9d59391f23a30140bd4ff09f98
Parents: ea85ae4
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Fri Jan 23 00:50:19 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:28 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt| 3 +++
 .../java/org/apache/hadoop/util/ApplicationClassLoader.java| 6 ++
 2 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f90fca1/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index eb9015c..aaa7041 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -744,6 +744,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11209. Configuration#updatingResource/finalParameters are not
 thread-safe. (Varun Saxena via ozawa)
 
+HADOOP-11500. InputStream is left unclosed in ApplicationClassLoader.
+(Ted Yu via ozawa)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f90fca1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
index 9f16b61..6d37c28 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
@@ -67,10 +67,8 @@ public class ApplicationClassLoader extends URLClassLoader {
   };
 
   static {
-    InputStream is = null;
-    try {
-      is = ApplicationClassLoader.class.getClassLoader().
-          getResourceAsStream(PROPERTIES_FILE);
+    try (InputStream is = ApplicationClassLoader.class.getClassLoader()
+        .getResourceAsStream(PROPERTIES_FILE);) {
       if (is == null) {
         throw new ExceptionInInitializerError("properties file " +
             PROPERTIES_FILE + " is not found");
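
try-with-resources closes the stream on every exit path, including the exception paths that the old "InputStream is = null; try { ... }" form leaked on when no matching finally block existed. A minimal standalone sketch of the idiom:

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class ResourceLoadSketch {
  // The stream is closed automatically whether load() succeeds or throws.
  static Properties loadProps(String resource) throws IOException {
    try (InputStream is =
        ResourceLoadSketch.class.getClassLoader().getResourceAsStream(resource)) {
      if (is == null) {
        throw new IOException("resource " + resource + " is not found");
      }
      Properties props = new Properties();
      props.load(is);
      return props;
    }
  }
}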



[41/50] [abbrv] hadoop git commit: HDFS-7644. minor typo in HttpFS doc (Charles Lamb via aw)

2015-01-26 Thread zhz
HDFS-7644. minor typo in HttpFS doc (Charles Lamb via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3c4294b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3c4294b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3c4294b

Branch: refs/heads/HDFS-EC
Commit: f3c4294bce71116d81e31c808e3e0782c74b455f
Parents: d56e750
Author: Allen Wittenauer a...@apache.org
Authored: Fri Jan 23 13:46:31 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:30 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3c4294b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm
index 2920cd9..f51e743 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm
@@ -64,7 +64,7 @@ Hadoop HDFS over HTTP - Documentation Sets ${project.version}
 
   HttpFS was inspired by Hadoop HDFS proxy.
 
-  HttpFS can be seening as a full rewrite of Hadoop HDFS proxy.
+  HttpFS can be seen as a full rewrite of Hadoop HDFS proxy.
 
   Hadoop HDFS proxy provides a subset of file system operations (read only),
   HttpFS provides support for all file system operations.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3c4294b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c9bee1a..6849229 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -775,6 +775,8 @@ Release 2.7.0 - UNRELEASED
 causes the fsvolume reference being released incorrectly. (Lei Xu via
 yliu)
 
+HDFS-7644. minor typo in HttpFS doc (Charles Lamb via aw)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[07/50] [abbrv] hadoop git commit: HDFS-7641. Update archival storage user doc for list/set/get block storage policies. (yliu)

2015-01-26 Thread zhz
HDFS-7641. Update archival storage user doc for list/set/get block storage 
policies. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a6d1f21e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a6d1f21e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a6d1f21e

Branch: refs/heads/HDFS-EC
Commit: a6d1f21e2a540856d29e59d1fe4dd824343dc19a
Parents: 3111cfb
Author: yliu y...@apache.org
Authored: Wed Jan 21 02:06:18 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:25 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../src/site/apt/ArchivalStorage.apt.vm | 22 ++--
 2 files changed, 14 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6d1f21e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 932fee8..964e278 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -741,6 +741,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7637. Fix the check condition for reserved path. (Yi Liu via jing9)
 
+HDFS-7641. Update archival storage user doc for list/set/get block storage
+policies. (yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6d1f21e/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm
index 69674c7..5336ea3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm
@@ -189,7 +189,7 @@ hdfs mover [-p <files/dirs> | -f <local file name>]
   * Command:
 
 +--+
-hdfs storagepolicies
+hdfs storagepolicies -listPolicies
 +--+
 
   * Arguments: none.
@@ -201,16 +201,16 @@ hdfs storagepolicies
   * Command:
 
 +--+
-hdfs dfsadmin -setStoragePolicy <path> <policyName>
+hdfs storagepolicies -setStoragePolicy -path <path> -policy <policy>
 +--+
 
   * Arguments:
 
-*--+-+
-| \<path\>       | The path referring to either a directory or a file. |
-*--+-+
-| \<policyName\> | The name of the storage policy. |
-*--+-+
+*--+-+
+| -path \<path\>     | The path referring to either a directory or a file. |
+*--+-+
+| -policy \<policy\> | The name of the storage policy. |
+*--+-+
 
   []
 
@@ -221,13 +221,13 @@ hdfs dfsadmin -setStoragePolicy <path> <policyName>
   * Command:
 
 +--+
-hdfs dfsadmin -getStoragePolicy <path>
+hdfs storagepolicies -getStoragePolicy -path <path>
 +--+
 
   * Arguments:
 
-*--+-+
-| \<path\>   | The path referring to either a directory or a file. |
-*--+-+
+*+-+
+| -path \<path\> | The path referring to either a directory or a file. |
+*+-+
 
   []



[46/50] [abbrv] hadoop git commit: YARN-3024. LocalizerRunner should give DIE action when all resources are localized. Contributed by Chengbing Liu

2015-01-26 Thread zhz
YARN-3024. LocalizerRunner should give DIE action when all resources are
localized. Contributed by Chengbing Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3435388
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3435388
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3435388

Branch: refs/heads/HDFS-EC
Commit: f343538850878d208864af3faea2b430d9ab0a47
Parents: 5adb125
Author: Xuan xg...@apache.org
Authored: Sun Jan 25 19:37:57 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:31 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../localizer/ResourceLocalizationService.java  | 99 
 .../TestResourceLocalizationService.java| 71 +-
 3 files changed, 91 insertions(+), 82 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3435388/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7263c6f..0808678 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -200,6 +200,9 @@ Release 2.7.0 - UNRELEASED
 YARN-2800. Remove MemoryNodeLabelsStore and add a way to enable/disable
 node labels feature. (Wangda Tan via ozawa)
 
+YARN-3024. LocalizerRunner should give DIE action when all resources are
+localized. (Chengbing Liu via xgong)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3435388/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 5440980..2f4fa5e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -763,7 +763,7 @@ public class ResourceLocalizationService extends 
CompositeService
*/
 
   if (rsrc.tryAcquire()) {
-if (rsrc.getState().equals(ResourceState.DOWNLOADING)) {
+if (rsrc.getState() == ResourceState.DOWNLOADING) {
   LocalResource resource = request.getResource().getRequest();
   try {
 Path publicRootPath =
@@ -895,7 +895,7 @@ public class ResourceLocalizationService extends 
CompositeService
  LocalizedResource nRsrc = evt.getResource();
  // Resource download should take place ONLY if resource is in
  // Downloading state
- if (!ResourceState.DOWNLOADING.equals(nRsrc.getState())) {
+ if (nRsrc.getState() != ResourceState.DOWNLOADING) {
i.remove();
continue;
  }
@@ -906,7 +906,7 @@ public class ResourceLocalizationService extends 
CompositeService
   * 2) Resource is still in DOWNLOADING state
   */
  if (nRsrc.tryAcquire()) {
-   if (nRsrc.getState().equals(ResourceState.DOWNLOADING)) {
+   if (nRsrc.getState() == ResourceState.DOWNLOADING) {
  LocalResourceRequest nextRsrc = nRsrc.getRequest();
  LocalResource next =
  recordFactory.newRecordInstance(LocalResource.class);
@@ -936,41 +936,9 @@ public class ResourceLocalizationService extends 
CompositeService
   String user = context.getUser();
   ApplicationId applicationId =
   
context.getContainerId().getApplicationAttemptId().getApplicationId();
-  // The localizer has just spawned. Start giving it resources for
-  // remote-fetching.
-  if (remoteResourceStatuses.isEmpty()) {
-LocalResource next = findNextResource();
-if (next != null) {
-  response.setLocalizerAction(LocalizerAction.LIVE);
-  try {
-ArrayListResourceLocalizationSpec rsrcs =
-new ArrayListResourceLocalizationSpec();
-ResourceLocalizationSpec rsrc =
-NodeManagerBuilderUtils.newResourceLocalizationSpec(next,
-
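
The equals()-to-== changes in this patch follow the usual idiom for enum comparison: == is null-safe on the left operand and type-checked at compile time, while equals() can throw NullPointerException on a null receiver and silently returns false across unrelated types. A minimal illustration (State is a stand-in enum, not the YARN ResourceState):

    public class EnumCompareDemo {
      enum State { DOWNLOADING, LOCALIZED, FAILED }

      public static void main(String[] args) {
        State s = null;
        // With ==, a null reference simply compares unequal.
        System.out.println(s == State.DOWNLOADING);  // false
        // With equals() on the nullable side, s.equals(State.DOWNLOADING)
        // would throw NullPointerException. And == on mismatched enum types
        // fails to compile, while equals() silently returns false.
      }
    }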

[12/50] [abbrv] hadoop git commit: HADOOP-11484: move CHANGES.txt entry to 3.0

2015-01-26 Thread zhz
HADOOP-11484: move CHANGES.txt entry to 3.0


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc9978a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc9978a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc9978a7

Branch: refs/heads/HDFS-EC
Commit: dc9978a70457f6ed9d94ee68b40c87bf0282cd73
Parents: a865d7c
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Jan 21 11:32:31 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:26 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc9978a7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3bd67fe..2ab8ea3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -284,6 +284,9 @@ Trunk (Unreleased)
 HDFS-7643. Test case to ensure lazy persist files cannot be truncated.
 (Yi Liu via Arpit Agarwal)
 
+HADOOP-11484. hadoop-mapreduce-client-nativetask fails to build on ARM
+AARCH64 due to x86 asm statements (Edward Nevill via Colin P. McCabe)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -753,9 +756,6 @@ Release 2.7.0 - UNRELEASED
 HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via
 Colin P. McCabe)
 
-HADOOP-11484. hadoop-mapreduce-client-nativetask fails to build on ARM
-AARCH64 due to x86 asm statements (Edward Nevill via Colin P. McCabe)
-
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[15/50] [abbrv] hadoop git commit: HADOOP-11484. hadoop-mapreduce-client-nativetask fails to build on ARM AARCH64 due to x86 asm statements (Edward Nevill via Colin P. McCabe)

2015-01-26 Thread zhz
HADOOP-11484. hadoop-mapreduce-client-nativetask fails to build on ARM AARCH64 
due to x86 asm statements (Edward Nevill via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a865d7c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a865d7c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a865d7c4

Branch: refs/heads/HDFS-EC
Commit: a865d7c4f9b3ecc6c7fb52edb50a70b5cd326f6d
Parents: cf76280
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Jan 21 11:24:09 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:26 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../src/main/native/src/lib/primitives.h | 8 
 .../src/main/native/src/util/Checksum.cc | 5 +
 3 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a865d7c4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1801d2f..3bd67fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -753,6 +753,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via
 Colin P. McCabe)
 
+HADOOP-11484. hadoop-mapreduce-client-nativetask fails to build on ARM
+AARCH64 due to x86 asm statements (Edward Nevill via Colin P. McCabe)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a865d7c4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h
index 4c0c1a7..3bf5f76 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h
@@ -97,11 +97,18 @@ inline void simple_memcpy(void * dest, const void * src, 
size_t len) {
  * little-endian to big-endian or vice versa
  */
 inline uint32_t bswap(uint32_t val) {
+#ifdef __aarch64__
+  __asm__("rev %w[dst], %w[src]" : [dst]"=r"(val) : [src]"r"(val));
+#else
   __asm__("bswap %0" : "=r" (val) : "0" (val));
+#endif
   return val;
 }
 
 inline uint64_t bswap64(uint64_t val) {
+#ifdef __aarch64__
+  __asm__("rev %[dst], %[src]" : [dst]"=r"(val) : [src]"r"(val));
+#else
 #ifdef __X64
   __asm__("bswapq %0" : "=r" (val) : "0" (val));
 #else
@@ -115,6 +122,7 @@ inline uint64_t bswap64(uint64_t val) {
   return (lower << 32) + higher;
 
 #endif
+#endif
   return val;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a865d7c4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/Checksum.cc
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/Checksum.cc
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/Checksum.cc
index 191e093..be800c5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/Checksum.cc
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/util/Checksum.cc
@@ -579,6 +579,11 @@ const uint32_t CRC32C_T8_7[256] = {0x, 0x493C7D27, 
0x9278FA4E, 0xDB44876
 0xCF56CE31, 0x14124958, 0x5D2E347F, 0xE54C35A1, 0xAC704886, 0x7734CFEF, 
0x3E08B2C8, 0xC451B7CC,
 0x8D6DCAEB, 0x56294D82, 0x1F1530A5};
 
+#ifdef __aarch64__
+// Awaiting HW implementation
+#define SOFTWARE_CRC
+#endif
+
 #ifndef SOFTWARE_CRC
 #define USE_HARDWARE_CRC32C 1
 #endif
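
For intuition about what the guarded asm above computes: both the x86 bswap and the AArch64 rev instruction reverse the byte order of a register, i.e. convert between little- and big-endian representations. The JDK exposes the same operation as an intrinsic; a small sketch in plain Java (not Hadoop code):

    public class ByteSwapDemo {
      public static void main(String[] args) {
        int v = 0x11223344;
        // Integer.reverseBytes is the Java analogue of x86 bswap / AArch64 rev.
        System.out.printf("%08x%n", Integer.reverseBytes(v));    // 44332211
        long w = 0x1122334455667788L;
        System.out.printf("%016x%n", Long.reverseBytes(w));      // 8877665544332211
      }
    }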



[02/50] [abbrv] hadoop git commit: HDFS-7610. Add CHANGES.txt

2015-01-26 Thread zhz
HDFS-7610. Add CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49959239
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49959239
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49959239

Branch: refs/heads/HDFS-EC
Commit: 499592392d82b9d9bfb03d9050da6ed19061c26d
Parents: ef4453d
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Tue Jan 20 20:14:11 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:25 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49959239/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b032a48..2c7fbc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -747,6 +747,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7496. Fix FsVolume removal race conditions on the DataNode by
 reference-counting the volume instances (lei via cmccabe)
 
+HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via
+Colin P. McCabe)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[24/50] [abbrv] hadoop git commit: MAPREDUCE-3283. mapred classpath CLI does not display the complete classpath. Contributed by Varun Saxena.

2015-01-26 Thread zhz
MAPREDUCE-3283. mapred classpath CLI does not display the complete classpath. 
Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1ad0a88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1ad0a88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1ad0a88

Branch: refs/heads/HDFS-EC
Commit: c1ad0a881b9188db71da5d9d0b7735da9cbcd04e
Parents: a8c59ba
Author: cnauroth cnaur...@apache.org
Authored: Wed Jan 21 13:50:39 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:27 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 hadoop-mapreduce-project/bin/mapred |  4 +---
 hadoop-mapreduce-project/bin/mapred.cmd | 10 +-
 3 files changed, 13 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1ad0a88/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index a7379ec..489369d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -307,6 +307,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6206. TestAggregatedTransferRate fails on non-US systems (Jens
 Rabe via jlowe)
 
+MAPREDUCE-3283. mapred classpath CLI does not display the complete 
classpath
+(Varun Saxena via cnauroth)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1ad0a88/hadoop-mapreduce-project/bin/mapred
--
diff --git a/hadoop-mapreduce-project/bin/mapred 
b/hadoop-mapreduce-project/bin/mapred
index 066c438..6d0c781 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -78,9 +78,7 @@ case ${COMMAND} in
 HADOOP_OPTS=${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}
   ;;
   classpath)
-hadoop_finalize
-echo ${CLASSPATH}
-exit 0
+hadoop_do_classpath_subcommand "$@"
   ;;
   distcp)
 CLASS=org.apache.hadoop.tools.DistCp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1ad0a88/hadoop-mapreduce-project/bin/mapred.cmd
--
diff --git a/hadoop-mapreduce-project/bin/mapred.cmd 
b/hadoop-mapreduce-project/bin/mapred.cmd
index bb59c03..4085599 100644
--- a/hadoop-mapreduce-project/bin/mapred.cmd
+++ b/hadoop-mapreduce-project/bin/mapred.cmd
if "%1" == "--loglevel" (
   @rem add modules to CLASSPATH
   set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\modules\*
 
+  if "%mapred-command%" == "classpath" (
+if not defined mapred-command-arguments (
+  @rem No need to bother starting up a JVM for this simple case.
+  @echo %CLASSPATH%
+  exit /b
+)
+  )
+
   call :%mapred-command% %mapred-command-arguments%
   set java_arguments=%JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% 
%CLASS% %mapred-command-arguments%
   call %JAVA% %java_arguments%
@@ -103,7 +111,7 @@ goto :eof
 
 
 :classpath
-  @echo %CLASSPATH%
+  set CLASS=org.apache.hadoop.util.Classpath
   goto :eof
 
 :job



[08/50] [abbrv] hadoop git commit: HDFS-7496: add to CHANGES.txt

2015-01-26 Thread zhz
HDFS-7496: add to CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c130aec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c130aec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c130aec

Branch: refs/heads/HDFS-EC
Commit: 8c130aecc562c9334581f13f06264e83ae352d1f
Parents: 9014305
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Tue Jan 20 19:32:19 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:25 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c130aec/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 964e278..b032a48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -744,6 +744,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7641. Update archival storage user doc for list/set/get block storage
 policies. (yliu)
 
+HDFS-7496. Fix FsVolume removal race conditions on the DataNode by
+reference-counting the volume instances (lei via cmccabe)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[21/50] [abbrv] hadoop git commit: HDFS-7430. Refactor the BlockScanner to use O(1) memory and use multiple threads (cmccabe)

2015-01-26 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/df4edd9a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
new file mode 100644
index 000..781b4d3
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -0,0 +1,652 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
+import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.util.DataTransferThrottler;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * VolumeScanner scans a single volume.  Each VolumeScanner has its own thread.<p/>
+ * They are all managed by the DataNode's BlockScanner.
+ */
+public class VolumeScanner extends Thread {
+  public static final Logger LOG =
+  LoggerFactory.getLogger(VolumeScanner.class);
+
+  /**
+   * Number of seconds in a minute.
+   */
+  private final static int SECONDS_PER_MINUTE = 60;
+
+  /**
+   * Number of minutes in an hour.
+   */
+  private final static int MINUTES_PER_HOUR = 60;
+
+  /**
+   * Name of the block iterator used by this scanner.
+   */
+  private final static String BLOCK_ITERATOR_NAME = "scanner";
+
+  /**
+   * The configuration.
+   */
+  private final Conf conf;
+
+  /**
+   * The DataNode this VolumeScanner is associated with.
+   */
+  private final DataNode datanode;
+
+  /**
+   * A reference to the volume that we're scanning.
+   */
+  private final FsVolumeReference ref;
+
+  /**
+   * The volume that we're scanning.
+   */
+  final FsVolumeSpi volume;
+
+  /**
+   * The number of scanned bytes in each minute of the last hour.<p/>
+   *
+   * This array is managed as a circular buffer.  We take the monotonic time and
+   * divide it up into one-minute periods.  Each entry in the array represents
+   * how many bytes were scanned during that period.
+   */
+  private final long scannedBytes[] = new long[MINUTES_PER_HOUR];
+
+  /**
+   * The sum of all the values of scannedBytes.
+   */
+  private long scannedBytesSum = 0;
+
+  /**
+   * The throttler to use with BlockSender objects.
+   */
+  private final DataTransferThrottler throttler = new DataTransferThrottler(1);
+
+  /**
+   * The null output stream to use with BlockSender objects.
+   */
+  private final DataOutputStream nullStream =
+  new DataOutputStream(new IOUtils.NullOutputStream());
+
+  /**
+   * The block iterators associated with this VolumeScanner.<p/>
+   *
+   * Each block pool has its own BlockIterator.
+   */
+  private final List<BlockIterator> blockIters =
+  new LinkedList<BlockIterator>();
+
+  /**
+   * The current block iterator, or null if there is none.
+   */
+  private BlockIterator curBlockIter = null;
+
+  /**
+   * True if the thread is stopping.<p/>
+   * Protected by this object's lock.
+   */
+  private boolean stopping = false;
+
+  /**
+   * The current minute, in monotonic terms.
+   */
+  private long curMinute = 0;
+
+  /**
+   * Handles scan results.
+   */
+  private final ScanResultHandler resultHandler;
+
+  private final Statistics stats = new Statistics();
+
+  static class 
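
The scannedBytes array documented above is a sliding one-hour window kept as a circular buffer of per-minute totals. A self-contained sketch of that bookkeeping, assuming the same minute-granularity monotonic clock; class and method names here are illustrative, not the VolumeScanner internals:

    public class ScanRateTracker {
      private static final int MINUTES_PER_HOUR = 60;
      private final long[] scannedBytes = new long[MINUTES_PER_HOUR];
      private long scannedBytesSum = 0;
      private long curMinute = 0;

      /** Advance the window, retiring buckets that are now older than an hour. */
      public void advanceTo(long monotonicMinute) {
        while (curMinute < monotonicMinute) {
          curMinute++;
          int slot = (int) (curMinute % MINUTES_PER_HOUR);
          scannedBytesSum -= scannedBytes[slot];  // drop the bucket being reused
          scannedBytes[slot] = 0;
        }
      }

      /** Record bytes scanned during the current minute. */
      public void add(long bytes) {
        scannedBytes[(int) (curMinute % MINUTES_PER_HOUR)] += bytes;
        scannedBytesSum += bytes;
      }

      /** Total bytes scanned over the trailing hour. */
      public long lastHourBytes() {
        return scannedBytesSum;
      }
    }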

[19/50] [abbrv] hadoop git commit: HDFS-7430. Refactor the BlockScanner to use O(1) memory and use multiple threads (cmccabe)

2015-01-26 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/df4edd9a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
index cffb930..4c703ba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
@@ -48,7 +48,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
-import org.apache.hadoop.hdfs.server.datanode.DataBlockScanner;
+import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -89,7 +89,7 @@ public class SnapshotTestHelper {
 GenericTestUtils.disableLog(LogFactory.getLog(DirectoryScanner.class));
 GenericTestUtils.disableLog(LogFactory.getLog(MetricsSystemImpl.class));
 
-GenericTestUtils.disableLog(DataBlockScanner.LOG);
+GenericTestUtils.disableLog(BlockScanner.LOG);
 GenericTestUtils.disableLog(HttpServer2.LOG);
 GenericTestUtils.disableLog(DataNode.LOG);
 GenericTestUtils.disableLog(BlockPoolSliceStorage.LOG);



[20/50] [abbrv] hadoop git commit: HDFS-7430. Refactor the BlockScanner to use O(1) memory and use multiple threads (cmccabe)

2015-01-26 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/df4edd9a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
deleted file mode 100644
index 9e78c10..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
+++ /dev/null
@@ -1,551 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.net.InetSocketAddress;
-import java.net.URL;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.TimeoutException;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.server.datanode.DataBlockScanner;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
-import org.junit.Test;
-
-/**
- * This test verifies that block verification occurs on the datanode
- */
-public class TestDatanodeBlockScanner {
-  
-  private static final Log LOG = 
- LogFactory.getLog(TestDatanodeBlockScanner.class);
-  
-  private static final long TIMEOUT = 20000; // 20 sec.
-  
-  private static final Pattern pattern =
- Pattern.compile(".*?(blk_[-]*\\d+).*?scan time\\s*:\\s*(\\d+)");
-  
-  private static final Pattern pattern_blockVerify =
- Pattern.compile(".*?(SCAN_PERIOD)\\s*:\\s*(\\d+.*?)");
-  
-  static {
-((Log4JLogger)FSNamesystem.auditLog).getLogger().setLevel(Level.WARN);
-  }
-  /**
-   * This connects to datanode and fetches block verification data.
-   * It repeats this until the given block has a verification time > newTime.
-   * @param newTime - validation timestamps before newTime are old, the
-   *result of previous validations.  This method waits until a 
new
-   *validation timestamp is obtained.  If no validator runs soon
-   *enough, the method will time out.
-   * @return - the new validation timestamp
-   * @throws IOException
-   * @throws TimeoutException
-   */
-  private static long waitForVerification(int infoPort, FileSystem fs, 
-  Path file, int blocksValidated, 
-  long newTime, long timeout) 
-  throws IOException, TimeoutException {
-URL url = new URL("http://localhost:" + infoPort +
-  "/blockScannerReport?listblocks");
-long lastWarnTime = Time.monotonicNow();
-if (newTime <= 0) newTime = 1L;
-long verificationTime = 0;
-
-String block = DFSTestUtil.getFirstBlock(fs, file).getBlockName();
-long failtime = (timeout <= 0) ? Long.MAX_VALUE
-: Time.monotonicNow() + timeout;
-while (verificationTime < newTime) {
-  if (failtime < Time.monotonicNow()) {
-throw new TimeoutException("failed to achieve block verification after "
-+ timeout + " msec.  Current verification timestamp = "
-+ verificationTime + ", requested verification time > "
-  

[35/50] [abbrv] hadoop git commit: HDFS-7660. BlockReceiver#close() might be called multiple times, which causes the fsvolume reference being released incorrectly. (Lei Xu via yliu)

2015-01-26 Thread zhz
HDFS-7660. BlockReceiver#close() might be called multiple times, which causes 
the fsvolume reference being released incorrectly. (Lei Xu via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a96c6de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a96c6de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a96c6de

Branch: refs/heads/HDFS-EC
Commit: 8a96c6dee1a68969d31799a30d6f48894a4d13c1
Parents: c265a0e
Author: yliu y...@apache.org
Authored: Fri Jan 23 02:37:44 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:29 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 4 
 .../org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java| 3 ++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a96c6de/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 74eb160..9176ec7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -768,6 +768,10 @@ Release 2.7.0 - UNRELEASED
 HDFS-3519. Checkpoint upload may interfere with a concurrent saveNamespace.
 (Ming Ma via cnauroth)
 
+HDFS-7660. BlockReceiver#close() might be called multiple times, which
+causes the fsvolume reference being released incorrectly. (Lei Xu via
+yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a96c6de/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 12041a6..3d37df5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -123,7 +123,7 @@ class BlockReceiver implements Closeable {
   private boolean syncOnClose;
   private long restartBudget;
   /** the reference of the volume where the block receiver writes to */
-  private final ReplicaHandler replicaHandler;
+  private ReplicaHandler replicaHandler;
 
   /**
* for replaceBlock response
@@ -334,6 +334,7 @@ class BlockReceiver implements Closeable {
 }
 if (replicaHandler != null) {
   IOUtils.cleanup(null, replicaHandler);
+  replicaHandler = null;
 }
 if (measuredFlushTime) {
   datanode.metrics.addFlushNanos(flushTotalNanos);
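
The one-line fix above (nulling replicaHandler after cleanup) is the standard way to make close() idempotent: release the resource once, then drop the reference so later calls become no-ops. The pattern in isolation, as a hedged sketch with illustrative names:

    import java.io.Closeable;
    import java.io.IOException;

    class IdempotentHolder implements Closeable {
      private Closeable resource;  // e.g. a volume reference

      IdempotentHolder(Closeable resource) {
        this.resource = resource;
      }

      @Override
      public synchronized void close() throws IOException {
        if (resource != null) {
          resource.close();
          resource = null;  // a second close() is now a harmless no-op
        }
      }
    }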



[32/50] [abbrv] hadoop git commit: HADOOP-11482. Use correct UGI when KMSClientProvider is called by a proxy user. Contributed by Arun Suresh.

2015-01-26 Thread zhz
HADOOP-11482. Use correct UGI when KMSClientProvider is called by a proxy user. 
Contributed by Arun Suresh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e02d860
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e02d860
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e02d860

Branch: refs/heads/HDFS-EC
Commit: 2e02d860186572e539cad7f9f4a7723ab7b43237
Parents: 5177c14
Author: Andrew Wang w...@apache.org
Authored: Fri Jan 23 12:11:15 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:29 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../crypto/key/kms/KMSClientProvider.java   | 31 
 .../hadoop/crypto/key/kms/server/TestKMS.java   |  7 +
 3 files changed, 35 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e02d860/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6bedd4d..6a34092 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -756,6 +756,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11507 Hadoop RPC Authentication problem with different user locale.
 (Talat UYARER via stevel)
 
+HADOOP-11482. Use correct UGI when KMSClientProvider is called by a proxy
+user. Contributed by Arun Suresh.
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e02d860/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 0464f55..97ab253 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -787,25 +787,44 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   }
 
   @Override
-  public Token<?>[] addDelegationTokens(String renewer,
+  public Token<?>[] addDelegationTokens(final String renewer,
   Credentials credentials) throws IOException {
 Token<?>[] tokens = null;
 Text dtService = getDelegationTokenService();
 Token<?> token = credentials.getToken(dtService);
 if (token == null) {
-  URL url = createURL(null, null, null, null);
-  DelegationTokenAuthenticatedURL authUrl =
+  final URL url = createURL(null, null, null, null);
+  final DelegationTokenAuthenticatedURL authUrl =
   new DelegationTokenAuthenticatedURL(configurator);
   try {
-token = authUrl.getDelegationToken(url, authToken, renewer);
+// 'actualUGI' is the UGI of the user creating the client 
+// It is possible that the creator of the KMSClientProvider
+// calls this method on behalf of a proxyUser (the doAsUser).
+// In which case this call has to be made as the proxy user.
+UserGroupInformation currentUgi = 
UserGroupInformation.getCurrentUser();
+final String doAsUser = (currentUgi.getAuthenticationMethod() ==
+UserGroupInformation.AuthenticationMethod.PROXY)
+? currentUgi.getShortUserName() : null;
+
+token = actualUgi.doAs(new PrivilegedExceptionAction<Token<?>>() {
+  @Override
+  public Token<?> run() throws Exception {
+// Not using the cached token here.. Creating a new token here
+// everytime.
+return authUrl.getDelegationToken(url,
+new DelegationTokenAuthenticatedURL.Token(), renewer, 
doAsUser);
+  }
+});
 if (token != null) {
   credentials.addToken(token.getService(), token);
   tokens = new Token<?>[] { token };
 } else {
   throw new IOException("Got NULL as delegation token");
 }
-  } catch (AuthenticationException ex) {
-throw new IOException(ex);
+  } catch (InterruptedException e) {
+Thread.currentThread().interrupt();
+  } catch (Exception e) {
+throw new IOException(e);
   }
 }
 return tokens;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e02d860/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
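
The fix wraps the token fetch in actualUgi.doAs() and passes the short name of the proxied user. A compact sketch of that proxy-user pattern using the public UserGroupInformation API; the user name "alice" is a placeholder:

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class ProxyUserDemo {
      public static void main(String[] args) throws Exception {
        UserGroupInformation realUser = UserGroupInformation.getLoginUser();
        // "alice" is a hypothetical end user being proxied by the login user.
        UserGroupInformation proxyUgi =
            UserGroupInformation.createProxyUser("alice", realUser);
        String who = proxyUgi.doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws Exception {
            // Calls made here run as realUser acting on behalf of "alice".
            return UserGroupInformation.getCurrentUser().getUserName();
          }
        });
        System.out.println(who);  // prints "alice"
      }
    }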

[34/50] [abbrv] hadoop git commit: HADOOP-11507 Hadoop RPC Authentication problem with different user locale. (Talat UYARER via stevel)

2015-01-26 Thread zhz
HADOOP-11507 Hadoop RPC Authentication problem with different user locale. 
(Talat UYARER via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5177c148
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5177c148
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5177c148

Branch: refs/heads/HDFS-EC
Commit: 5177c148d22a08f6cd9015ab6f8d7050ccbbade2
Parents: 7d62e90
Author: Steve Loughran ste...@apache.org
Authored: Fri Jan 23 19:36:37 2015 +
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:29 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/security/SaslPropertiesResolver.java   | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5177c148/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index bab2220..6bedd4d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -753,6 +753,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11493. Fix some typos in kms-acls.xml description.
 (Charles Lamb via aajisaka)
 
+HADOOP-11507 Hadoop RPC Authentication problem with different user locale.
+(Talat UYARER via stevel)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5177c148/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPropertiesResolver.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPropertiesResolver.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPropertiesResolver.java
index c4fc965..0b49cfb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPropertiesResolver.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPropertiesResolver.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.security;
 
 import java.net.InetAddress;
+import java.util.Locale;
 import java.util.Map;
 import java.util.TreeMap;
 
@@ -65,7 +66,7 @@ public class SaslPropertiesResolver implements Configurable{
 CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION,
 QualityOfProtection.AUTHENTICATION.toString());
 for (int i=0; i  qop.length; i++) {
-  qop[i] = QualityOfProtection.valueOf(qop[i].toUpperCase()).getSaslQop();
+  qop[i] = 
QualityOfProtection.valueOf(qop[i].toUpperCase(Locale.ENGLISH)).getSaslQop();
 }
 properties.put(Sasl.QOP, StringUtils.join(",", qop));
 properties.put(Sasl.SERVER_AUTH, true);
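
The Locale.ENGLISH argument matters because String.toUpperCase() is locale-sensitive: under a Turkish default locale, 'i' uppercases to the dotted capital 'İ' (U+0130), so Enum.valueOf() on the result throws IllegalArgumentException. A minimal reproduction; the enum mirrors the shape of the SASL QualityOfProtection values, not the actual Hadoop definition:

    import java.util.Locale;

    public class LocaleUpperDemo {
      enum QualityOfProtection { AUTHENTICATION, INTEGRITY, PRIVACY }

      public static void main(String[] args) {
        String qop = "integrity";
        // Turkish locale: 'i' uppercases to dotted capital 'İ' (U+0130).
        System.out.println(qop.toUpperCase(new Locale("tr", "TR")));  // İNTEGRİTY
        // valueOf("İNTEGRİTY") would throw IllegalArgumentException;
        // pinning the locale keeps the mapping stable:
        System.out.println(QualityOfProtection.valueOf(
            qop.toUpperCase(Locale.ENGLISH)));                        // INTEGRITY
      }
    }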



[44/50] [abbrv] hadoop git commit: Move truncate related messages in CHANGES.txt to branch 2 section. (shv)

2015-01-26 Thread zhz
Move truncate related messages in CHANGES.txt to branch 2 section. (shv)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e461d62c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e461d62c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e461d62c

Branch: refs/heads/HDFS-EC
Commit: e461d62cfa6111cb5d31de2b9d1a024cf6978b9f
Parents: 27c98cc
Author: Konstantin V Shvachko s...@apache.org
Authored: Sat Jan 24 17:49:22 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:30 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  6 ++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 34 ++--
 2 files changed, 20 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e461d62c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6a34092..c0617e8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -19,9 +19,6 @@ Trunk (Unreleased)
 
 HADOOP-11353. Add support for .hadooprc (aw)
 
-HADOOP-11490. Expose truncate API via FileSystem and shell command.
-(Milan Desai via shv)
-
   IMPROVEMENTS
 
 HADOOP-8017. Configure hadoop-main pom to get rid of M2E plugin execution
@@ -386,6 +383,9 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-8989. hadoop fs -find feature (Jonathan Allen via aw)
 
+HADOOP-11490. Expose truncate API via FileSystem and shell command.
+(Milan Desai via shv)
+
   IMPROVEMENTS
 
 HADOOP-11483. HardLink.java should use the jdk7 createLink method 
(aajisaka)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e461d62c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cca755e..21c8374 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -18,10 +18,6 @@ Trunk (Unreleased)
 
 HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
 
-HDFS-3107. Introduce truncate. (Plamen Jeliazkov via shv)
-
-HDFS-7056. Snapshot support for truncate. (Plamen Jeliazkov and shv)
-
   IMPROVEMENTS
 
 HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.
@@ -143,9 +139,6 @@ Trunk (Unreleased)
 HDFS-7430. Rewrite the BlockScanner to use O(1) memory and use multiple
 threads (cmccabe)
 
-HDFS-7659. truncate should check negative value of the new length.
-(Yi Liu via shv)
-
   OPTIMIZATIONS
 
   BUG FIXES
@@ -280,16 +273,6 @@ Trunk (Unreleased)
 
 HDFS-7581. HDFS documentation needs updating post-shell rewrite (aw)
 
-HDFS-7606. Fix potential NPE in INodeFile.getBlocks(). (Byron Wong via shv)
-
-HDFS-7638: Small fix and few refinements for FSN#truncate. (yliu)
-
-HDFS-7634. Disallow truncation of Lazy persist files. (Yi Liu via
-Arpit Agarwal)
-
-HDFS-7643. Test case to ensure lazy persist files cannot be truncated.
-(Yi Liu via Arpit Agarwal)
-
 HADOOP-11484. hadoop-mapreduce-client-nativetask fails to build on ARM
 AARCH64 due to x86 asm statements (Edward Nevill via Colin P. McCabe)
 
@@ -320,6 +303,10 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7449. Add metrics to NFS gateway (brandonli)
 
+HDFS-3107. Introduce truncate. (Plamen Jeliazkov via shv)
+
+HDFS-7056. Snapshot support for truncate. (Plamen Jeliazkov and shv)
+
   IMPROVEMENTS
 
 HDFS-7055. Add tracing to DFSInputStream (cmccabe)
@@ -785,6 +772,19 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7644. minor typo in HttpFS doc (Charles Lamb via aw)
 
+HDFS-7606. Fix potential NPE in INodeFile.getBlocks(). (Byron Wong via shv)
+
+HDFS-7638: Small fix and few refinements for FSN#truncate. (yliu)
+
+HDFS-7634. Disallow truncation of Lazy persist files. (Yi Liu via
+Arpit Agarwal)
+
+HDFS-7643. Test case to ensure lazy persist files cannot be truncated.
+(Yi Liu via Arpit Agarwal)
+
+HDFS-7659. truncate should check negative value of the new length.
+(Yi Liu via shv)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[31/50] [abbrv] hadoop git commit: HDFS-3519. Checkpoint upload may interfere with a concurrent saveNamespace. Contributed by Ming Ma.

2015-01-26 Thread zhz
HDFS-3519. Checkpoint upload may interfere with a concurrent saveNamespace. 
Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3b92a12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3b92a12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3b92a12

Branch: refs/heads/HDFS-EC
Commit: d3b92a1230dab9009ee71ca4aa1a796be8927fb8
Parents: 8262acf
Author: cnauroth cnaur...@apache.org
Authored: Thu Jan 22 16:26:21 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:28 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/server/namenode/FSImage.java| 56 
 .../hdfs/server/namenode/ImageServlet.java  | 18 +++
 .../namenode/ha/TestStandbyCheckpoints.java |  2 +-
 4 files changed, 58 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3b92a12/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7c5c639..74eb160 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -765,6 +765,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7575. Upgrade should generate a unique storage ID for each
 volume. (Arpit Agarwal)
 
+HDFS-3519. Checkpoint upload may interfere with a concurrent saveNamespace.
+(Ming Ma via cnauroth)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3b92a12/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 8ac6926..3b5d2c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -29,9 +29,11 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -97,6 +99,15 @@ public class FSImage implements Closeable {
 
   protected NNStorageRetentionManager archivalManager;
 
+  /* Used to make sure there are no concurrent checkpoints for a given txid
+   * The checkpoint here could be one of the following operations.
+   * a. checkpoint when NN is in standby.
+   * b. admin saveNameSpace operation.
+   * c. download checkpoint file from any remote checkpointer.
+  */
+  private final Set<Long> currentlyCheckpointing =
+  Collections.<Long>synchronizedSet(new HashSet<Long>());
+
   /**
* Construct an FSImage
* @param conf Configuration
@@ -1058,18 +1069,26 @@ public class FSImage implements Closeable {
   editLog.endCurrentLogSegment(true);
 }
 long imageTxId = getLastAppliedOrWrittenTxId();
+if (!addToCheckpointing(imageTxId)) {
+  throw new IOException(
+  "FS image is being downloaded from another NN at txid " + imageTxId);
+}
 try {
-  saveFSImageInAllDirs(source, nnf, imageTxId, canceler);
-  storage.writeAll();
-} finally {
-  if (editLogWasOpen) {
-editLog.startLogSegmentAndWriteHeaderTxn(imageTxId + 1);
-// Take this opportunity to note the current transaction.
-// Even if the namespace save was cancelled, this marker
-// is only used to determine what transaction ID is required
-// for startup. So, it doesn't hurt to update it unnecessarily.
-storage.writeTransactionIdFileToStorage(imageTxId + 1);
+  try {
+saveFSImageInAllDirs(source, nnf, imageTxId, canceler);
+storage.writeAll();
+  } finally {
+if (editLogWasOpen) {
+  editLog.startLogSegmentAndWriteHeaderTxn(imageTxId + 1);
+  // Take this opportunity to note the current transaction.
+  // Even if the namespace save was cancelled, this marker
+  // is only used to determine what transaction ID is required
+  // for startup. So, it doesn't hurt to update it unnecessarily.
+  storage.writeTransactionIdFileToStorage(imageTxId + 1);
+}
   }
+} finally {
+  removeFromCheckpointing(imageTxId);
 }
   }
 
@@ -1078,7 +1097,22 @@ public 
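
The currentlyCheckpointing set above reduces "at most one checkpoint per txid" to an atomic add()/remove() protocol on a synchronized set, with the remove in a finally block so a failed save cannot wedge the guard. The shape of that idiom, sketched outside FSImage with illustrative names:

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    class TxidGuard {
      private final Set<Long> inProgress =
          Collections.synchronizedSet(new HashSet<Long>());

      void runExclusively(long txid, Runnable work) {
        if (!inProgress.add(txid)) {  // add() is atomic on a synchronized set
          throw new IllegalStateException("txid " + txid + " already checkpointing");
        }
        try {
          work.run();
        } finally {
          inProgress.remove(txid);    // always release, even on failure
        }
      }
    }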

[23/50] [abbrv] hadoop git commit: YARN-3078. LogCLIHelpers lacks of a blank space before string 'does not exist'. Contributed by Sam Liu.

2015-01-26 Thread zhz
YARN-3078. LogCLIHelpers lacks of a blank space before string 'does not exist'. 
Contributed by Sam Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73309fbc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73309fbc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73309fbc

Branch: refs/heads/HDFS-EC
Commit: 73309fbc8d8addb61893025677893da8b81f0e87
Parents: df4edd9
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Thu Jan 22 12:25:05 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:27 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73309fbc/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cab71a3..bdc31db 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -397,6 +397,9 @@ Release 2.7.0 - UNRELEASED
 YARN-2731. Fixed RegisterApplicationMasterResponsePBImpl to properly 
invoke 
 maybeInitBuilder. (Carlo Curino via wangda)
 
+YARN-3078. LogCLIHelpers lacks of a blank space before string 'does not 
exist'.
+(Sam Liu via ozawa)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73309fbc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
index 1546ece..df9bd32 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
@@ -212,11 +212,11 @@ public class LogCLIHelpers implements Configurable {
   }
 
   private static void logDirNotExist(String remoteAppLogDir) {
-System.out.println(remoteAppLogDir + "does not exist.");
+System.out.println(remoteAppLogDir + " does not exist.");
 System.out.println("Log aggregation has not completed or is not enabled.");
   }
 
   private static void emptyLogDir(String remoteAppLogDir) {
-System.out.println(remoteAppLogDir + "does not have any log files.");
+System.out.println(remoteAppLogDir + " does not have any log files.");
   }
 }



[05/50] [abbrv] hadoop git commit: HDFS-7496. Fix FsVolume removal race conditions on the DataNode by reference-counting the volume instances (lei via cmccabe)

2015-01-26 Thread zhz
HDFS-7496. Fix FsVolume removal race conditions on the DataNode by 
reference-counting the volume instances (lei via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9014305e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9014305e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9014305e

Branch: refs/heads/HDFS-EC
Commit: 9014305e0bb8cca43fd886562bf7fade545f58ac
Parents: a6d1f21
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Tue Jan 20 19:05:33 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:25 2015 -0800

--
 .../hdfs/server/datanode/BlockReceiver.java |  58 ++--
 .../hdfs/server/datanode/BlockSender.java   |  10 +
 .../hdfs/server/datanode/ReplicaHandler.java|  49 
 .../server/datanode/fsdataset/FsDatasetSpi.java |  13 +-
 .../datanode/fsdataset/FsVolumeReference.java   |  48 
 .../server/datanode/fsdataset/FsVolumeSpi.java  |  10 +
 .../datanode/fsdataset/ReplicaInputStreams.java |   6 +-
 .../impl/FsDatasetAsyncDiskService.java |  16 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 284 ---
 .../datanode/fsdataset/impl/FsVolumeImpl.java   | 121 +++-
 .../datanode/fsdataset/impl/FsVolumeList.java   |  76 -
 .../impl/RamDiskAsyncLazyPersistService.java|  19 +-
 .../src/main/proto/datatransfer.proto   |   2 +-
 .../hdfs/TestWriteBlockGetsBlockLengthHint.java |   2 +-
 .../server/datanode/SimulatedFSDataset.java |  35 ++-
 .../hdfs/server/datanode/TestBlockRecovery.java |   2 +-
 .../datanode/TestDataNodeHotSwapVolumes.java|  33 ++-
 .../server/datanode/TestDirectoryScanner.java   |   9 +-
 .../server/datanode/TestSimulatedFSDataset.java |   2 +-
 .../extdataset/ExternalDatasetImpl.java |  22 +-
 .../datanode/extdataset/ExternalVolumeImpl.java |   7 +
 .../fsdataset/impl/FsVolumeListTest.java|  94 ++
 .../fsdataset/impl/TestFsDatasetImpl.java   |   7 +-
 .../fsdataset/impl/TestWriteToReplica.java  |  12 +-
 24 files changed, 717 insertions(+), 220 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9014305e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 08c96be..df8dd5c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -26,7 +26,6 @@ import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileDescriptor;
 import java.io.FileOutputStream;
-import java.io.FileWriter;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.OutputStreamWriter;
@@ -49,10 +48,8 @@ import 
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
 import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
@@ -125,6 +122,8 @@ class BlockReceiver implements Closeable {
 
   private boolean syncOnClose;
   private long restartBudget;
+  /** the reference of the volume where the block receiver writes to */
+  private final ReplicaHandler replicaHandler;
 
   /**
* for replaceBlock response
@@ -179,48 +178,50 @@ class BlockReceiver implements Closeable {
   // Open local disk out
   //
   if (isDatanode) { //replication or move
-replicaInfo = datanode.data.createTemporary(storageType, block);
+replicaHandler = datanode.data.createTemporary(storageType, block);
   } else {
 switch (stage) {
 case PIPELINE_SETUP_CREATE:
-  replicaInfo = datanode.data.createRbw(storageType, block, 
allowLazyPersist);
+  replicaHandler = datanode.data.createRbw(storageType, block, 
allowLazyPersist);
   datanode.notifyNamenodeReceivingBlock(
-  block, replicaInfo.getStorageUuid());
+
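
The refactor threads a Closeable FsVolumeReference through BlockReceiver so a volume cannot be hot-removed while a reader or writer still holds it. A toy version of the counting idea, under the assumption (as in the patch) that removal is deferred until the count drains; names are illustrative:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicInteger;

    class CountedVolume {
      private final AtomicInteger refCount = new AtomicInteger(0);

      /** Take a reference; the volume stays alive until it is closed. */
      Closeable obtainReference() {
        refCount.incrementAndGet();
        return new Closeable() {
          @Override
          public void close() throws IOException {
            refCount.decrementAndGet();
          }
        };
      }

      /** Removal must wait until no one holds a reference. */
      boolean canRemove() {
        return refCount.get() == 0;
      }
    }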

[48/50] [abbrv] hadoop git commit: HADOOP-11450. Cleanup DistCpV1 not to use deprecated methods and fix javadocs. Contributed by Varun Saxena.

2015-01-26 Thread zhz
HADOOP-11450. Cleanup DistCpV1 not to use deprecated methods and fix javadocs. 
Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2de93dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2de93dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2de93dd

Branch: refs/heads/HDFS-EC
Commit: b2de93dd330ba4a2e3a087bdafd840067f4cc43a
Parents: f343538
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon Jan 26 12:58:38 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:31 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../java/org/apache/hadoop/tools/DistCpV1.java  | 152 ---
 2 files changed, 68 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2de93dd/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8618e38..662f580 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -499,6 +499,9 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11419 Improve hadoop-maven-plugins. (Herve Boutemy via stevel)
 
+HADOOP-11450. Cleanup DistCpV1 not to use deprecated methods and fix
+javadocs. (Varun Saxena via ozawa)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2de93dd/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
--
diff --git 
a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
 
b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
index c44b67b..f46c421 100644
--- 
a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
+++ 
b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
@@ -51,9 +51,11 @@ import 
org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.SequenceFile.Reader;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.SequenceFile.Writer;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.mapred.FileOutputFormat;
 import org.apache.hadoop.mapred.FileSplit;
@@ -73,6 +75,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 
 /**
  * A Map-reduce program to recursively copy directories between
@@ -283,9 +286,8 @@ public class DistCpV1 implements Tool {
   long last = 0L;
   long acc = 0L;
   long cbrem = srcst.getLen();
-  SequenceFile.Reader sl = null;
-  try {
-sl = new SequenceFile.Reader(fs, src, job);
+  try (SequenceFile.Reader sl =
+  new SequenceFile.Reader(job, Reader.file(src))) {
 for (; sl.next(key, value); last = sl.getPosition()) {
   // if adding this split would put this split past the target size,
   // cut the last split and put this next file in the next split.
@@ -299,9 +301,6 @@ public class DistCpV1 implements Tool {
   acc += key.get();
 }
   }
-  finally {
-checkAndClose(sl);
-  }
   if (cbrem != 0) {
 splits.add(new FileSplit(src, pos, cbrem, (String[])null));
   }
@@ -438,32 +437,28 @@ public class DistCpV1 implements Tool {
  */
 private long doCopyFile(FileStatus srcstat, Path tmpfile, Path absdst,
 Reporter reporter) throws IOException {
-  FSDataInputStream in = null;
-  FSDataOutputStream out = null;
   long bytesCopied = 0L;
-  try {
-Path srcPath = srcstat.getPath();
-// open src file
-in = srcPath.getFileSystem(job).open(srcPath);
+  Path srcPath = srcstat.getPath();
+  // open src file
+  try (FSDataInputStream in = srcPath.getFileSystem(job).open(srcPath)) {
 reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen());
 // open tmp file
-out = create(tmpfile, reporter, srcstat);
-LOG.info("Copying file " + srcPath + " of size " +
 srcstat.getLen() + " bytes...");
+try (FSDataOutputStream out = create(tmpfile, reporter, srcstat)) {
+  

[13/50] [abbrv] hadoop git commit: HADOOP-11327. BloomFilter#not() omits the last bit, resulting in an incorrect filter. Contributed by Eric Payne

2015-01-26 Thread zhz
HADOOP-11327. BloomFilter#not() omits the last bit, resulting in an incorrect 
filter. Contributed by Eric Payne
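
The root cause is that BitSet.flip(fromIndex, toIndex) treats toIndex as
exclusive, so flip(0, vectorSize - 1) left the last bit of the filter
unchanged. A small stand-alone demonstration with the plain JDK BitSet
(the Hadoop classes are not required to see the off-by-one):

    import java.util.BitSet;

    public class FlipDemo {
      public static void main(String[] args) {
        int vectorSize = 8;
        BitSet bits = new BitSet(vectorSize);
        bits.flip(0, vectorSize - 1);     // toIndex is exclusive: bit 7 untouched
        System.out.println(bits.get(7));  // false -- the buggy behavior
        bits.clear();
        bits.flip(0, vectorSize);         // flips all 8 bits, as the fix does
        System.out.println(bits.get(7));  // true
      }
    }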


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf76280c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf76280c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf76280c

Branch: refs/heads/HDFS-EC
Commit: cf76280c7dd12cbe1ad9d03ef57851b2ee4b30a8
Parents: bd457d3
Author: Jason Lowe jl...@apache.org
Authored: Wed Jan 21 19:04:29 2015 +
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:26 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../java/org/apache/hadoop/util/bloom/BloomFilter.java   |  2 +-
 .../org/apache/hadoop/util/bloom/TestBloomFilters.java   | 11 +++
 3 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf76280c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2951002..c54800f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -731,6 +731,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-10668. TestZKFailoverControllerStress#testExpireBackAndForth
 occasionally fails. (Ming Ma via cnauroth)
 
+HADOOP-11327. BloomFilter#not() omits the last bit, resulting in an
+incorrect filter (Eric Payne via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf76280c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/BloomFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/BloomFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/BloomFilter.java
index e2dea6d..f8b9519 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/BloomFilter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/BloomFilter.java
@@ -157,7 +157,7 @@ public class BloomFilter extends Filter {
 
   @Override
   public void not() {
-bits.flip(0, vectorSize - 1);
+bits.flip(0, vectorSize);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf76280c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/TestBloomFilters.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/TestBloomFilters.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/TestBloomFilters.java
index 93fa6d5..6ff854d 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/TestBloomFilters.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/TestBloomFilters.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.util.AbstractCollection;
+import java.util.BitSet;
 import java.util.Iterator;
 
 import 
org.apache.hadoop.util.bloom.BloomFilterCommonTester.BloomFilterTestStrategy;
@@ -237,4 +238,14 @@ public class TestBloomFilters {
 BloomFilterTestStrategy.FILTER_AND_STRATEGY,
 BloomFilterTestStrategy.FILTER_XOR_STRATEGY)).test();
   }
+
+  @Test
+  public void testNot() {
+BloomFilter bf = new BloomFilter(8, 1, Hash.JENKINS_HASH);
+bf.bits = BitSet.valueOf(new byte[] { (byte) 0x95 });
+BitSet origBitSet = (BitSet) bf.bits.clone();
+bf.not();
+assertFalse("BloomFilter#not should have inverted all bits",
+bf.bits.intersects(origBitSet));
+  }
 }



[37/50] [abbrv] hadoop git commit: YARN-2800. Remove MemoryNodeLabelsStore and add a way to enable/disable node labels feature. Contributed by Wangda Tan.

2015-01-26 Thread zhz
YARN-2800. Remove MemoryNodeLabelsStore and add a way to enable/disable node 
labels feature. Contributed by Wangda Tan.
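
With this change the feature is gated by a single flag,
yarn.node-labels.enabled, which defaults to false (see the YarnConfiguration
constants in the diff below). A hedged sketch of flipping it
programmatically; in practice the flag would normally be set in
yarn-site.xml:

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class NodeLabelsFlag {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // NODE_LABELS_ENABLED resolves to "yarn.node-labels.enabled".
        conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
        System.out.println(conf.getBoolean(
            YarnConfiguration.NODE_LABELS_ENABLED,
            YarnConfiguration.DEFAULT_NODE_LABELS_ENABLED)); // true
      }
    }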


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d62e907
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d62e907
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d62e907

Branch: refs/heads/HDFS-EC
Commit: 7d62e9070f2c240e16782e36f96f8cfda339d862
Parents: a06d2d6
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Fri Jan 23 20:37:05 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:29 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../hadoop/yarn/conf/YarnConfiguration.java | 16 ++--
 .../distributedshell/TestDistributedShell.java  |  1 +
 .../nodelabels/CommonNodeLabelsManager.java | 42 -
 .../nodelabels/TestCommonNodeLabelsManager.java | 62 +-
 .../TestFileSystemNodeLabelsStore.java  |  1 +
 .../server/resourcemanager/ResourceManager.java |  6 +-
 .../nodelabels/MemoryRMNodeLabelsManager.java   | 82 --
 .../yarn/server/resourcemanager/MockRM.java |  4 +-
 .../server/resourcemanager/TestRMRestart.java   |  1 +
 .../nodelabels/NullRMNodeLabelsManager.java | 90 
 .../nodelabels/TestRMNodeLabelsManager.java |  9 +-
 .../capacity/TestCapacityScheduler.java |  6 +-
 .../TestCapacitySchedulerNodeLabelUpdate.java   |  4 +-
 .../capacity/TestContainerAllocation.java   |  6 +-
 .../scheduler/capacity/TestQueueParsing.java| 12 +--
 .../resourcemanager/webapp/TestRMWebApp.java|  6 +-
 17 files changed, 233 insertions(+), 118 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d62e907/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7f0628d..7263c6f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -197,6 +197,9 @@ Release 2.7.0 - UNRELEASED
 
 YARN-2984. Metrics for container's actual memory usage. (kasha)
 
+YARN-2800. Remove MemoryNodeLabelsStore and add a way to enable/disable
+node labels feature. (Wangda Tan via ozawa)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d62e907/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 9ac5438..2ccd894 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1648,14 +1648,10 @@ public class YarnConfiguration extends Configuration {
   public static final String YARN_HTTP_POLICY_DEFAULT = 
HttpConfig.Policy.HTTP_ONLY
   .name();
   
-  public static final String NODE_LABELS_PREFIX = YARN_PREFIX + "node-labels.";
-
   /**
-   * Class for RMNodeLabelsManager Please note this value should be consistent
-   * in client nodes and RM node(s)
+   * Node-labels configurations
*/
-  public static final String RM_NODE_LABELS_MANAGER_CLASS = NODE_LABELS_PREFIX
-  + "manager-class";
+  public static final String NODE_LABELS_PREFIX = YARN_PREFIX + "node-labels.";
   
   /** URI for NodeLabelManager */
   public static final String FS_NODE_LABELS_STORE_ROOT_DIR = NODE_LABELS_PREFIX
@@ -1664,6 +1660,14 @@ public class YarnConfiguration extends Configuration {
   NODE_LABELS_PREFIX + "fs-store.retry-policy-spec";
   public static final String DEFAULT_FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC =
   "2000, 500";
+  
+  /**
+   * Flag to indicate whether the node labels feature is enabled; by default
+   * it is disabled.
+   */
+  public static final String NODE_LABELS_ENABLED = NODE_LABELS_PREFIX
+  + "enabled";
+  public static final boolean DEFAULT_NODE_LABELS_ENABLED = false;
 
   public YarnConfiguration() {
 super();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d62e907/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
--
diff --git 

hadoop git commit: HADOOP-11499. Check of executorThreadsStarted in ValueQueue#submitRefillTask() evades lock acquisition. Contributed by Ted Yu (cherry picked from commit 7574df1bba33919348d3009f2578

2015-01-26 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2cf58ca5a - a389056af


HADOOP-11499. Check of executorThreadsStarted in ValueQueue#submitRefillTask() 
evades lock acquisition. Contributed by Ted Yu
(cherry picked from commit 7574df1bba33919348d3009f2578d6a81b5818e6)
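
The bug is a broken double-checked initialization: the flag was only tested
outside the synchronized block, so two threads racing through
submitRefillTask() could both prestart the executor's core threads. The fix
re-checks the flag after acquiring the lock. A generic sketch of the
check-lock-recheck shape (not the ValueQueue code; the volatile keyword here
is part of the textbook idiom, not necessarily of the patch):

    public class StartOnce {
      private volatile boolean started = false;

      void ensureStarted() {
        if (!started) {            // cheap first check, no lock
          synchronized (this) {
            if (!started) {        // re-check: another thread may have won
              // ... one-time initialization goes here ...
              started = true;
            }
          }
        }
      }
    }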


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a389056a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a389056a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a389056a

Branch: refs/heads/branch-2
Commit: a389056afa93cd64c1d97f23198aadf2a8107def
Parents: 2cf58ca
Author: Jason Lowe jl...@apache.org
Authored: Mon Jan 26 16:56:14 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Mon Jan 26 16:57:12 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../org/apache/hadoop/crypto/key/kms/ValueQueue.java| 12 +++-
 2 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a389056a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b38d733..0bfd02a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -408,6 +408,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11482. Use correct UGI when KMSClientProvider is called by a proxy
 user. Contributed by Arun Suresh.
 
+HADOOP-11499. Check of executorThreadsStarted in
+ValueQueue#submitRefillTask() evades lock acquisition (Ted Yu via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a389056a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
index 8e67ecc..32451d8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
@@ -308,11 +308,13 @@ public class ValueQueue<E> {
   final Queue<E> keyQueue) throws InterruptedException {
 if (!executorThreadsStarted) {
   synchronized (this) {
-// To ensure all requests are first queued, make coreThreads =
-// maxThreads
-// and pre-start all the Core Threads.
-executor.prestartAllCoreThreads();
-executorThreadsStarted = true;
+if (!executorThreadsStarted) {
+  // To ensure all requests are first queued, make coreThreads =
+  // maxThreads
+  // and pre-start all the Core Threads.
+  executor.prestartAllCoreThreads();
+  executorThreadsStarted = true;
+}
   }
 }
 // The submit/execute method of the ThreadPoolExecutor is bypassed and



[22/50] [abbrv] hadoop git commit: HDFS-7430. Refactor the BlockScanner to use O(1) memory and use multiple threads (cmccabe)

2015-01-26 Thread zhz
HDFS-7430. Refactor the BlockScanner to use O(1) memory and use multiple 
threads (cmccabe)
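
The rewrite replaces the per-block-pool scanner threads with one
VolumeScanner per volume, throttled by the new key
dfs.block.scanner.volume.bytes.per.second (1 MiB/s by default, per the
DFSConfigKeys change below). A hedged snippet for tuning the rate from
client code; normally this would be set in hdfs-site.xml:

    import org.apache.hadoop.conf.Configuration;

    public class ScannerRate {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Key name and the 1048576L default come from the DFSConfigKeys
        // diff below.
        conf.setLong("dfs.block.scanner.volume.bytes.per.second",
            2L * 1024 * 1024);
        System.out.println(conf.getLong(
            "dfs.block.scanner.volume.bytes.per.second", 1048576L));
      }
    }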


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df4edd9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df4edd9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df4edd9a

Branch: refs/heads/HDFS-EC
Commit: df4edd9aea0dc9b4dff82347b2776f7069018243
Parents: a691658
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Dec 17 11:27:48 2014 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:27 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +
 .../hdfs/server/datanode/BPOfferService.java|   3 -
 .../hdfs/server/datanode/BPServiceActor.java|   6 -
 .../server/datanode/BlockPoolSliceScanner.java  | 872 ---
 .../hdfs/server/datanode/BlockReceiver.java |   8 -
 .../hdfs/server/datanode/BlockScanner.java  | 308 +++
 .../hdfs/server/datanode/BlockSender.java   |   3 -
 .../hdfs/server/datanode/DataBlockScanner.java  | 339 ---
 .../hadoop/hdfs/server/datanode/DataNode.java   |  73 +-
 .../hdfs/server/datanode/VolumeScanner.java | 652 ++
 .../server/datanode/fsdataset/FsDatasetSpi.java |  32 +-
 .../server/datanode/fsdataset/FsVolumeSpi.java  | 110 +++
 .../server/datanode/fsdataset/RollingLogs.java  |  73 --
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  44 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   | 347 
 .../datanode/fsdataset/impl/FsVolumeList.java   |  24 +-
 .../fsdataset/impl/RollingLogsImpl.java | 241 -
 .../src/main/resources/hdfs-default.xml |  20 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  16 +
 .../hadoop/hdfs/TestDatanodeBlockScanner.java   | 551 
 .../org/apache/hadoop/hdfs/TestReplication.java |   3 +-
 .../TestOverReplicatedBlocks.java   |  13 +-
 .../server/datanode/BlockReportTestBase.java|   7 +-
 .../hdfs/server/datanode/DataNodeTestUtils.java |  24 -
 .../server/datanode/SimulatedFSDataset.java |  22 +-
 .../hdfs/server/datanode/TestBlockScanner.java  | 680 +++
 .../server/datanode/TestDirectoryScanner.java   |  16 +
 .../TestMultipleNNDataBlockScanner.java | 245 --
 .../extdataset/ExternalDatasetImpl.java |   7 -
 .../extdataset/ExternalRollingLogs.java |  92 --
 .../datanode/extdataset/ExternalVolumeImpl.java |  17 +
 .../extdataset/TestExternalDataset.java |   9 -
 .../fsdataset/impl/FsVolumeListTest.java|  17 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   |  30 +-
 .../impl/TestInterDatanodeProtocol.java |   4 +-
 .../namenode/snapshot/SnapshotTestHelper.java   |   4 +-
 37 files changed, 2288 insertions(+), 2629 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df4edd9a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 25ad33b..866b765 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -140,6 +140,9 @@ Trunk (Unreleased)
 class and constructor to public; and fix FsDatasetSpi to use generic type
 instead of FsVolumeImpl.  (David Powell and Joe Pallas via szetszwo)
 
+HDFS-7430. Rewrite the BlockScanner to use O(1) memory and use multiple
+threads (cmccabe)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df4edd9a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index fb958f1..60581b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -441,6 +441,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;
   public static final String  DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours";
   public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
+  public static final String  DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = "dfs.block.scanner.volume.bytes.per.second";
+  public static final long DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L;
   public static final String  

[10/50] [abbrv] hadoop git commit: HADOOP-10668. Addendum patch to fix TestZKFailoverController. Contributed by Ming Ma.

2015-01-26 Thread zhz
HADOOP-10668. Addendum patch to fix TestZKFailoverController. Contributed by 
Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce169494
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce169494
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce169494

Branch: refs/heads/HDFS-EC
Commit: ce1694941c20bd42291c35503392e8bdcf84adeb
Parents: 8a690f9
Author: cnauroth cnaur...@apache.org
Authored: Wed Jan 21 11:52:48 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:26 2015 -0800

--
 .../test/java/org/apache/hadoop/ha/MiniZKFCCluster.java   |  5 +
 .../org/apache/hadoop/ha/TestZKFailoverController.java| 10 +-
 2 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce169494/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
index cab59a4..5aee611 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
@@ -155,6 +155,11 @@ public class MiniZKFCCluster {
 
   /**
* Wait for the given HA service to enter the given HA state.
+   * This is based on the state of ZKFC, not the state of HA service.
   * There could be a difference between the two. For example,
+   * When the service becomes unhealthy, ZKFC will quit ZK election and
+   * transition to HAServiceState.INITIALIZING and remain in that state
+   * until the service becomes healthy.
*/
   public void waitForHAState(int idx, HAServiceState state)
   throws Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce169494/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
index 83a29dd..d8271c5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
@@ -211,8 +211,8 @@ public class TestZKFailoverController extends 
ClientBaseWithFixes {
   LOG.info("Faking svc0 unhealthy, should failover to svc1");
   cluster.setHealthy(0, false);
   
-  LOG.info("Waiting for svc0 to enter standby state");
-  cluster.waitForHAState(0, HAServiceState.STANDBY);
+  LOG.info("Waiting for svc0 to enter initializing state");
+  cluster.waitForHAState(0, HAServiceState.INITIALIZING);
   cluster.waitForHAState(1, HAServiceState.ACTIVE);
   
   LOG.info("Allowing svc0 to be healthy again, making svc1 unreachable " +
@@ -332,7 +332,7 @@ public class TestZKFailoverController extends 
ClientBaseWithFixes {
   Mockito.verify(svc1.proxy, Mockito.timeout(2000).atLeastOnce())
 .transitionToActive(Mockito.<StateChangeRequestInfo>any());
 
-  cluster.waitForHAState(0, HAServiceState.STANDBY);
+  cluster.waitForHAState(0, HAServiceState.INITIALIZING);
   cluster.waitForHAState(1, HAServiceState.STANDBY);
   
   LOG.info("Faking svc0 healthy again, should go back to svc0");
@@ -587,12 +587,12 @@ public class TestZKFailoverController extends 
ClientBaseWithFixes {
 
   // Failover by bad health
   cluster.setHealthy(0, false);
-  cluster.waitForHAState(0, HAServiceState.STANDBY);
+  cluster.waitForHAState(0, HAServiceState.INITIALIZING);
   cluster.waitForHAState(1, HAServiceState.ACTIVE);
   cluster.setHealthy(1, true);
   cluster.setHealthy(0, false);
   cluster.waitForHAState(1, HAServiceState.ACTIVE);
-  cluster.waitForHAState(0, HAServiceState.STANDBY);
+  cluster.waitForHAState(0, HAServiceState.INITIALIZING);
   cluster.setHealthy(0, true);
   
   cluster.waitForHealthState(0, State.SERVICE_HEALTHY);



[27/50] [abbrv] hadoop git commit: HADOOP-11209. Configuration#updatingResource/finalParameters are not thread-safe. Contributed by Varun Saxena.

2015-01-26 Thread zhz
HADOOP-11209. Configuration#updatingResource/finalParameters are not 
thread-safe. Contributed by Varun Saxena.
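
The fix swaps the plain HashMap/HashSet fields for concurrent equivalents.
Since the JDK has no ConcurrentHashSet, the patch uses the standard idiom of
wrapping a Boolean-valued ConcurrentHashMap, sketched here in isolation:

    import java.util.Collections;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class ConcurrentSetIdiom {
      public static void main(String[] args) {
        // A ConcurrentHashMap viewed as a Set: thread-safe add/contains
        // without locking the whole collection.
        Set<String> finalParams = Collections.newSetFromMap(
            new ConcurrentHashMap<String, Boolean>());
        finalParams.add("io.file.buffer.size");
        System.out.println(finalParams.contains("io.file.buffer.size")); // true
      }
    }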


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea85ae4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea85ae4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea85ae4f

Branch: refs/heads/HDFS-EC
Commit: ea85ae4f80e0e6781bbe483fd5babb0669ea141c
Parents: 73309fb
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Thu Jan 22 14:15:59 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:28 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../org/apache/hadoop/conf/Configuration.java   | 46 +---
 .../apache/hadoop/conf/TestConfiguration.java   | 46 
 3 files changed, 80 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea85ae4f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index abe699a..eb9015c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -741,6 +741,9 @@ Release 2.7.0 - UNRELEASED
 architecture because it is slower there (Suman Somasundar via Colin P.
 McCabe)
 
+HADOOP-11209. Configuration#updatingResource/finalParameters are not
+thread-safe. (Varun Saxena via ozawa)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea85ae4f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index afcea44..54ee46d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -52,6 +52,7 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.StringTokenizer;
 import java.util.WeakHashMap;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -228,7 +229,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   /**
* List of configuration parameters marked <b>final</b>. 
*/
-  private Set<String> finalParameters = new HashSet<String>();
+  private Set<String> finalParameters = Collections.newSetFromMap(
+  new ConcurrentHashMap<String, Boolean>());
   
   private boolean loadDefaults = true;
   
@@ -258,7 +260,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* Stores the mapping of key to the resource which modifies or loads 
* the key most recently
*/
-  private HashMap<String, String[]> updatingResource;
+  private Map<String, String[]> updatingResource;
  
   /**
* Class to keep the information about the keys which replace the deprecated
@@ -685,7 +687,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
 */
   public Configuration(boolean loadDefaults) {
 this.loadDefaults = loadDefaults;
-updatingResource = new HashMap<String, String[]>();
+updatingResource = new ConcurrentHashMap<String, String[]>();
 synchronized(Configuration.class) {
   REGISTRY.put(this, null);
 }
@@ -708,8 +710,11 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
 this.overlay = (Properties)other.overlay.clone();
  }

- this.updatingResource = new HashMap<String, String[]>(other.updatingResource);
- this.finalParameters = new HashSet<String>(other.finalParameters);
+ this.updatingResource = new ConcurrentHashMap<String, String[]>(
+ other.updatingResource);
+ this.finalParameters = Collections.newSetFromMap(
+ new ConcurrentHashMap<String, Boolean>());
+ this.finalParameters.addAll(other.finalParameters);
}

 synchronized(Configuration.class) {
@@ -2314,20 +2319,27 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* @return final parameter set.
*/
   public Set<String> getFinalParameters() {
-return new HashSet<String>(finalParameters);
+Set<String> setFinalParams = Collections.newSetFromMap(
+new ConcurrentHashMap<String, Boolean>());
+setFinalParams.addAll(finalParameters);
+return setFinalParams;
   }
 
   protected synchronized Properties getProps() {
 

[09/50] [abbrv] hadoop git commit: HDFS-7548. Corrupt block reporting delayed until datablock scanner thread detects it. Contributed by Rushabh Shah.

2015-01-26 Thread zhz
HDFS-7548. Corrupt block reporting delayed until datablock scanner thread 
detects it. Contributed by Rushabh Shah.
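
The patch adds an IMMEDIATE_SCAN type that sorts ahead of periodic
verification scans, so a block flagged as suspect is rescanned (and the
corruption reported) right away instead of waiting for its periodic turn. A
minimal, hypothetical sketch of ordering by an enum before a timestamp,
mirroring the comparator change below:

    import java.util.Comparator;

    public class ScanOrderSketch {
      enum ScanType { IMMEDIATE_SCAN, VERIFICATION_SCAN, NONE }

      static class Info {
        ScanType nextScanType = ScanType.VERIFICATION_SCAN;
        long lastScanTime;
      }

      static final Comparator<Info> ORDER = new Comparator<Info>() {
        @Override
        public int compare(Info a, Info b) {
          int byType = a.nextScanType.compareTo(b.nextScanType);
          if (byType != 0) {
            return byType;  // IMMEDIATE_SCAN has the lowest ordinal, so it sorts first
          }
          return Long.compare(a.lastScanTime, b.lastScanTime); // then oldest first
        }
      };
    }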


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8c59ba0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8c59ba0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8c59ba0

Branch: refs/heads/HDFS-EC
Commit: a8c59ba0cef5b904b6f499c8e073203abb91d2b4
Parents: ce16949
Author: Kihwal Lee kih...@apache.org
Authored: Wed Jan 21 14:41:31 2015 -0600
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:26 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/datanode/BlockPoolSliceScanner.java  | 49 ++--
 .../hdfs/server/datanode/BlockSender.java   |  3 +
 .../hdfs/server/datanode/DataBlockScanner.java  | 15 -
 .../hadoop/hdfs/server/datanode/DataNode.java   |  2 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  4 +-
 .../hadoop/hdfs/TestDatanodeBlockScanner.java   | 60 +++-
 7 files changed, 125 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8c59ba0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0a301f8..25ad33b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -756,6 +756,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via
 Colin P. McCabe)
 
+HDFS-7548. Corrupt block reporting delayed until datablock scanner thread
+detects it (Rushabh Shah via kihwal)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8c59ba0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
index 61f1e7e..f36fea1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
@@ -105,6 +105,7 @@ class BlockPoolSliceScanner {
   private long bytesLeft = 0; // Bytes to scan in this period
   private long totalBytesToScan = 0;
   private boolean isNewPeriod = true;
+  private int lastScanTimeDifference = 5*60*1000;
   
   private final LogFileHandler verificationLog;
   
@@ -112,6 +113,7 @@ class BlockPoolSliceScanner {
200, MAX_SCAN_RATE);
   
   private static enum ScanType {
+IMMEDIATE_SCAN,  
 VERIFICATION_SCAN, // scanned as part of periodic verification
 NONE,
   }
@@ -129,12 +131,17 @@ class BlockPoolSliceScanner {
 
   @Override
   public int compare(BlockScanInfo left, BlockScanInfo right) {
+final ScanType leftNextScanType = left.nextScanType;
+final ScanType rightNextScanType = right.nextScanType;
 final long l = left.lastScanTime;
 final long r = right.lastScanTime;
+// Compare by nextScanType; if they are the same, then compare by
+// lastScanTime.
 // compare blocks themselves if scan times are same, because TreeMap
 // uses the comparator, if available, to check existence of the object.
-return l < r? -1: l > r? 1: left.compareTo(right); 
+int compareByNextScanType = leftNextScanType.compareTo(rightNextScanType);
+return compareByNextScanType < 0? -1: compareByNextScanType > 0? 1: l < r? -1: l > r? 1: left.compareTo(right); 
   }
 };
 
@@ -142,6 +149,7 @@ class BlockPoolSliceScanner {
 ScanType lastScanType = ScanType.NONE; 
 boolean lastScanOk = true;
 private LinkedElement next;
+ScanType nextScanType = ScanType.VERIFICATION_SCAN;
 
 BlockScanInfo(Block block) {
   super(block);
@@ -265,10 +273,12 @@ class BlockPoolSliceScanner {
   private synchronized void updateBlockInfo(LogEntry e) {
 BlockScanInfo info = blockMap.get(new Block(e.blockId, 0, e.genStamp));
 
-if(info != null && e.verificationTime > 0 && 
+if (info != null && e.verificationTime > 0 && 
 info.lastScanTime < e.verificationTime) {
   delBlockInfo(info);
-  info.lastScanTime = e.verificationTime;
+  if (info.nextScanType != ScanType.IMMEDIATE_SCAN) {
+info.lastScanTime = e.verificationTime;
+  }
   

[39/50] [abbrv] hadoop git commit: HDFS-7676. Fix TestFileTruncate to avoid bug of HDFS-7611. Contributed by Konstantin Shvachko.

2015-01-26 Thread zhz
HDFS-7676. Fix TestFileTruncate to avoid bug of HDFS-7611. Contributed by 
Konstantin Shvachko.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38cbafd1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38cbafd1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38cbafd1

Branch: refs/heads/HDFS-EC
Commit: 38cbafd18cf18171ded4e4d0518d67be13145018
Parents: e461d62
Author: Konstantin V Shvachko s...@apache.org
Authored: Sat Jan 24 18:03:11 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:30 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
 .../apache/hadoop/hdfs/server/namenode/TestFileTruncate.java| 5 +
 2 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38cbafd1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 21c8374..a6cbf8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -785,6 +785,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7659. truncate should check negative value of the new length.
 (Yi Liu via shv)
 
+HDFS-7676. Fix TestFileTruncate to avoid bug of HDFS-7611. (shv)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38cbafd1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 1612a24..e8250a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -496,6 +496,11 @@ public class TestFileTruncate {
*/
   @Test
   public void testTruncateEditLogLoad() throws IOException {
+// purge previously accumulated edits
+fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+fs.saveNamespace();
+fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
 int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
 int toTruncate = 1;
 final String s = "/testTruncateEditLogLoad";



[06/50] [abbrv] hadoop git commit: HDFS-7623. Add htrace configuration properties to core-default.xml and update user doc about how to enable htrace. (yliu)

2015-01-26 Thread zhz
HDFS-7623. Add htrace configuration properties to core-default.xml and update 
user doc about how to enable htrace. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3111cfb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3111cfb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3111cfb1

Branch: refs/heads/HDFS-EC
Commit: 3111cfb18fe2e25c4dd96620f16ed53b322dd635
Parents: 7ba6504
Author: yliu y...@apache.org
Authored: Wed Jan 21 00:59:47 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:25 2015 -0800

--
 .../src/main/resources/core-default.xml | 24 +++
 .../hadoop-common/src/site/apt/Tracing.apt.vm   | 25 +++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 3 files changed, 46 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3111cfb1/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index e7a382d..bebc263 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1653,4 +1653,28 @@ for ldap providers in the same way as above does.
   </description>
 </property>
 
+<property>
+  <name>hadoop.htrace.sampler</name>
+  <value>NeverSampler</value>
+  <description>
+    Configure the samplers for HTrace, the value can be NeverSampler,
+    AlwaysSampler or ProbabilitySampler. NeverSampler: HTrace is OFF 
+    for all spans; AlwaysSampler: HTrace is ON for all spans;
+    ProbabilitySampler: HTrace is ON for some percentage% of top-level 
+    spans.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.htrace.spanreceiver.classes</name>
+  <value></value>
+  <description>
+    A comma separated list of the fully-qualified class name of classes 
+    implementing SpanReceiver. The tracing system works by collecting 
+    information in structs called 'Spans'. It is up to you to choose 
+    how you want to receive this information by implementing the 
+    SpanReceiver interface.
+  </description>
+</property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3111cfb1/hadoop-common-project/hadoop-common/src/site/apt/Tracing.apt.vm
--
diff --git a/hadoop-common-project/hadoop-common/src/site/apt/Tracing.apt.vm 
b/hadoop-common-project/hadoop-common/src/site/apt/Tracing.apt.vm
index 9eda220..f04da33 100644
--- a/hadoop-common-project/hadoop-common/src/site/apt/Tracing.apt.vm
+++ b/hadoop-common-project/hadoop-common/src/site/apt/Tracing.apt.vm
@@ -16,19 +16,32 @@
   ---
   ${maven.build.timestamp}
 
-Enabling Dapper-like Tracing in HDFS
+Enabling Dapper-like Tracing in Hadoop
 
 %{toc|section=1|fromDepth=0}
 
-* {Dapper-like Tracing in HDFS}
+* {Dapper-like Tracing in Hadoop}
 
 ** HTrace
 
   {{{https://issues.apache.org/jira/browse/HDFS-5274}HDFS-5274}}
   added support for tracing requests through HDFS,
-  using the open source tracing library, 
{{{https://github.com/cloudera/htrace}HTrace}}.
+  using the open source tracing library, 
{{{https://git-wip-us.apache.org/repos/asf/incubator-htrace.git}Apache HTrace}}.
   Setting up tracing is quite simple, however it requires some very minor 
changes to your client code.
 
+** Samplers
+  Configure the samplers in core-site.xml property: 
hadoop.htrace.sampler.
+  The value can be NeverSampler, AlwaysSampler or ProbabilitySampler. 
NeverSampler: HTrace is OFF 
+  for all spans; AlwaysSampler: HTrace is ON for all spans; 
ProbabilitySampler: HTrace is ON for 
+  some percentage% of top-level spans.
+
++
+  <property>
+    <name>hadoop.htrace.sampler</name>
+    <value>NeverSampler</value>
+  </property>
++
+
 ** SpanReceivers
 
   The tracing system works by collecting information in structs called 'Spans'.
@@ -42,7 +55,7 @@ public void receiveSpan(Span span);
   Configure what SpanReceivers you'd like to use
   by putting a comma separated list of the fully-qualified class name of
   classes implementing SpanReceiver
-  in hdfs-site.xml property: hadoop.htrace.spanreceiver.classes.
+  in core-site.xml property: hadoop.htrace.spanreceiver.classes.
 
 +
   <property>
@@ -83,11 +96,11 @@ public void receiveSpan(Span span);
   $ git clone https://github.com/cloudera/htrace
   $ cd htrace/htrace-zipkin
   $ mvn compile assembly:single
-  $ cp target/htrace-zipkin-*-jar-with-dependencies.jar 
$HADOOP_HOME/share/hadoop/hdfs/lib/
+  $ cp target/htrace-zipkin-*-jar-with-dependencies.jar 

[38/50] [abbrv] hadoop git commit: HDFS-3750. API docs don't include HDFS (Jolly Chen via aw)

2015-01-26 Thread zhz
HDFS-3750. API docs don't include HDFS (Jolly Chen via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc9ed524
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc9ed524
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc9ed524

Branch: refs/heads/HDFS-EC
Commit: cc9ed5249769c91e216bef48763e084578f15d35
Parents: f3c4294
Author: Allen Wittenauer a...@apache.org
Authored: Fri Jan 23 14:10:44 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:30 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
 pom.xml | 6 +-
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc9ed524/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6849229..053b2eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -293,6 +293,8 @@ Trunk (Unreleased)
 HDFS-7667. Various typos and improvements to HDFS Federation doc
 (Charles Lamb via aw)
 
+HDFS-3750. API docs don't include HDFS (Jolly Chen via aw)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc9ed524/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 72ca43d..e33865f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -367,13 +367,17 @@ xsi:schemaLocation=http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
   
  <reportOutputDirectory>${project.build.directory}/site</reportOutputDirectory>
  <destDir>hadoop-project/api</destDir>
  <!-- Non-public APIs -->
-  <excludePackageNames>org.apache.hadoop.authentication*,org.apache.hadoop.hdfs*,org.apache.hadoop.mapreduce.v2.proto,org.apache.hadoop.yarn.proto,org.apache.hadoop.yarn.server*,org.apache.hadoop.yarn.webapp*</excludePackageNames>
+  <excludePackageNames>org.apache.hadoop.authentication*,org.apache.hadoop.mapreduce.v2.proto,org.apache.hadoop.yarn.proto,org.apache.hadoop.yarn.server*,org.apache.hadoop.yarn.webapp*</excludePackageNames>
  <groups>
    <group>
      <title>Common</title>
      <packages>org.apache.hadoop*</packages>
    </group>
    <group>
+     <title>HDFS</title>
+     <packages>org.apache.hadoop.hdfs*</packages>
+   </group>
+   <group>
      <title>MapReduce</title>
      <packages>org.apache.hadoop.mapred*</packages>
    </group>



[18/50] [abbrv] hadoop git commit: HADOOP-11490. Expose truncate API via FileSystem and shell command. Contributed by Milan Desai.

2015-01-26 Thread zhz
HADOOP-11490. Expose truncate API via FileSystem and shell command. Contributed 
by Milan Desai.
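
A hedged usage sketch of the new API; the path is hypothetical, and the
boolean return semantics follow the FileSystem javadoc added in the diff
below:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TruncateExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/tmp/data.log");  // hypothetical path
        // true: the file is already at newLength and ready for append;
        // false: last-block recovery is still running and the client
        // should wait before further updates.
        boolean done = fs.truncate(file, 1024L);
        System.out.println(done ? "truncated" : "recovery in progress");
      }
    }

The patch also wires the same operation into the shell (see the new
fs/shell/Truncate.java in the file list).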


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2a8eca3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2a8eca3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2a8eca3

Branch: refs/heads/HDFS-EC
Commit: f2a8eca3d1031e3fb179183c13ec704b134ad51e
Parents: c1ad0a8
Author: Konstantin V Shvachko s...@apache.org
Authored: Wed Jan 21 15:58:58 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:27 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   5 +-
 .../apache/hadoop/fs/ChecksumFileSystem.java|   5 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |  23 
 .../org/apache/hadoop/fs/FilterFileSystem.java  |   5 +
 .../org/apache/hadoop/fs/HarFileSystem.java |   8 ++
 .../apache/hadoop/fs/RawLocalFileSystem.java|  25 
 .../org/apache/hadoop/fs/shell/FsCommand.java   |   1 +
 .../org/apache/hadoop/fs/shell/Truncate.java| 117 +++
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  13 +++
 .../hadoop/hdfs/DistributedFileSystem.java  |   9 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |  69 +++
 11 files changed, 271 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2a8eca3/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c54800f..66fd138 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -18,7 +18,10 @@ Trunk (Unreleased)
 HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via 
aw)
 
 HADOOP-11353. Add support for .hadooprc (aw)
-
+
+HADOOP-11490. Expose truncate API via FileSystem and shell command.
+(Milan Desai via shv)
+
   IMPROVEMENTS
 
 HADOOP-8017. Configure hadoop-main pom to get rid of M2E plugin execution

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2a8eca3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index b6b865c..dddf0ce 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -352,6 +352,11 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
 throw new IOException("Not supported");
   }
 
+  @Override
+  public boolean truncate(Path f, long newLength) throws IOException {
+throw new IOException("Not supported");
+  }
+
   /**
* Calculated the length of the checksum file in bytes.
* @param size the length of the data file in bytes

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2a8eca3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 619f433..cfa5198 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1317,6 +1317,29 @@ public abstract class FileSystem extends Configured 
implements Closeable {
   throw new IOException("rename from " + src + " to " + dst + " failed.");
 }
   }
+
+  /**
+   * Truncate the file in the indicated path to the indicated size.
+   * <ul>
+   * <li>Fails if path is a directory.
+   * <li>Fails if path does not exist.
+   * <li>Fails if path is not closed.
+   * <li>Fails if new size is greater than current size.
+   * </ul>
+   * @param f The path to the file to be truncated
+   * @param newLength The size the file is to be truncated to
+   *
+   * @return <code>true</code> if the file has been truncated to the desired
+   * <code>newLength</code> and is immediately available to be reused for
+   * write operations such as <code>append</code>, or
+   * <code>false</code> if a background process of adjusting the length of
+   * the last block has been started, and clients should wait for it to
+   * complete before proceeding with further file updates.
+   */
+  public boolean 

[11/50] [abbrv] hadoop git commit: HDFS-3443. Fix NPE when namenode transition to active during startup by adding checkNNStartup() in NameNodeRpcServer. Contributed by Vinayakumar B

2015-01-26 Thread zhz
HDFS-3443. Fix NPE when namenode transition to active during startup by adding 
checkNNStartup() in NameNodeRpcServer.  Contributed by Vinayakumar B
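
The NPE came from RPC requests arriving while the NameNode was still
initializing. The fix tracks completion with an AtomicBoolean (set only
after startup finishes, per the NameNode.java diff below) and checks it at
the top of each RPC handler. A hedged sketch of the guard's shape; the exact
exception type thrown by the real checkNNStartup() is in the truncated
NameNodeRpcServer diff, so a plain IOException stands in here:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class StartupGuardSketch {
      private final AtomicBoolean started = new AtomicBoolean(false);

      void finishStartup() {
        started.set(true);  // flipped only after initialization completes
      }

      // Called at the top of each RPC method so a request that races
      // startup fails cleanly instead of hitting half-initialized state.
      void checkStartup() throws IOException {
        if (!started.get()) {
          throw new IOException("NameNode still not started");
        }
      }
    }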


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a690f9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a690f9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a690f9f

Branch: refs/heads/HDFS-EC
Commit: 8a690f9f41cb18dd2a3bc58c38387fa3bc9a3d65
Parents: dc9978a
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Wed Jan 21 11:32:32 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:26 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/server/namenode/NameNode.java   |  12 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java | 187 +++
 3 files changed, 165 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a690f9f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2ab8ea3..0a301f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -783,6 +783,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7443. Datanode upgrade to BLOCKID_BASED_LAYOUT fails if duplicate
 block files are present in the same volume (cmccabe)
 
+HDFS-3443. Fix NPE when namenode transition to active during startup by
+adding checkNNStartup() in NameNodeRpcServer.  (Vinayakumar B via szetszwo)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a690f9f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index a71d158..fea7c62 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -79,6 +79,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
@@ -265,6 +266,7 @@ public class NameNode implements NameNodeStatusMXBean {
   private final boolean haEnabled;
   private final HAContext haContext;
   protected final boolean allowStaleStandbyReads;
+  private AtomicBoolean started = new AtomicBoolean(false); 
 
   
   /** httpServer */
@@ -775,6 +777,7 @@ public class NameNode implements NameNodeStatusMXBean {
   this.stop();
   throw e;
 }
+this.started.set(true);
   }
 
   protected HAState createHAState(StartupOption startOpt) {
@@ -1743,7 +1746,14 @@ public class NameNode implements NameNodeStatusMXBean {
   public boolean isActiveState() {
 return (state.equals(ACTIVE_STATE));
   }
-  
+
+  /**
+   * Returns whether the NameNode is completely started
+   */
+  boolean isStarted() {
+return this.started.get();
+  }
+
   /**
* Check that a request to change this node's HA state is valid.
* In particular, verifies that, if auto failover is enabled, non-forced

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a690f9f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 6ef8fd6..a3ac455 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -69,7 +69,6 @@ import 
org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
-import org.apache.hadoop.hdfs.inotify.Event;
 import org.apache.hadoop.hdfs.inotify.EventBatch;
 import 

[45/50] [abbrv] hadoop git commit: HADOOP-11499. Check of executorThreadsStarted in ValueQueue#submitRefillTask() evades lock acquisition. Contributed by Ted Yu

2015-01-26 Thread zhz
HADOOP-11499. Check of executorThreadsStarted in ValueQueue#submitRefillTask() 
evades lock acquisition. Contributed by Ted Yu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ab153e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ab153e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ab153e4

Branch: refs/heads/HDFS-EC
Commit: 7ab153e4a54757f825f3b37168a42d237d0497c0
Parents: 4216800
Author: Jason Lowe jl...@apache.org
Authored: Mon Jan 26 16:56:14 2015 +
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:31 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../org/apache/hadoop/crypto/key/kms/ValueQueue.java| 12 +++-
 2 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ab153e4/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 662f580..598f750 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -764,6 +764,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11482. Use correct UGI when KMSClientProvider is called by a proxy
 user. Contributed by Arun Suresh.
 
+HADOOP-11499. Check of executorThreadsStarted in
+ValueQueue#submitRefillTask() evades lock acquisition (Ted Yu via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ab153e4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
index 8e67ecc..32451d8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
@@ -308,11 +308,13 @@ public class ValueQueue<E> {
   final Queue<E> keyQueue) throws InterruptedException {
 if (!executorThreadsStarted) {
   synchronized (this) {
-// To ensure all requests are first queued, make coreThreads =
-// maxThreads
-// and pre-start all the Core Threads.
-executor.prestartAllCoreThreads();
-executorThreadsStarted = true;
+if (!executorThreadsStarted) {
+  // To ensure all requests are first queued, make coreThreads =
+  // maxThreads
+  // and pre-start all the Core Threads.
+  executor.prestartAllCoreThreads();
+  executorThreadsStarted = true;
+}
   }
 }
 // The submit/execute method of the ThreadPoolExecutor is bypassed and



[36/50] [abbrv] hadoop git commit: YARN-3082. Non thread safe access to systemCredentials in NodeHeartbeatResponse processing. Contributed by Anubhav Dhoot.

2015-01-26 Thread zhz
YARN-3082. Non thread safe access to systemCredentials in NodeHeartbeatResponse 
processing. Contributed by Anubhav Dhoot.
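
A ByteBuffer carries mutable cursor state (position/limit), so handing the
same buffer to concurrent heartbeat responses lets one reader's getters
corrupt another's view. duplicate() shares the underlying bytes but gives
each caller an independent cursor, which is what the one-line fix below
applies. A tiny demonstration:

    import java.nio.ByteBuffer;

    public class DuplicateDemo {
      public static void main(String[] args) {
        ByteBuffer shared = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});
        ByteBuffer view = shared.duplicate();
        view.getInt();                           // advances only the view
        System.out.println(shared.remaining());  // 4 -- original untouched
        System.out.println(view.remaining());    // 0
      }
    }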


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a06d2d65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a06d2d65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a06d2d65

Branch: refs/heads/HDFS-EC
Commit: a06d2d65d38ae464d02809b1c6718d20b960ac8b
Parents: 8a96c6d
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Fri Jan 23 16:04:18 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:29 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../impl/pb/NodeHeartbeatResponsePBImpl.java|  3 +-
 .../nodemanager/TestNodeStatusUpdater.java  | 63 
 3 files changed, 68 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a06d2d65/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index bdc31db..7f0628d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -400,6 +400,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3078. LogCLIHelpers lacks of a blank space before string 'does not 
exist'.
 (Sam Liu via ozawa)
 
+YARN-3082. Non thread safe access to systemCredentials in 
NodeHeartbeatResponse
+processing. (Anubhav Dhoot via ozawa)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a06d2d65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
index 1e91514..630a5bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
@@ -104,7 +104,8 @@ public class NodeHeartbeatResponsePBImpl extends
     for (Map.Entry<ApplicationId, ByteBuffer> entry : systemCredentials.entrySet()) {
       builder.addSystemCredentialsForApps(SystemCredentialsForAppsProto.newBuilder()
         .setAppId(convertToProtoFormat(entry.getKey()))
-        .setCredentialsForApp(ProtoUtils.convertToProtoFormat(entry.getValue())));
+        .setCredentialsForApp(ProtoUtils.convertToProtoFormat(
+            entry.getValue().duplicate())));
 }
   }
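
The underlying hazard: a ByteBuffer carries mutable position/limit state, so
handing the same instance to concurrently running readers is unsafe even when
the bytes themselves never change. duplicate() produces a view with an
independent position and limit over the same content, which is what the
one-line fix relies on. A small standalone illustration (not taken from the
patch):

    import java.nio.ByteBuffer;

    public class DuplicateDemo {
      public static void main(String[] args) {
        ByteBuffer shared = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});

        // The duplicate shares content but owns its position/limit,
        // so reads through it leave the original buffer untouched.
        ByteBuffer copy = shared.duplicate();
        copy.get();
        copy.get();

        System.out.println(shared.position()); // 0
        System.out.println(copy.position());   // 2
      }
    }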
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a06d2d65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index 46d7b10..71a420e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.nodemanager;
 
+import static 
org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils.newNodeHeartbeatResponse;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -37,7 +38,10 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import 

[17/50] [abbrv] hadoop git commit: HADOOP-11466. FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC architecture because it is slower there (Suman Somasundar via Colin P. McCabe)

2015-01-26 Thread zhz
HADOOP-11466. FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC 
architecture because it is slower there (Suman Somasundar via Colin P.  McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ee89ce9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ee89ce9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ee89ce9

Branch: refs/heads/HDFS-EC
Commit: 4ee89ce97b214bcb3e348b2cc2e86f6216d492ad
Parents: f2a8eca
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Jan 21 16:33:02 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:27 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  4 
 .../apache/hadoop/io/FastByteComparisons.java   | 21 +++-
 2 files changed, 24 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ee89ce9/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 66fd138..abe699a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -737,6 +737,10 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11327. BloomFilter#not() omits the last bit, resulting in an
 incorrect filter (Eric Payne via jlowe)
 
+HADOOP-11466. FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC
+architecture because it is slower there (Suman Somasundar via Colin P.
+McCabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ee89ce9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
index 3f5881b..a3fea31 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
@@ -24,6 +24,9 @@ import java.security.PrivilegedAction;
 
 import sun.misc.Unsafe;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 import com.google.common.primitives.Longs;
 import com.google.common.primitives.UnsignedBytes;
 
@@ -33,6 +36,7 @@ import com.google.common.primitives.UnsignedBytes;
  * class to be able to compare arrays that start at non-zero offsets.
  */
 abstract class FastByteComparisons {
+  static final Log LOG = LogFactory.getLog(FastByteComparisons.class);
 
   /**
* Lexicographically compare two byte arrays.
@@ -71,6 +75,13 @@ abstract class FastByteComparisons {
  * implementation if unable to do so.
  */
    static Comparer<byte[]> getBestComparer() {
+      if (System.getProperty("os.arch").equals("sparc")) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Lexicographical comparer selected for "
+              + "byte aligned system architecture");
+        }
+        return lexicographicalComparerJavaImpl();
+      }
      try {
        Class<?> theClass = Class.forName(UNSAFE_COMPARER_NAME);

@@ -78,8 +89,16 @@
        @SuppressWarnings("unchecked")
        Comparer<byte[]> comparer =
            (Comparer<byte[]>) theClass.getEnumConstants()[0];
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Unsafe comparer selected for "
+              + "byte unaligned system architecture");
+        }
        return comparer;
      } catch (Throwable t) { // ensure we really catch *everything*
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(t.getMessage());
+          LOG.trace("Lexicographical comparer selected");
+        }
        return lexicographicalComparerJavaImpl();
   }
 }
@@ -234,4 +253,4 @@ abstract class FastByteComparisons {
   }
 }
   }
-}
\ No newline at end of file
+}
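
The selection is gated on a plain os.arch check before any attempt to load the
Unsafe-based comparer, so on SPARC the pure-Java comparer wins without touching
sun.misc.Unsafe at all. A trivial standalone probe of the same property (for
illustration only):

    public class ArchProbe {
      public static void main(String[] args) {
        // "os.arch" is a standard JVM system property; the patch compares
        // it against "sparc" to choose the portable comparer up front.
        String arch = System.getProperty("os.arch");
        System.out.println(arch + " -> "
            + ("sparc".equals(arch)
                ? "lexicographical (pure Java) comparer"
                : "Unsafe comparer eligible"));
      }
    }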



[42/50] [abbrv] hadoop git commit: HDFS-7320. The appearance of hadoop-hdfs-httpfs site docs is inconsistent (Masatake Iwasaki via aw)

2015-01-26 Thread zhz
HDFS-7320. The appearance of hadoop-hdfs-httpfs site docs is inconsistent 
(Masatake Iwasaki via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8b1ce9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8b1ce9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8b1ce9b

Branch: refs/heads/HDFS-EC
Commit: f8b1ce9bdc64d3afca0e64ee3a50cf27471b2fda
Parents: cc9ed52
Author: Allen Wittenauer a...@apache.org
Authored: Fri Jan 23 14:21:55 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:30 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8b1ce9b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index 0bb6d4b..4c42ef9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -319,7 +319,7 @@
 <goals>
   <goal>dependencies</goal>
 </goals>
-<phase>site</phase>
+<phase>package</phase>
   </execution>
 </executions>
   </plugin>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8b1ce9b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 053b2eb..08f705a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -295,6 +295,9 @@ Trunk (Unreleased)
 
 HDFS-3750. API docs don't include HDFS (Jolly Chen via aw)
 
+HDFS-7320. The appearance of hadoop-hdfs-httpfs site docs is inconsistent 
+(Masatake Iwasaki via aw)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[47/50] [abbrv] hadoop git commit: MAPREDUCE-6141. History server leveldb recovery store. Contributed by Jason Lowe

2015-01-26 Thread zhz
MAPREDUCE-6141. History server leveldb recovery store. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4216800c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4216800c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4216800c

Branch: refs/heads/HDFS-EC
Commit: 4216800c00b2136e73102c7751b56b5d121bea20
Parents: b327379
Author: Jason Lowe jl...@apache.org
Authored: Mon Jan 26 16:28:55 2015 +
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:31 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|   2 +
 .../mapreduce/v2/jobhistory/JHAdminConfig.java  |   7 +
 .../src/main/resources/mapred-default.xml   |   8 +
 .../hadoop-mapreduce-client-hs/pom.xml  |   4 +
 .../HistoryServerLeveldbStateStoreService.java  | 379 +++
 ...stHistoryServerLeveldbStateStoreService.java | 207 ++
 6 files changed, 607 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4216800c/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index b28fc65..35ceb2e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -262,6 +262,8 @@ Release 2.7.0 - UNRELEASED
 cache with enabling wired encryption at the same time. 
 (Junping Du via xgong)
 
+MAPREDUCE-6141. History server leveldb recovery store (jlowe)
+
   OPTIMIZATIONS
 
 MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4216800c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
index e5a49b5..f7cba9f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
@@ -197,6 +197,13 @@ public class JHAdminConfig {
   public static final String MR_HS_FS_STATE_STORE_URI =
       MR_HISTORY_PREFIX + "recovery.store.fs.uri";
 
+  /**
+   * The local path where server state will be stored when
+   * HistoryServerLeveldbStateStoreService is configured as the state store
+   */
+  public static final String MR_HS_LEVELDB_STATE_STORE_PATH =
+      MR_HISTORY_PREFIX + "recovery.store.leveldb.path";
+
   /** Whether to use fixed ports with the minicluster. */
   public static final String MR_HISTORY_MINICLUSTER_FIXED_PORTS =
       MR_HISTORY_PREFIX + "minicluster.fixed.ports";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4216800c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 57a17a8..4535137 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1569,6 +1569,14 @@
 </property>
 
 <property>
+  <name>mapreduce.jobhistory.recovery.store.leveldb.path</name>
+  <value>${hadoop.tmp.dir}/mapred/history/recoverystore</value>
+  <description>The URI where history server state will be stored if
+  HistoryServerLeveldbSystemStateStoreService is configured as the recovery
+  storage class.</description>
+</property>
+
+<property>
   <name>mapreduce.jobhistory.http.policy</name>
   <value>HTTP_ONLY</value>
   <description>
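
For context, the new path property only matters once history server recovery is
switched on and the leveldb implementation is chosen as the store. A hedged
sketch of wiring that up programmatically; the recovery.enable and
recovery.store.class keys are the standard JobHistoryServer recovery settings
rather than part of this patch, so verify them against your Hadoop version:

    import org.apache.hadoop.conf.Configuration;

    public class JhsLeveldbRecoverySetup {
      public static Configuration configure(Configuration conf) {
        conf.setBoolean("mapreduce.jobhistory.recovery.enable", true);
        conf.set("mapreduce.jobhistory.recovery.store.class",
            "org.apache.hadoop.mapreduce.v2.hs."
                + "HistoryServerLeveldbStateStoreService");
        // Property added by this patch; defaults to
        // ${hadoop.tmp.dir}/mapred/history/recoverystore.
        conf.set("mapreduce.jobhistory.recovery.store.leveldb.path",
            "/var/lib/hadoop-mapreduce/jhs-recovery");
        return conf;
      }
    }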

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4216800c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
 

[29/50] [abbrv] hadoop git commit: HADOOP-11008. Remove duplicated description about proxy-user in site documents (Masatake Iwasaki via aw)

2015-01-26 Thread zhz
HADOOP-11008. Remove duplicated description about proxy-user in site documents 
(Masatake Iwasaki via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c13d501f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c13d501f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c13d501f

Branch: refs/heads/HDFS-EC
Commit: c13d501f51bc26368ef04631c2aadc2365a05c26
Parents: abb0115
Author: Allen Wittenauer a...@apache.org
Authored: Thu Jan 22 14:30:21 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:28 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../src/site/apt/SecureMode.apt.vm  | 53 +-
 .../src/site/apt/Superusers.apt.vm  | 74 
 hadoop-project/src/site/site.xml|  2 +-
 4 files changed, 64 insertions(+), 68 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c13d501f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index aaa7041..47eaf7b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -747,6 +747,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11500. InputStream is left unclosed in ApplicationClassLoader.
 (Ted Yu via ozawa)
 
+HADOOP-11008. Remove duplicated description about proxy-user in site 
+documents (Masatake Iwasaki via aw)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c13d501f/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
--
diff --git a/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm 
b/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
index 0a11bef..0235219 100644
--- a/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
+++ b/hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
@@ -202,58 +202,7 @@ KVNO Timestamp Principal
 
   Some products such as Apache Oozie which access the services of Hadoop
   on behalf of end users need to be able to impersonate end users.
-  You can configure proxy user using properties
-  hadoop.proxyuser.${superuser}.hosts along with either or both of 
-  hadoop.proxyuser.${superuser}.groups
-  and hadoop.proxyuser.${superuser}.users.
-
-  For example, by specifying as below in core-site.xml,
-  user named oozie accessing from any host
-  can impersonate any user belonging to any group.
-
-
-  <property>
-    <name>hadoop.proxyuser.oozie.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.oozie.groups</name>
-    <value>*</value>
-  </property>
-
-
-  User named oozie accessing from any host
-  can impersonate user1 and user2 by specifying as below in core-site.xml.
-
-
-  <property>
-    <name>hadoop.proxyuser.oozie.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.oozie.users</name>
-    <value>user1,user2</value>
-  </property>
-
-
-  The hadoop.proxyuser.${superuser}.hosts accepts list of ip addresses,
-  ip address ranges in CIDR format and/or host names.
-  
-  For example, by specifying as below in core-site.xml,
-  user named oozie accessing from hosts in the range 
-  10.222.0.0-15 and 10.113.221.221
-  can impersonate any user belonging to any group.
-  
-
-  <property>
-    <name>hadoop.proxyuser.oozie.hosts</name>
-    <value>10.222.0.0/16,10.113.221.221</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.oozie.groups</name>
-    <value>*</value>
-  </property>
-
+  See {{{./Superusers.html}the doc of proxy user}} for details.
 
 ** Secure DataNode
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c13d501f/hadoop-common-project/hadoop-common/src/site/apt/Superusers.apt.vm
--
diff --git a/hadoop-common-project/hadoop-common/src/site/apt/Superusers.apt.vm 
b/hadoop-common-project/hadoop-common/src/site/apt/Superusers.apt.vm
index f940884..78ed9a4 100644
--- a/hadoop-common-project/hadoop-common/src/site/apt/Superusers.apt.vm
+++ b/hadoop-common-project/hadoop-common/src/site/apt/Superusers.apt.vm
@@ -11,19 +11,19 @@
 ~~ limitations under the License. See accompanying LICENSE file.
 
   ---
-  Superusers Acting On Behalf Of Other Users
+  Proxy user - Superusers Acting On Behalf Of Other Users
   ---
   ---
   ${maven.build.timestamp}
 
-Superusers Acting On Behalf Of Other Users
+Proxy user - Superusers Acting On Behalf Of Other Users
 
 %{toc|section=1|fromDepth=0}
 
 * Introduction
 
This document 

[03/50] [abbrv] hadoop git commit: HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via Colin P. McCabe)

2015-01-26 Thread zhz
HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via Colin 
P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef4453db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef4453db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef4453db

Branch: refs/heads/HDFS-EC
Commit: ef4453dba91e70165da33a14bf2688b7222ae508
Parents: 8c130ae
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Tue Jan 20 20:11:09 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:25 2015 -0800

--
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 16 +
 .../datanode/fsdataset/impl/FsVolumeList.java   |  8 +++--
 .../fsdataset/impl/TestFsDatasetImpl.java   | 37 ++--
 3 files changed, 49 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef4453db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 5347323..d8cc287 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -342,7 +342,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
 StorageType storageType = location.getStorageType();
 final FsVolumeImpl fsVolume = new FsVolumeImpl(
-this, sd.getStorageUuid(), dir, this.conf, storageType);
+this, sd.getStorageUuid(), sd.getCurrentDir(), this.conf, storageType);
 final ReplicaMap tempVolumeMap = new ReplicaMap(fsVolume);
 ArrayList<IOException> exceptions = Lists.newArrayList();
 
@@ -385,19 +385,19 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
*/
   @Override
   public synchronized void removeVolumes(Collection<StorageLocation> volumes) {
-    Set<File> volumeSet = new HashSet<File>();
+    Set<String> volumeSet = new HashSet<>();
     for (StorageLocation sl : volumes) {
-      volumeSet.add(sl.getFile());
+      volumeSet.add(sl.getFile().getAbsolutePath());
     }
     for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
       Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-      if (volumeSet.contains(sd.getRoot())) {
-        String volume = sd.getRoot().toString();
+      String volume = sd.getRoot().getAbsolutePath();
+      if (volumeSet.contains(volume)) {
         LOG.info("Removing " + volume + " from FsDataset.");
 
 // Disable the volume from the service.
 asyncDiskService.removeVolume(sd.getCurrentDir());
-this.volumes.removeVolume(volume);
+this.volumes.removeVolume(sd.getRoot());
 
 // Removed all replica information for the blocks on the volume. Unlike
 // updating the volumeMap in addVolume(), this operation does not scan
@@ -407,7 +407,9 @@ class FsDatasetImpl implements FsDatasetSpiFsVolumeImpl {
   for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
   it.hasNext(); ) {
 ReplicaInfo block = it.next();
-if (block.getVolume().getBasePath().equals(volume)) {
+String absBasePath =
+  new File(block.getVolume().getBasePath()).getAbsolutePath();
+if (absBasePath.equals(volume)) {
   invalidate(bpid, block);
   blocks.add(block);
   it.remove();
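
The essence of the fix: StorageLocation paths and StorageDirectory roots can
name the same directory through different relative/absolute forms, and
java.io.File equality is a textual comparison of the abstract pathname, so the
old Set<File> lookup could miss dynamically added volumes. Normalizing both
sides to absolute path strings makes the membership test robust. A tiny
standalone illustration:

    import java.io.File;

    public class FileEqualityDemo {
      public static void main(String[] args) {
        File relative = new File("data/volume1");
        File absolute = relative.getAbsoluteFile();

        // File.equals compares pathname strings, not the directory they
        // resolve to, so relative and absolute forms are "different".
        System.out.println(relative.equals(absolute)); // false
        System.out.println(relative.getAbsolutePath()
            .equals(absolute.getAbsolutePath()));      // true
      }
    }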

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef4453db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index ba19897..c837593 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import java.io.File;
 import java.io.IOException;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;

[33/50] [abbrv] hadoop git commit: HDFS-7667. Various typos and improvements to HDFS Federation doc (Charles Lamb via aw)

2015-01-26 Thread zhz
HDFS-7667. Various typos and improvements to HDFS Federation doc  (Charles Lamb 
via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d56e750e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d56e750e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d56e750e

Branch: refs/heads/HDFS-EC
Commit: d56e750e4f413018c1b80736fc4cbfa32f933c0d
Parents: 2e02d86
Author: Allen Wittenauer a...@apache.org
Authored: Fri Jan 23 13:37:46 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:29 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop-hdfs/src/site/apt/Federation.apt.vm  | 207 +--
 2 files changed, 105 insertions(+), 105 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d56e750e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9176ec7..c9bee1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -290,6 +290,9 @@ Trunk (Unreleased)
 HADOOP-11484. hadoop-mapreduce-client-nativetask fails to build on ARM
 AARCH64 due to x86 asm statements (Edward Nevill via Colin P. McCabe)
 
+HDFS-7667. Various typos and improvements to HDFS Federation doc
+(Charles Lamb via aw)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d56e750e/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm
index 29278b7..17aaf3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm
@@ -32,16 +32,16 @@ HDFS Federation
 
   * Namespace
 
-* Consists of directories, files and blocks
+* Consists of directories, files and blocks.
 
 * It supports all the namespace related file system operations such as
   create, delete, modify and list files and directories.
 
-  * Block Storage Service has two parts
+  * Block Storage Service, which has two parts:
 
-* Block Management (which is done in Namenode)
+* Block Management (performed in the Namenode)
 
-  * Provides datanode cluster membership by handling registrations, and
+  * Provides Datanode cluster membership by handling registrations, and
 periodic heart beats.
 
   * Processes block reports and maintains location of blocks.
@@ -49,29 +49,29 @@ HDFS Federation
   * Supports block related operations such as create, delete, modify and
 get block location.
 
-  * Manages replica placement and replication of a block for under
-replicated blocks and deletes blocks that are over replicated.
+  * Manages replica placement, block replication for under
+replicated blocks, and deletes blocks that are over replicated.
 
-* Storage - is provided by datanodes by storing blocks on the local file
-  system and allows read/write access.
+* Storage - is provided by Datanodes by storing blocks on the local file
+  system and allowing read/write access.
 
   The prior HDFS architecture allows only a single namespace for the
-  entire cluster. A single Namenode manages this namespace. HDFS
-  Federation addresses limitation of the prior architecture by adding
-  support multiple Namenodes/namespaces to HDFS file system.
+  entire cluster. In that configuration, a single Namenode manages the
+  namespace. HDFS Federation addresses this limitation by adding
+  support for multiple Namenodes/namespaces to HDFS.
 
 * {Multiple Namenodes/Namespaces}
 
   In order to scale the name service horizontally, federation uses multiple
-  independent Namenodes/namespaces. The Namenodes are federated, that is, the
+  independent Namenodes/namespaces. The Namenodes are federated; the
   Namenodes are independent and do not require coordination with each other.
-  The datanodes are used as common storage for blocks by all the Namenodes.
-  Each datanode registers with all the Namenodes in the cluster. Datanodes
-  send periodic heartbeats and block reports and handles commands from the
-  Namenodes.
+  The Datanodes are used as common storage for blocks by all the Namenodes.
+  Each Datanode registers with all the Namenodes in the cluster. Datanodes
+  send periodic heartbeats and block reports. They also handle
+  commands from the Namenodes.
 
-  Users may use {{{./ViewFs.html}ViewFs}} to create personalized namespace 

[01/50] [abbrv] hadoop git commit: HDFS-7643. Test case to ensure lazy persist files cannot be truncated. (Contributed by Yi Liu)

2015-01-26 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-EC 26da8acda - 7ab153e4a


HDFS-7643. Test case to ensure lazy persist files cannot be truncated. 
(Contributed by Yi Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cab6f960
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cab6f960
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cab6f960

Branch: refs/heads/HDFS-EC
Commit: cab6f9608e43727408a4efe51fb2f0522610ef68
Parents: 4995923
Author: Arpit Agarwal a...@apache.org
Authored: Tue Jan 20 21:58:15 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:25 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../fsdataset/impl/TestLazyPersistFiles.java| 26 +++-
 2 files changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cab6f960/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2c7fbc7..1801d2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -281,6 +281,9 @@ Trunk (Unreleased)
 HDFS-7634. Disallow truncation of Lazy persist files. (Yi Liu via
 Arpit Agarwal)
 
+HDFS-7643. Test case to ensure lazy persist files cannot be truncated.
+(Yi Liu via Arpit Agarwal)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cab6f960/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
index ad2197a..84ac2a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
@@ -23,13 +23,10 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
@@ -37,9 +34,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.apache.hadoop.hdfs.StorageType.DEFAULT;
@@ -248,6 +242,26 @@ public class TestLazyPersistFiles extends 
LazyPersistTestCase {
   }
 
   /**
+   * Truncate to lazy persist file is denied.
+   * @throws IOException
+   */
+  @Test
+  public void testTruncateIsDenied() throws IOException {
+startUpCluster(true, -1);
+final String METHOD_NAME = GenericTestUtils.getMethodName();
+    Path path = new Path("/" + METHOD_NAME + ".dat");
+
+makeTestFile(path, BLOCK_SIZE, true);
+
+try {
+  client.truncate(path.toString(), BLOCK_SIZE/2);
+      fail("Truncate to LazyPersist file did not fail as expected");
+} catch (Throwable t) {
+      LOG.info("Got expected exception ", t);
+}
+  }
+
+  /**
* If one or more replicas of a lazyPersist file are lost, then the file
* must be discarded by the NN, instead of being kept around as a
* 'corrupt' file.



[14/50] [abbrv] hadoop git commit: HADOOP-11256. Some site docs have inconsistent appearance (Masatake Iwasaki via aw)

2015-01-26 Thread zhz
HADOOP-11256. Some site docs have inconsistent appearance (Masatake Iwasaki via 
aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd457d3a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd457d3a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd457d3a

Branch: refs/heads/HDFS-EC
Commit: bd457d3ac613ed1a780a886774b512c638e37478
Parents: cab6f96
Author: Allen Wittenauer a...@apache.org
Authored: Wed Jan 21 08:44:22 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:26 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../src/site/resources/css/site.css | 30 
 .../src/site/resources/css/site.css | 30 
 .../src/site/resources/css/site.css | 30 
 4 files changed, 93 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd457d3a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 339ccfb..2951002 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -510,6 +510,9 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-11256. Some site docs have inconsistent appearance (Masatake 
+Iwasaki via aw)
+
 HADOOP-11318. Update the document for hadoop fs -stat (aajisaka)
 
HADOOP-11400. GraphiteSink does not reconnect to Graphite after 'broken pipe' 
pipe' 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd457d3a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/resources/css/site.css
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/resources/css/site.css
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/resources/css/site.css
new file mode 100644
index 000..f830baa
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/resources/css/site.css
@@ -0,0 +1,30 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#banner {
+  height: 93px;
+  background: none;
+}
+
+#bannerLeft img {
+  margin-left: 30px;
+  margin-top: 10px;
+}
+
+#bannerRight img {
+  margin: 17px;
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd457d3a/hadoop-tools/hadoop-gridmix/src/site/resources/css/site.css
--
diff --git a/hadoop-tools/hadoop-gridmix/src/site/resources/css/site.css 
b/hadoop-tools/hadoop-gridmix/src/site/resources/css/site.css
new file mode 100644
index 000..f830baa
--- /dev/null
+++ b/hadoop-tools/hadoop-gridmix/src/site/resources/css/site.css
@@ -0,0 +1,30 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#banner {
+  height: 93px;
+  background: none;
+}
+
+#bannerLeft img {
+  margin-left: 30px;
+  margin-top: 10px;
+}
+
+#bannerRight img {
+  margin: 17px;
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd457d3a/hadoop-tools/hadoop-rumen/src/site/resources/css/site.css

[16/50] [abbrv] hadoop git commit: MAPREDUCE-5785. Derive heap size or mapreduce.*.memory.mb automatically. (Gera Shegalov and Karthik Kambatla via gera)

2015-01-26 Thread zhz
MAPREDUCE-5785. Derive heap size or mapreduce.*.memory.mb automatically. (Gera 
Shegalov and Karthik Kambatla via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a691658a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a691658a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a691658a

Branch: refs/heads/HDFS-EC
Commit: a691658a86562415b9836c46198ff33d125c68bf
Parents: 4ee89ce
Author: Gera Shegalov g...@apache.org
Authored: Wed Jan 21 18:41:43 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:27 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|   2 +
 .../apache/hadoop/mapred/MapReduceChildJVM.java |  32 +
 .../v2/app/job/impl/TaskAttemptImpl.java|  15 +--
 .../v2/app/job/impl/TestMapReduceChildJVM.java  |  93 +++--
 .../java/org/apache/hadoop/mapred/JobConf.java  | 129 ++-
 .../java/org/apache/hadoop/mapred/Task.java |   6 +-
 .../apache/hadoop/mapreduce/MRJobConfig.java|   5 +
 .../src/main/resources/mapred-default.xml   |  38 --
 8 files changed, 252 insertions(+), 68 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a691658a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 489369d..b28fc65 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -3,6 +3,8 @@ Hadoop MapReduce Change Log
 Trunk (Unreleased)
 
   INCOMPATIBLE CHANGES
+MAPREDUCE-5785. Derive heap size or mapreduce.*.memory.mb automatically.
+(Gera Shegalov and Karthik Kambatla via gera)
 
   NEW FEATURES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a691658a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
index 817b3a5..936dc5a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
@@ -27,6 +27,7 @@ import java.util.Vector;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.TaskLog.LogName;
 import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
@@ -99,36 +100,7 @@ public class MapReduceChildJVM {
   }
 
   private static String getChildJavaOpts(JobConf jobConf, boolean isMapTask) {
-    String userClasspath = "";
-    String adminClasspath = "";
-if (isMapTask) {
-  userClasspath = 
-  jobConf.get(
-  JobConf.MAPRED_MAP_TASK_JAVA_OPTS, 
-  jobConf.get(
-  JobConf.MAPRED_TASK_JAVA_OPTS, 
-  JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)
-  );
-  adminClasspath = 
-  jobConf.get(
-  MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,
-  MRJobConfig.DEFAULT_MAPRED_ADMIN_JAVA_OPTS);
-} else {
-  userClasspath =
-  jobConf.get(
-  JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, 
-  jobConf.get(
-  JobConf.MAPRED_TASK_JAVA_OPTS,
-  JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)
-  );
-  adminClasspath =
-  jobConf.get(
-  MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS,
-  MRJobConfig.DEFAULT_MAPRED_ADMIN_JAVA_OPTS);
-}
-
-// Add admin classpath first so it can be overridden by user.
-    return adminClasspath + " " + userClasspath;
+return jobConf.getTaskJavaOpts(isMapTask ? TaskType.MAP : TaskType.REDUCE);
   }
 
   public static List<String> getVMCommand(
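
Both callers now funnel through a single JobConf helper which, per this change,
can derive whichever of the Xmx setting and mapreduce.*.memory.mb the user left
unset from the one that was provided. A deliberately simplified, hypothetical
sketch of the derivation direction only; the 0.8 ratio mirrors the
mapreduce.job.heap.memory-mb.ratio default this patch adds to
mapred-default.xml, but the helper below is illustrative and not the actual
JobConf code:

    public class HeapDerivation {
      // Assumed default of mapreduce.job.heap.memory-mb.ratio.
      static final double HEAP_RATIO = 0.8;

      /** Derive an -Xmx flag when only the container size was set. */
      static String deriveXmx(int containerMemoryMb) {
        return "-Xmx" + (long) (containerMemoryMb * HEAP_RATIO) + "m";
      }

      public static void main(String[] args) {
        System.out.println(deriveXmx(2048)); // -Xmx1638m
      }
    }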

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a691658a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
--
diff --git 

[04/50] [abbrv] hadoop git commit: HDFS-7496. Fix FsVolume removal race conditions on the DataNode by reference-counting the volume instances (lei via cmccabe)

2015-01-26 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9014305e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeListTest.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeListTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeListTest.java
new file mode 100644
index 000..f92d949
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeListTest.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
+import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertNotEquals;
+import static org.mockito.Mockito.mock;
+
+public class FsVolumeListTest {
+
+  private final Configuration conf = new Configuration();
+  private VolumeChoosingPolicy<FsVolumeImpl> blockChooser =
+      new RoundRobinVolumeChoosingPolicy<>();
+  private FsDatasetImpl dataset = null;
+  private String baseDir;
+
+  @Before
+  public void setUp() {
+dataset = mock(FsDatasetImpl.class);
+baseDir = new FileSystemTestHelper().getTestRootDir();
+  }
+
+  @Test
+  public void testGetNextVolumeWithClosedVolume() throws IOException {
+FsVolumeList volumeList = new FsVolumeList(0, blockChooser);
+    List<FsVolumeImpl> volumes = new ArrayList<>();
+    for (int i = 0; i < 3; i++) {
+      File curDir = new File(baseDir, "nextvolume-" + i);
+      curDir.mkdirs();
+      FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
+  conf, StorageType.DEFAULT);
+  volume.setCapacityForTesting(1024 * 1024 * 1024);
+  volumes.add(volume);
+  volumeList.addVolume(volume);
+}
+
+// Close the second volume.
+volumes.get(1).closeAndWait();
+    for (int i = 0; i < 10; i++) {
+  try (FsVolumeReference ref =
+  volumeList.getNextVolume(StorageType.DEFAULT, 128)) {
+// volume No.2 will not be chosen.
+assertNotEquals(ref.getVolume(), volumes.get(1));
+  }
+}
+  }
+
+  @Test
+  public void testCheckDirsWithClosedVolume() throws IOException {
+FsVolumeList volumeList = new FsVolumeList(0, blockChooser);
+    List<FsVolumeImpl> volumes = new ArrayList<>();
+    for (int i = 0; i < 3; i++) {
+      File curDir = new File(baseDir, "volume-" + i);
+      curDir.mkdirs();
+      FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
+  conf, StorageType.DEFAULT);
+  volumes.add(volume);
+  volumeList.addVolume(volume);
+}
+
+// Close the 2nd volume.
+volumes.get(1).closeAndWait();
+// checkDirs() should ignore the 2nd volume since it is closed.
+volumeList.checkDirs();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9014305e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index aa4b68c..0120dfe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ 

[50/50] [abbrv] hadoop git commit: HDFS-7224. Allow reuse of NN connections via webhdfs. Contributed by Eric Payne

2015-01-26 Thread zhz
 HDFS-7224. Allow reuse of NN connections via webhdfs. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21f5c51b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21f5c51b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21f5c51b

Branch: refs/heads/HDFS-EC
Commit: 21f5c51b87ed394056b79dc5d4010b03edb97dcd
Parents: b2de93d
Author: Kihwal Lee kih...@apache.org
Authored: Mon Jan 26 08:14:30 2015 -0600
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:31 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 20 
 .../hdfs/web/TestFSMainOperationsWebHdfs.java   | 49 
 3 files changed, 64 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21f5c51b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a6cbf8f..39453d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -546,6 +546,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7623. Add htrace configuration properties to core-default.xml and
 update user doc about how to enable htrace. (yliu)
 
+HDFS-7224. Allow reuse of NN connections via webhdfs (Eric Payne via
+kihwal)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21f5c51b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 559efdb..460e78b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -312,16 +312,20 @@ public class WebHdfsFileSystem extends FileSystem
     if (in == null) {
       throw new IOException("The " + (useErrorStream? "error": "input") + " stream is null.");
     }
-    final String contentType = c.getContentType();
-    if (contentType != null) {
-      final MediaType parsed = MediaType.valueOf(contentType);
-      if (!MediaType.APPLICATION_JSON_TYPE.isCompatible(parsed)) {
-        throw new IOException("Content-Type \"" + contentType
-            + "\" is incompatible with \"" + MediaType.APPLICATION_JSON
-            + "\" (parsed=\"" + parsed + "\")");
+    try {
+      final String contentType = c.getContentType();
+      if (contentType != null) {
+        final MediaType parsed = MediaType.valueOf(contentType);
+        if (!MediaType.APPLICATION_JSON_TYPE.isCompatible(parsed)) {
+          throw new IOException("Content-Type \"" + contentType
+              + "\" is incompatible with \"" + MediaType.APPLICATION_JSON
+              + "\" (parsed=\"" + parsed + "\")");
+        }
       }
+      return (Map<?, ?>)JSON.parse(new InputStreamReader(in, Charsets.UTF_8));
+    } finally {
+      in.close();
     }
-    return (Map<?, ?>)JSON.parse(new InputStreamReader(in, Charsets.UTF_8));
   }
 
   private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
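
The connection-reuse angle of this patch: HttpURLConnection can only return a
socket to the JVM's keep-alive pool once the response body has been consumed
and the stream closed, so closing in a finally block, on the error path too,
is what lets subsequent webhdfs calls reuse the NameNode connection instead of
opening a new one. A generic sketch of the same discipline (not
WebHdfsFileSystem code):

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class KeepAliveFriendlyGet {
      // Drain and close the body so the socket stays eligible for
      // HTTP/1.1 keep-alive reuse instead of being abandoned.
      static int drain(URL url) throws Exception {
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        InputStream in = conn.getInputStream();
        int total = 0;
        try {
          byte[] buf = new byte[4096];
          int n;
          while ((n = in.read(buf)) != -1) {
            total += n;
          }
        } finally {
          in.close(); // required for the connection to be reused
        }
        return total;
      }
    }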

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21f5c51b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
index b4216f0..4975a87 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
@@ -17,8 +17,14 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.doReturn;
+
 import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
 import java.net.URI;
+import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -32,6 +38,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import 

[43/50] [abbrv] hadoop git commit: HDFS-7659. truncate should check negative value of the new length. Contributed by Yi Liu.

2015-01-26 Thread zhz
HDFS-7659. truncate should check negative value of the new length. Contributed 
by Yi Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27c98cca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27c98cca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27c98cca

Branch: refs/heads/HDFS-EC
Commit: 27c98ccab3801456fb5fdc7f8414490ce7ada695
Parents: f8b1ce9
Author: yliu y...@apache.org
Authored: Sat Jan 24 15:41:06 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:30 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java | 4 
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 4 
 .../hadoop/hdfs/server/namenode/TestFileTruncate.java   | 9 +
 4 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27c98cca/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 08f705a..cca755e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -143,6 +143,9 @@ Trunk (Unreleased)
 HDFS-7430. Rewrite the BlockScanner to use O(1) memory and use multiple
 threads (cmccabe)
 
+HDFS-7659. truncate should check negative value of the new length.
+(Yi Liu via shv)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27c98cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 1bb7f4a..21f75a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1984,6 +1984,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
*/
   public boolean truncate(String src, long newLength) throws IOException {
 checkOpen();
+    if (newLength < 0) {
+      throw new HadoopIllegalArgumentException(
+          "Cannot truncate to a negative file size: " + newLength + ".");
+    }
     TraceScope scope = getPathTraceScope("truncate", src);
 try {
   return namenode.truncate(src, newLength, clientName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27c98cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6a8f574..fae1641 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1911,6 +1911,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
       NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src="
           + src + " newLength=" + newLength);
     }
+    if (newLength < 0) {
+      throw new HadoopIllegalArgumentException(
+          "Cannot truncate to a negative file size: " + newLength + ".");
+}
 HdfsFileStatus stat = null;
 FSPermissionChecker pc = getPermissionChecker();
 checkOperation(OperationCategory.WRITE);
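
With the check present in both DFSClient and FSNamesystem, a negative length is
rejected before any namespace change: client-side for DistributedFileSystem
users, and server-side for callers reaching the RPC directly. A minimal hedged
usage sketch (the path is made up; FileSystem#truncate exists as of the 2.7
truncate feature):

    import org.apache.hadoop.HadoopIllegalArgumentException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TruncateGuardDemo {
      // fs is assumed to be an HDFS-backed FileSystem instance.
      static void tryNegativeTruncate(FileSystem fs) throws Exception {
        try {
          fs.truncate(new Path("/tmp/data.log"), -1);
        } catch (HadoopIllegalArgumentException e) {
          // e.g. "Cannot truncate to a negative file size: -1."
          System.err.println(e.getMessage());
        }
      }
    }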

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27c98cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 5498b12..1612a24 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -34,6 +34,7 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import 

[30/50] [abbrv] hadoop git commit: HDFS-7575. Fix CHANGES.txt

2015-01-26 Thread zhz
HDFS-7575. Fix CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8262acf9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8262acf9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8262acf9

Branch: refs/heads/HDFS-EC
Commit: 8262acf91ee7e34cd7235e7ac942ba470df06acc
Parents: c13d501
Author: Arpit Agarwal a...@apache.org
Authored: Thu Jan 22 14:33:06 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:28 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8262acf9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 866b765..7c5c639 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -762,6 +762,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7548. Corrupt block reporting delayed until datablock scanner thread
 detects it (Rushabh Shah via kihwal)
 
+HDFS-7575. Upgrade should generate a unique storage ID for each
+volume. (Arpit Agarwal)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[40/50] [abbrv] hadoop git commit: HADOOP-11419 improve hadoop-maven-plugins. (Hervé Boutemy via stevel)

2015-01-26 Thread zhz
HADOOP-11419 improve hadoop-maven-plugins.  (Hervé Boutemy via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5adb1252
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5adb1252
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5adb1252

Branch: refs/heads/HDFS-EC
Commit: 5adb125218dd687c2cc632cefe00a8d746963f35
Parents: 38cbafd
Author: Steve Loughran ste...@apache.org
Authored: Sun Jan 25 16:12:58 2015 +
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:30 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  2 ++
 hadoop-maven-plugins/pom.xml | 19 +--
 .../hadoop/maven/plugin/protoc/ProtocMojo.java   |  7 ++-
 .../hadoop/maven/plugin/util/FileSetUtils.java   |  2 +-
 .../plugin/versioninfo/VersionInfoMojo.java  |  3 +--
 5 files changed, 11 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5adb1252/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c0617e8..8618e38 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -497,6 +497,8 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11489 Dropping dependency on io.netty from hadoop-nfs' pom.xml
 (Ted Yu via ozawa)
 
+HADOOP-11419 Improve hadoop-maven-plugins. (Herve Boutemy via stevel)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5adb1252/hadoop-maven-plugins/pom.xml
--
diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml
index 4beaf7c..b48b9ac 100644
--- a/hadoop-maven-plugins/pom.xml
+++ b/hadoop-maven-plugins/pom.xml
@@ -28,6 +28,7 @@
   <name>Apache Hadoop Maven Plugins</name>
   <properties>
     <maven.dependency.version>3.0</maven.dependency.version>
+    <maven.plugin-tools.version>3.4</maven.plugin-tools.version>
   </properties>
   <dependencies>
     <dependency>
@@ -43,30 +44,20 @@
     <dependency>
       <groupId>org.apache.maven.plugin-tools</groupId>
       <artifactId>maven-plugin-annotations</artifactId>
-      <version>${maven.dependency.version}</version>
+      <version>${maven.plugin-tools.version}</version>
       <scope>provided</scope>
     </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
   </dependencies>
   <build>
     <plugins>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-plugin-plugin</artifactId>
-        <version>${maven.dependency.version}</version>
-        <configuration>
-          <skipErrorNoDescriptorsFound>true</skipErrorNoDescriptorsFound>
-        </configuration>
+        <version>${maven.plugin-tools.version}</version>
         <executions>
           <execution>
-            <id>mojo-descriptor</id>
-            <goals>
-              <goal>descriptor</goal>
-            </goals>
+            <id>default-descriptor</id>
+            <phase>process-classes</phase>
           </execution>
         </executions>
       </plugin>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5adb1252/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
--
diff --git 
a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
 
b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
index 86ba7bf..465b713 100644
--- 
a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
+++ 
b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
@@ -31,7 +31,7 @@ import java.util.List;
 @Mojo(name="protoc", defaultPhase = LifecyclePhase.GENERATE_SOURCES)
 public class ProtocMojo extends AbstractMojo {
 
-  @Parameter(defaultValue="${project}")
+  @Parameter(defaultValue="${project}", readonly=true)
   private MavenProject project;
 
   @Parameter
@@ -43,7 +43,7 @@ public class ProtocMojo extends AbstractMojo {
   @Parameter(required=true)
   private FileSet source;
 
-  @Parameter
+  @Parameter(defaultValue="protoc")
   private String protocCommand;
 
   @Parameter(required=true)
@@ -51,9 +51,6 @@ public class ProtocMojo extends AbstractMojo {
 
   public void execute() throws MojoExecutionException {
 try {
-  if (protocCommand == null || protocCommand.trim().isEmpty()) {
-protocCommand = "protoc";
-  }
   List<String> command = new ArrayList<String>();
   command.add(protocCommand);
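The net effect of this hunk: the default protoc binary name moves from an imperative null/empty check into the @Parameter annotation, so the Maven plugin framework injects it before execute() runs. A minimal sketch of the annotation-driven pattern (hypothetical EchoMojo, assuming maven-plugin-annotations 3.x):

    import org.apache.maven.plugin.AbstractMojo;
    import org.apache.maven.plugin.MojoExecutionException;
    import org.apache.maven.plugins.annotations.Mojo;
    import org.apache.maven.plugins.annotations.Parameter;

    // Hypothetical mojo: the framework injects "echo" unless the POM
    // overrides it, so execute() needs no fallback logic of its own.
    @Mojo(name = "echo")
    public class EchoMojo extends AbstractMojo {

      @Parameter(defaultValue = "echo")
      private String command;

      public void execute() throws MojoExecutionException {
        getLog().info("would run: " + command);
      }
    }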
 

[25/50] [abbrv] hadoop git commit: HADOOP-11493. Fix some typos in kms-acls.xml description. (Contributed by Charles Lamb)

2015-01-26 Thread zhz
HADOOP-11493. Fix some typos in kms-acls.xml description. (Contributed by 
Charles Lamb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c265a0e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c265a0e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c265a0e3

Branch: refs/heads/HDFS-EC
Commit: c265a0e3f687ae0cda01577f91c9401575545eac
Parents: d3b92a1
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri Jan 23 11:48:19 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Jan 26 09:43:28 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt| 3 +++
 hadoop-common-project/hadoop-kms/src/main/conf/kms-acls.xml| 2 +-
 hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm | 6 +++---
 .../hadoop-kms/src/test/resources/mini-kms-acls-default.xml| 4 ++--
 4 files changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c265a0e3/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 47eaf7b..bab2220 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -750,6 +750,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11008. Remove duplicated description about proxy-user in site 
 documents (Masatake Iwasaki via aw)
 
+HADOOP-11493. Fix some typos in kms-acls.xml description.
+(Charles Lamb via aajisaka)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c265a0e3/hadoop-common-project/hadoop-kms/src/main/conf/kms-acls.xml
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/conf/kms-acls.xml 
b/hadoop-common-project/hadoop-kms/src/main/conf/kms-acls.xml
index 1d5b649..cba69f4 100644
--- a/hadoop-common-project/hadoop-kms/src/main/conf/kms-acls.xml
+++ b/hadoop-common-project/hadoop-kms/src/main/conf/kms-acls.xml
@@ -41,7 +41,7 @@
    <value>*</value>
    <description>
      ACL for rollover-key operations.
-      If the user does is not in the GET ACL, the key material is not returned
+      If the user is not in the GET ACL, the key material is not returned
      as part of the response.
    </description>
  </property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c265a0e3/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
--
diff --git a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm 
b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
index 80d9a48..a2dcce3 100644
--- a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
@@ -299,7 +299,7 @@ $ keytool -genkey -alias tomcat -keyalg RSA
    <value>*</value>
    <description>
      ACL for create-key operations.
-      If the user does is not in the GET ACL, the key material is not returned
+      If the user is not in the GET ACL, the key material is not returned
      as part of the response.
    </description>
  </property>
@@ -309,7 +309,7 @@ $ keytool -genkey -alias tomcat -keyalg RSA
    <value>hdfs,foo</value>
    <description>
      Blacklist for create-key operations.
-      If the user does is in the Blacklist, the key material is not returned
+      If the user is in the Blacklist, the key material is not returned
      as part of the response.
    </description>
  </property>
@@ -335,7 +335,7 @@ $ keytool -genkey -alias tomcat -keyalg RSA
    <value>*</value>
    <description>
      ACL for rollover-key operations.
-      If the user does is not in the GET ACL, the key material is not returned
+      If the user is not in the GET ACL, the key material is not returned
      as part of the response.
    </description>
  </property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c265a0e3/hadoop-common-project/hadoop-kms/src/test/resources/mini-kms-acls-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/resources/mini-kms-acls-default.xml 
b/hadoop-common-project/hadoop-kms/src/test/resources/mini-kms-acls-default.xml
index 24a46b8..6ac4155 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/resources/mini-kms-acls-default.xml
+++ 
b/hadoop-common-project/hadoop-kms/src/test/resources/mini-kms-acls-default.xml
@@ -23,7 +23,7 @@
    <value>*</value>
    <description>
   ACL for create-key operations.
-  If the user does is not in the GET ACL, the key material is not returned
+  If the user is not in the GET ACL, 

[2/2] hadoop git commit: HADOOP-6221 RPC Client operations cannot be interrupted (stevel)

2015-01-26 Thread stevel
HADOOP-6221 RPC Client operations cannot be interrupted (stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f2b6956
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f2b6956
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f2b6956

Branch: refs/heads/trunk
Commit: 1f2b6956c2012a7d6ea7e7ba5116d3ad71c23d7e
Parents: 21d5599
Author: Steve Loughran ste...@apache.org
Authored: Mon Jan 26 22:04:45 2015 +
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jan 26 22:04:56 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 .../main/java/org/apache/hadoop/ipc/Client.java |   6 +
 .../main/java/org/apache/hadoop/ipc/RPC.java|   9 +-
 .../apache/hadoop/net/SocketIOWithTimeout.java  |  12 +-
 .../apache/hadoop/ipc/TestRPCWaitForProxy.java  | 130 +++
 5 files changed, 152 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f2b6956/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e0da851..2806ee2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -763,6 +763,8 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11499. Check of executorThreadsStarted in
 ValueQueue#submitRefillTask() evades lock acquisition (Ted Yu via jlowe)
 
+HADOOP-6221 RPC Client operations cannot be interrupted. (stevel)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f2b6956/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 45a4660..dfde136 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -849,6 +849,12 @@ public class Client {
 throw ioe;
   }
 
+  // Throw the exception if the thread is interrupted
+  if (Thread.currentThread().isInterrupted()) {
+LOG.warn("Interrupted while trying for connection");
+throw ioe;
+  }
+
   try {
 Thread.sleep(action.delayMillis);
   } catch (InterruptedException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f2b6956/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 40f6515..8ada0ff 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -412,11 +412,18 @@ public class RPC {
 throw ioe;
   }
 
+  if (Thread.currentThread().isInterrupted()) {
+// interrupted during some IO; this may not have been caught
+throw new InterruptedIOException("Interrupted waiting for the proxy");
+  }
+
   // wait for retry
   try {
 Thread.sleep(1000);
   } catch (InterruptedException ie) {
-// IGNORE
+Thread.currentThread().interrupt();
+throw (IOException) new InterruptedIOException(
+    "Interrupted waiting for the proxy").initCause(ioe);
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f2b6956/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
index ed12b3c..b50f7e9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
@@ -338,6 +338,12 @@ abstract class SocketIOWithTimeout {
 return ret;
   }
   
+  if (Thread.currentThread().isInterrupted()) {
+throw new InterruptedIOException("Interrupted while waiting for "
++ "IO on channel " + channel + ". " + timeout
+  
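Taken together, the three hunks make every blocking point in the RPC retry path interrupt-aware: check the flag before sleeping, and convert InterruptedException into InterruptedIOException rather than swallowing it. A hedged standalone sketch of that retry pattern (hypothetical InterruptibleRetry helper, not Hadoop code):

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.util.concurrent.Callable;

    public final class InterruptibleRetry {
      // Retry an IO action, but stop promptly if the calling thread is
      // interrupted: check the flag between attempts and surface the
      // interrupt as an InterruptedIOException, as this commit does.
      public static <T> T retry(Callable<T> attempt, int maxTries,
          long delayMillis) throws IOException {
        IOException last = null;
        for (int i = 0; i < maxTries; i++) {
          try {
            return attempt.call();
          } catch (IOException ioe) {
            last = ioe;
          } catch (Exception e) {
            throw new IOException(e);
          }
          if (Thread.currentThread().isInterrupted()) {
            throw new InterruptedIOException("Interrupted between retries");
          }
          try {
            Thread.sleep(delayMillis);
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            throw (IOException) new InterruptedIOException(
                "Interrupted during retry sleep").initCause(last);
          }
        }
        throw last != null ? last : new IOException("no attempts made");
      }

      private InterruptibleRetry() {
      }
    }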

hadoop git commit: HADOOP-11509. Change parsing sequence in GenericOptionsParser to parse -D parameters before -files. Contributed by Xuan Gong

2015-01-26 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1f2b6956c -> 0bf333911


HADOOP-11509. Change parsing sequence in GenericOptionsParser to parse
-D parameters before -files. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bf33391
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bf33391
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bf33391

Branch: refs/heads/trunk
Commit: 0bf333911c950f22ec0f784bf465306e20b0d507
Parents: 1f2b695
Author: Xuan xg...@apache.org
Authored: Mon Jan 26 15:35:35 2015 -0800
Committer: Xuan xg...@apache.org
Committed: Mon Jan 26 15:35:35 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../hadoop/util/GenericOptionsParser.java   | 20 +++-
 2 files changed, 14 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bf33391/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2806ee2..f1aab62 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -765,6 +765,9 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-6221 RPC Client operations cannot be interrupted. (stevel)
 
+HADOOP-11509. Change parsing sequence in GenericOptionsParser to parse -D
+parameters before -files. (xgong)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bf33391/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index d0e7655..0a46a7a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -284,6 +284,17 @@ public class GenericOptionsParser {
 conf.addResource(new Path(value));
   }
 }
+
+if (line.hasOption('D')) {
+  String[] property = line.getOptionValues('D');
+  for(String prop : property) {
+String[] keyval = prop.split("=", 2);
+if (keyval.length == 2) {
+  conf.set(keyval[0], keyval[1], "from command line");
+}
+  }
+}
+
 if (line.hasOption("libjars")) {
   conf.set("tmpjars", 
validateFiles(line.getOptionValue("libjars"), conf),
@@ -307,15 +318,6 @@ public class GenericOptionsParser {
 validateFiles(line.getOptionValue("archives"), conf),
 "from -archives command line option");
 }
-if (line.hasOption('D')) {
-  String[] property = line.getOptionValues('D');
-  for(String prop : property) {
-String[] keyval = prop.split("=", 2);
-if (keyval.length == 2) {
-  conf.set(keyval[0], keyval[1], "from command line");
-}
-  }
-}
 conf.setBoolean("mapreduce.client.genericoptionsparser.used", true);
 
 // tokensFile
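The reordering matters because conf.set for the -D pairs now runs before validateFiles touches the -files/-libjars/-archives values, so any filesystem-related property given with -D is already visible during that validation. A hedged sketch of exercising the parser directly (hypothetical argv; the file path must exist locally for validation to pass):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.GenericOptionsParser;

    public class ParseDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // After HADOOP-11509 the -D pair lands in conf before the
        // -files value is validated.
        String[] argv = {
            "-D", "fs.defaultFS=file:///",         // hypothetical setting
            "-files", "file:///tmp/lookup.txt"     // must exist locally
        };
        GenericOptionsParser parser = new GenericOptionsParser(conf, argv);
        System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS"));
        System.out.println("remaining = " + parser.getRemainingArgs().length);
      }
    }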



hadoop git commit: HADOOP-11509. Change parsing sequence in GenericOptionsParser to parse -D parameters before -files. Contributed by Xuan Gong

2015-01-26 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0ada35c91 -> f93fa3938


HADOOP-11509. Change parsing sequence in GenericOptionsParser to parse
-D parameters before -files. Contributed by Xuan Gong

(cherry picked from commit 0bf333911c950f22ec0f784bf465306e20b0d507)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f93fa393
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f93fa393
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f93fa393

Branch: refs/heads/branch-2
Commit: f93fa39384174711e82076e1f1a655ecf3c776b9
Parents: 0ada35c
Author: Xuan xg...@apache.org
Authored: Mon Jan 26 15:35:35 2015 -0800
Committer: Xuan xg...@apache.org
Committed: Mon Jan 26 15:37:14 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../hadoop/util/GenericOptionsParser.java   | 20 +++-
 2 files changed, 14 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f93fa393/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index bc22de7..b4581e8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -409,6 +409,9 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-6221 RPC Client operations cannot be interrupted. (stevel)
 
+HADOOP-11509. Change parsing sequence in GenericOptionsParser to parse -D
+parameters before -files. (xgong)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f93fa393/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index d0e7655..0a46a7a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -284,6 +284,17 @@ public class GenericOptionsParser {
 conf.addResource(new Path(value));
   }
 }
+
+if (line.hasOption('D')) {
+  String[] property = line.getOptionValues('D');
+  for(String prop : property) {
+String[] keyval = prop.split("=", 2);
+if (keyval.length == 2) {
+  conf.set(keyval[0], keyval[1], "from command line");
+}
+  }
+}
+
 if (line.hasOption("libjars")) {
   conf.set("tmpjars", 
validateFiles(line.getOptionValue("libjars"), conf),
@@ -307,15 +318,6 @@ public class GenericOptionsParser {
 validateFiles(line.getOptionValue("archives"), conf),
 "from -archives command line option");
 }
-if (line.hasOption('D')) {
-  String[] property = line.getOptionValues('D');
-  for(String prop : property) {
-String[] keyval = prop.split("=", 2);
-if (keyval.length == 2) {
-  conf.set(keyval[0], keyval[1], "from command line");
-}
-  }
-}
 conf.setBoolean("mapreduce.client.genericoptionsparser.used", true);
 
 // tokensFile



hadoop git commit: YARN-3092. Created a common ResourceUsage class to track labeled resource usages in Capacity Scheduler. Contributed by Wangda Tan (cherry picked from commit 6f9fe76918bbc79109653edc

2015-01-26 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f93fa3938 -> 61b4116b4


YARN-3092. Created a common ResourceUsage class to track labeled resource 
usages in Capacity Scheduler. Contributed by Wangda Tan
(cherry picked from commit 6f9fe76918bbc79109653edc6cde85df05148ba3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61b4116b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61b4116b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61b4116b

Branch: refs/heads/branch-2
Commit: 61b4116b4b3c0eec8f514f079debd88bc757b28e
Parents: f93fa39
Author: Jian He jia...@apache.org
Authored: Mon Jan 26 15:21:22 2015 -0800
Committer: Jian He jia...@apache.org
Committed: Mon Jan 26 15:38:27 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/ResourceUsage.java| 332 +++
 .../scheduler/TestResourceUsage.java| 138 
 3 files changed, 473 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61b4116b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b811023..8fc1d23 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -169,6 +169,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3024. LocalizerRunner should give DIE action when all resources are
 localized. (Chengbing Liu via xgong)
 
+YARN-3092. Created a common ResourceUsage class to track labeled resource
+usages in Capacity Scheduler. (Wangda Tan via jianhe)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61b4116b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
new file mode 100644
index 000..5a4cced
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
@@ -0,0 +1,332 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.util.resource.Resources;
+
+/**
+ * Resource Usage by Labels for following fields by label - AM resource (to
+ * enforce max-am-resource-by-label after YARN-2637) - Used resource (includes
+ * AM resource usage) - Reserved resource - Pending resource - Headroom
+ * 
+ * This class can be used to track resource usage in queue/user/app.
+ * 
+ * And it is thread-safe
+ */
+public class ResourceUsage {
+  private ReadLock readLock;
+  private WriteLock writeLock;
+  private Map<String, UsageByLabel> usages;
+  // short for no-label :)
+  private static final String NL = CommonNodeLabelsManager.NO_LABEL;
+
+  public ResourceUsage() {
+ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+readLock = lock.readLock();
+writeLock = lock.writeLock();
+
+usages = new HashMap<String, UsageByLabel>();
+  }
+
+  // Usage enum here to make implement cleaner
+  private enum ResourceType {
+
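The listing is cut off at the ResourceType enum, but the shape of the class is already visible: a label-keyed map guarded by the read/write lock built in the constructor. A minimal sketch of that concurrency pattern (hypothetical LabelledCounter, not the committed class):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Hypothetical illustration of the ResourceUsage locking pattern:
    // reads share the read lock, updates take the write lock, and an
    // unseen label is materialized on first update.
    public class LabelledCounter {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private final Map<String, Long> usages = new HashMap<String, Long>();

      public long get(String label) {
        lock.readLock().lock();
        try {
          Long v = usages.get(label);
          return v == null ? 0L : v.longValue();
        } finally {
          lock.readLock().unlock();
        }
      }

      public void inc(String label, long delta) {
        lock.writeLock().lock();
        try {
          Long v = usages.get(label);
          usages.put(label, Long.valueOf((v == null ? 0L : v.longValue()) + delta));
        } finally {
          lock.writeLock().unlock();
        }
      }
    }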

hadoop git commit: YARN-3092. Created a common ResourceUsage class to track labeled resource usages in Capacity Scheduler. Contributed by Wangda Tan

2015-01-26 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0bf333911 -> 6f9fe7691


YARN-3092. Created a common ResourceUsage class to track labeled resource 
usages in Capacity Scheduler. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f9fe769
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f9fe769
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f9fe769

Branch: refs/heads/trunk
Commit: 6f9fe76918bbc79109653edc6cde85df05148ba3
Parents: 0bf3339
Author: Jian He jia...@apache.org
Authored: Mon Jan 26 15:21:22 2015 -0800
Committer: Jian He jia...@apache.org
Committed: Mon Jan 26 15:38:00 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/ResourceUsage.java| 332 +++
 .../scheduler/TestResourceUsage.java| 138 
 3 files changed, 473 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9fe769/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 872f16e..924bfa6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -203,6 +203,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3024. LocalizerRunner should give DIE action when all resources are
 localized. (Chengbing Liu via xgong)
 
+YARN-3092. Created a common ResourceUsage class to track labeled resource
+usages in Capacity Scheduler. (Wangda Tan via jianhe)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9fe769/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
new file mode 100644
index 000..5a4cced
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
@@ -0,0 +1,332 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.util.resource.Resources;
+
+/**
+ * Resource Usage by Labels for following fields by label - AM resource (to
+ * enforce max-am-resource-by-label after YARN-2637) - Used resource (includes
+ * AM resource usage) - Reserved resource - Pending resource - Headroom
+ * 
+ * This class can be used to track resource usage in queue/user/app.
+ * 
+ * And it is thread-safe
+ */
+public class ResourceUsage {
+  private ReadLock readLock;
+  private WriteLock writeLock;
+  private Map<String, UsageByLabel> usages;
+  // short for no-label :)
+  private static final String NL = CommonNodeLabelsManager.NO_LABEL;
+
+  public ResourceUsage() {
+ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+readLock = lock.readLock();
+writeLock = lock.writeLock();
+
+usages = new HashMap<String, UsageByLabel>();
+  }
+
+  // Usage enum here to make implement cleaner
+  private enum ResourceType {
+USED(0), PENDING(1), AMUSED(2), RESERVED(3), HEADROOM(4);
+
+private int 

[1/2] hadoop git commit: HADOOP-6221 RPC Client operations cannot be interrupted (stevel)

2015-01-26 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7f1441291 -> 0ada35c91
  refs/heads/trunk 21d559906 -> 1f2b6956c


HADOOP-6221 RPC Client operations cannot be interrupted (stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ada35c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ada35c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ada35c9

Branch: refs/heads/branch-2
Commit: 0ada35c91244e8abb03df08fa8d9a728ca0eb692
Parents: 7f14412
Author: Steve Loughran ste...@apache.org
Authored: Mon Jan 26 22:04:45 2015 +
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jan 26 22:04:45 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 .../main/java/org/apache/hadoop/ipc/Client.java |   6 +
 .../main/java/org/apache/hadoop/ipc/RPC.java|   9 +-
 .../apache/hadoop/net/SocketIOWithTimeout.java  |  12 +-
 .../apache/hadoop/ipc/TestRPCWaitForProxy.java  | 130 +++
 5 files changed, 152 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ada35c9/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e37bffe..bc22de7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -407,6 +407,8 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11499. Check of executorThreadsStarted in
 ValueQueue#submitRefillTask() evades lock acquisition (Ted Yu via jlowe)
 
+HADOOP-6221 RPC Client operations cannot be interrupted. (stevel)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ada35c9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index d9385fb..65192f6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -849,6 +849,12 @@ public class Client {
 throw ioe;
   }
 
+  // Throw the exception if the thread is interrupted
+  if (Thread.currentThread().isInterrupted()) {
+LOG.warn("Interrupted while trying for connection");
+throw ioe;
+  }
+
   try {
 Thread.sleep(action.delayMillis);
   } catch (InterruptedException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ada35c9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 7f6d9b6..797b719 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -408,11 +408,18 @@ public class RPC {
 throw ioe;
   }
 
+  if (Thread.currentThread().isInterrupted()) {
+// interrupted during some IO; this may not have been caught
+throw new InterruptedIOException("Interrupted waiting for the proxy");
+  }
+
   // wait for retry
   try {
 Thread.sleep(1000);
   } catch (InterruptedException ie) {
-// IGNORE
+Thread.currentThread().interrupt();
+throw (IOException) new InterruptedIOException(
+    "Interrupted waiting for the proxy").initCause(ioe);
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ada35c9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
index ed12b3c..b50f7e9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
@@ -338,6 +338,12 @@ abstract class SocketIOWithTimeout {
 return ret;
   }
   
+  if (Thread.currentThread().isInterrupted()) {
+