[1/2] hadoop git commit: HADOOP-12235 hadoop-openstack junit & mockito dependencies should be provided. (Ted Yu via stevel)

2015-07-20 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 eddb32666 -> f3296a198
  refs/heads/trunk 176131f12 -> 05130e94c


HADOOP-12235 hadoop-openstack junit & mockito dependencies should be
provided. (Ted Yu via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3296a19
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3296a19
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3296a19

Branch: refs/heads/branch-2
Commit: f3296a19846e4cc2ea64b2293349079792055783
Parents: eddb326
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 11:22:22 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 11:22:22 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 hadoop-tools/hadoop-openstack/pom.xml   | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3296a19/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1fde48e..ed9fdb6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -481,6 +481,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12240. Fix tests requiring native library to be skipped in 
non-native
 profile. (Masatake Iwasaki via ozawa)
 
+HADOOP-12235 hadoop-openstack junit & mockito dependencies should be
+provided. (Ted Yu via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3296a19/hadoop-tools/hadoop-openstack/pom.xml
--
diff --git a/hadoop-tools/hadoop-openstack/pom.xml 
b/hadoop-tools/hadoop-openstack/pom.xml
index e4159f5..3e6ed6f 100644
--- a/hadoop-tools/hadoop-openstack/pom.xml
+++ b/hadoop-tools/hadoop-openstack/pom.xml
@@ -128,12 +128,12 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <scope>compile</scope>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-all</artifactId>
-      <scope>compile</scope>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>



[2/2] hadoop git commit: HADOOP-12235 hadoop-openstack junit & mockito dependencies should be provided. (Ted Yu via stevel)

2015-07-20 Thread stevel
HADOOP-12235 hadoop-openstack junit & mockito dependencies should be
provided. (Ted Yu via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05130e94
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05130e94
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05130e94

Branch: refs/heads/trunk
Commit: 05130e94c5223a8ed70a7fb5d1398e5d536f5f03
Parents: 176131f
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 11:22:22 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 11:22:39 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 hadoop-tools/hadoop-openstack/pom.xml   | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05130e94/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 62703c3..481d7de 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -969,6 +969,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12240. Fix tests requiring native library to be skipped in 
non-native
 profile. (Masatake Iwasaki via ozawa)
 
+HADOOP-12235 hadoop-openstack junit & mockito dependencies should be
+provided. (Ted Yu via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05130e94/hadoop-tools/hadoop-openstack/pom.xml
--
diff --git a/hadoop-tools/hadoop-openstack/pom.xml 
b/hadoop-tools/hadoop-openstack/pom.xml
index afdda99..1b541e2 100644
--- a/hadoop-tools/hadoop-openstack/pom.xml
+++ b/hadoop-tools/hadoop-openstack/pom.xml
@@ -128,12 +128,12 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <scope>compile</scope>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-all</artifactId>
-      <scope>compile</scope>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>



[2/2] hadoop git commit: HADOOP-12209 Comparable type should be in FileStatus. (Yong Zhang via stevel)

2015-07-20 Thread stevel
HADOOP-12209 Comparable type should be in FileStatus.   (Yong Zhang via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9141e1aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9141e1aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9141e1aa

Branch: refs/heads/trunk
Commit: 9141e1aa16561e44f73e00b349735f530c94acc3
Parents: 05130e9
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 12:32:32 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 12:32:44 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/fs/FileStatus.java   | 15 +
 .../org/apache/hadoop/fs/LocatedFileStatus.java | 10 +++--
 .../fs/viewfs/ViewFsLocatedFileStatus.java  |  3 ++-
 .../org/apache/hadoop/fs/TestFileStatus.java| 22 
 5 files changed, 35 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 481d7de..18475b9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -972,6 +972,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12235 hadoop-openstack junit & mockito dependencies should be
 provided. (Ted Yu via stevel)
 
+HADOOP-12209 Comparable type should be in FileStatus.
+(Yong Zhang via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 98757a7..6a79768 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.io.Writable;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class FileStatus implements Writable, Comparable {
+public class FileStatus implements Writable, Comparable<FileStatus> {
 
   private Path path;
   private long length;
@@ -323,19 +323,14 @@ public class FileStatus implements Writable, Comparable {
   }
 
   /**
-   * Compare this object to another object
-   * 
-   * @param   o the object to be compared.
+   * Compare this FileStatus to another FileStatus
+   * @param   o the FileStatus to be compared.
* @return  a negative integer, zero, or a positive integer as this object
*   is less than, equal to, or greater than the specified object.
-   * 
-   * @throws ClassCastException if the specified object's is not of 
-   * type FileStatus
*/
   @Override
-  public int compareTo(Object o) {
-FileStatus other = (FileStatus)o;
-return this.getPath().compareTo(other.getPath());
+  public int compareTo(FileStatus o) {
+return this.getPath().compareTo(o.getPath());
   }
   
   /** Compare if this object is equal to another object

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
index 9e920c5..588fd6a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
@@ -90,17 +90,13 @@ public class LocatedFileStatus extends FileStatus {
   }
   
   /**
-   * Compare this object to another object
-   * 
-   * @param   o the object to be compared.
+   * Compare this FileStatus to another FileStatus
+   * @param   o the FileStatus to be compared.
* @return  a negative integer, zero, or a positive integer as this object
*   is less than, equal to, or greater than the specified object.
-   * 
-   * @throws ClassCastException if the specified object's is not of 
-   * type FileStatus
*/
   @Override
-  public int compareTo(Object o) {
+  public int compareTo(FileStatus o) {
 return super.compareTo(o);
   }
   


[1/2] hadoop git commit: HADOOP-12209 Comparable type should be in FileStatus. (Yong Zhang via stevel)

2015-07-20 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f3296a198 -> 208b9eed9
  refs/heads/trunk 05130e94c -> 9141e1aa1


HADOOP-12209 Comparable type should be in FileStatus.   (Yong Zhang via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/208b9eed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/208b9eed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/208b9eed

Branch: refs/heads/branch-2
Commit: 208b9eed9e5ba1ead52165d6f9bd1905ccab97ca
Parents: f3296a1
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 12:32:32 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 12:32:32 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/fs/FileStatus.java   | 15 +
 .../org/apache/hadoop/fs/LocatedFileStatus.java | 10 +++--
 .../fs/viewfs/ViewFsLocatedFileStatus.java  |  3 ++-
 .../org/apache/hadoop/fs/TestFileStatus.java| 22 
 5 files changed, 35 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/208b9eed/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index ed9fdb6..dfc807c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -484,6 +484,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12235 hadoop-openstack junit & mockito dependencies should be
 provided. (Ted Yu via stevel)
 
+HADOOP-12209 Comparable type should be in FileStatus.
+(Yong Zhang via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/208b9eed/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index da3807d..3481010 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.io.Writable;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class FileStatus implements Writable, Comparable {
+public class FileStatus implements Writable, Comparable<FileStatus> {
 
   private Path path;
   private long length;
@@ -323,19 +323,14 @@ public class FileStatus implements Writable, Comparable {
   }
 
   /**
-   * Compare this object to another object
-   * 
-   * @param   o the object to be compared.
+   * Compare this FileStatus to another FileStatus
+   * @param   o the FileStatus to be compared.
* @return  a negative integer, zero, or a positive integer as this object
*   is less than, equal to, or greater than the specified object.
-   * 
-   * @throws ClassCastException if the specified object's is not of 
-   * type FileStatus
*/
   @Override
-  public int compareTo(Object o) {
-FileStatus other = (FileStatus)o;
-return this.getPath().compareTo(other.getPath());
+  public int compareTo(FileStatus o) {
+return this.getPath().compareTo(o.getPath());
   }
   
   /** Compare if this object is equal to another object

http://git-wip-us.apache.org/repos/asf/hadoop/blob/208b9eed/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
index 9e920c5..588fd6a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
@@ -90,17 +90,13 @@ public class LocatedFileStatus extends FileStatus {
   }
   
   /**
-   * Compare this object to another object
-   * 
-   * @param   o the object to be compared.
+   * Compare this FileStatus to another FileStatus
+   * @param   o the FileStatus to be compared.
* @return  a negative integer, zero, or a positive integer as this object
*   is less than, equal to, or greater than the specified object.
-   * 
-   * @throws ClassCastException if the specified object's is not of 
-   * type FileStatus
*/
   @Override
-  public int 

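For context, a minimal caller-side sketch of what the typed compareTo above enables: sorting a directory listing by path without raw-type casts or the old ClassCastException risk. The local-filesystem setup and the /tmp path are illustrative placeholders, not part of the patch.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SortedListingSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative setup: the local filesystem and an arbitrary directory.
    FileSystem fs = FileSystem.getLocal(new Configuration());
    FileStatus[] statuses = fs.listStatus(new Path("/tmp"));
    // FileStatus now implements Comparable<FileStatus>, so Arrays.sort needs
    // no casts; ordering follows Path.compareTo, i.e. by path.
    Arrays.sort(statuses);
    for (FileStatus status : statuses) {
      System.out.println(status.getPath());
    }
  }
}
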
[4/6] hadoop git commit: HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString() over getMessage() in logging/span events. (Varun Saxena via stevel)

2015-07-20 Thread stevel
HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()  over 
getMessage() in logging/span events. (Varun Saxena via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9431425
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9431425
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9431425

Branch: refs/heads/trunk
Commit: a9431425d1aff657fc1ea501c706235f2ebc518f
Parents: 05fa336
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 13:13:09 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 13:13:23 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9431425/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index bfa9aac..1b643a9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -978,6 +978,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12088. KMSClientProvider uses equalsIgnoreCase(application/json).
 (Brahma Reddy Battula via stevel)
 
+HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
+over getMessage() in logging/span events. (Varun Saxena via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9431425/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index e75de15..cc75f5c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -238,7 +238,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 }
 if (Trace.isTracing()) {
   traceScope.getSpan().addTimelineAnnotation(
-          "Call got exception: " + e.getMessage());
+          "Call got exception: " + e.toString());
 }
 throw new ServiceException(e);
   } finally {
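
A standalone illustration (not taken from the patch) of why toString() is preferred in the trace annotation: an exception constructed without a message returns null from getMessage(), while toString() still reports the exception class.

public class ExceptionMessageSketch {
  public static void main(String[] args) {
    Exception e = new NullPointerException();
    // Prints "getMessage(): null" -- the span annotation would carry no detail.
    System.out.println("getMessage(): " + e.getMessage());
    // Prints "toString():   java.lang.NullPointerException" -- the class name survives.
    System.out.println("toString():   " + e.toString());
  }
}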



[1/6] hadoop git commit: HADOOP-12088. KMSClientProvider uses equalsIgnoreCase(application/json). (Brahma Reddy Battula via stevel)

2015-07-20 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 208b9eed9 -> d3c23c7b1
  refs/heads/trunk 9141e1aa1 -> 98c2bc87b


HADOOP-12088. KMSClientProvider uses equalsIgnoreCase(application/json).   
(Brahma Reddy Battula via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d68b044f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d68b044f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d68b044f

Branch: refs/heads/branch-2
Commit: d68b044f0f477b46f527b9d64b8aa5f2c7a6bb10
Parents: 208b9ee
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 13:02:51 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 13:02:51 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d68b044f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index dfc807c..1769f02 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -487,6 +487,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12209 Comparable type should be in FileStatus.
 (Yong Zhang via stevel)
 
+HADOOP-12088. KMSClientProvider uses equalsIgnoreCase(application/json).
+(Brahma Reddy Battula via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d68b044f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 149424f..4698a83 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -544,7 +544,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   // AuthenticatedURL properly to set authToken post initialization)
 }
 HttpExceptionUtils.validateResponse(conn, expectedResponse);
-    if (APPLICATION_JSON_MIME.equalsIgnoreCase(conn.getContentType())
+    if (conn.getContentType() != null
+        && conn.getContentType().trim().toLowerCase()
+        .startsWith(APPLICATION_JSON_MIME)
         && klass != null) {
   ObjectMapper mapper = new ObjectMapper();
   InputStream is = null;
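
A self-contained sketch of the behavioural change (the constant mirrors the KMS code; class and method names are illustrative): equalsIgnoreCase rejects a Content-Type header that carries parameters such as a charset, while the trimmed, lower-cased startsWith check accepts it.

public class ContentTypeCheckSketch {
  private static final String APPLICATION_JSON_MIME = "application/json";

  // Pre-patch behaviour: exact (case-insensitive) match only.
  static boolean oldCheck(String contentType) {
    return APPLICATION_JSON_MIME.equalsIgnoreCase(contentType);
  }

  // Post-patch behaviour: tolerate parameters appended by the server.
  static boolean newCheck(String contentType) {
    return contentType != null
        && contentType.trim().toLowerCase().startsWith(APPLICATION_JSON_MIME);
  }

  public static void main(String[] args) {
    String header = "application/json; charset=utf-8";
    System.out.println(oldCheck(header)); // false
    System.out.println(newCheck(header)); // true
  }
}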



[3/6] hadoop git commit: HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString() over getMessage() in logging/span events. (Varun Saxena via stevel)

2015-07-20 Thread stevel
HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()  over 
getMessage() in logging/span events. (Varun Saxena via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/577acf89
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/577acf89
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/577acf89

Branch: refs/heads/branch-2
Commit: 577acf89f81fb0e164d94cb9f32e1b9ca819ae3e
Parents: d68b044
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 13:13:09 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 13:13:09 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/577acf89/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1769f02..5b72889 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -490,6 +490,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12088. KMSClientProvider uses equalsIgnoreCase(application/json).
 (Brahma Reddy Battula via stevel)
 
+HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
+over getMessage() in logging/span events. (Varun Saxena via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/577acf89/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 76be837..abb494a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -238,7 +238,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 }
 if (Trace.isTracing()) {
   traceScope.getSpan().addTimelineAnnotation(
-          "Call got exception: " + e.getMessage());
+          "Call got exception: " + e.toString());
 }
 throw new ServiceException(e);
   } finally {



[2/6] hadoop git commit: HADOOP-12088. KMSClientProvider uses equalsIgnoreCase(application/json). (Brahma Reddy Battula via stevel)

2015-07-20 Thread stevel
HADOOP-12088. KMSClientProvider uses equalsIgnoreCase(application/json).   
(Brahma Reddy Battula via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05fa3368
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05fa3368
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05fa3368

Branch: refs/heads/trunk
Commit: 05fa3368f12d189a95a2d6cd8eebc6f7e3a719ee
Parents: 9141e1a
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 13:02:51 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 13:03:03 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05fa3368/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 18475b9..bfa9aac 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -975,6 +975,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12209 Comparable type should be in FileStatus.
 (Yong Zhang via stevel)
 
+HADOOP-12088. KMSClientProvider uses equalsIgnoreCase(application/json).
+(Brahma Reddy Battula via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05fa3368/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 223e69a..1ffc44d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -544,7 +544,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   // AuthenticatedURL properly to set authToken post initialization)
 }
 HttpExceptionUtils.validateResponse(conn, expectedResponse);
-    if (APPLICATION_JSON_MIME.equalsIgnoreCase(conn.getContentType())
+    if (conn.getContentType() != null
+        && conn.getContentType().trim().toLowerCase()
+        .startsWith(APPLICATION_JSON_MIME)
         && klass != null) {
   ObjectMapper mapper = new ObjectMapper();
   InputStream is = null;



hadoop git commit: HADOOP-12237. releasedocmaker.py doesn't work behind a proxy (Tsuyoshi Ozawa via aw)

2015-07-20 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-12111 d84da00b1 -> b41fe3111


HADOOP-12237. releasedocmaker.py doesn't work behind a proxy (Tsuyoshi Ozawa 
via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b41fe311
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b41fe311
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b41fe311

Branch: refs/heads/HADOOP-12111
Commit: b41fe3111ae37478cbace2a07e6ac35a676ef978
Parents: d84da00
Author: Allen Wittenauer a...@apache.org
Authored: Mon Jul 20 09:47:46 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Mon Jul 20 09:47:46 2015 -0700

--
 dev-support/releasedocmaker.py | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b41fe311/dev-support/releasedocmaker.py
--
diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py
index 409d8e3..d2e5dda 100755
--- a/dev-support/releasedocmaker.py
+++ b/dev-support/releasedocmaker.py
@@ -24,6 +24,7 @@ import os
 import re
 import sys
 import urllib
+import urllib2
 try:
   import json
 except ImportError:
@@ -125,7 +126,7 @@ class GetVersions:
 versions.sort()
     print "Looking for %s through %s"%(versions[0],versions[-1])
 for p in projects:
-      resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/project/%s/versions"%p)
+      resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/project/%s/versions"%p)
   data = json.loads(resp.read())
   for d in data:
       if d['name'][0].isdigit and versions[0] <= d['name'] and d['name'] <= versions[-1]:
@@ -288,7 +289,7 @@ class JiraIter:
 self.projects = projects
     v=str(version).replace("-SNAPSHOT","")
 
-    resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/field")
+    resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/field")
 data = json.loads(resp.read())
 
 self.fieldIdMap = {}
@@ -301,7 +302,7 @@ class JiraIter:
 count=100
     while (at < end):
       params = urllib.urlencode({'jql': "project in ('"+"' , '".join(projects)+"') and fixVersion in ('"+v+"') and resolution = Fixed", 'startAt':at, 'maxResults':count})
-      resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s"%params)
+      resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s"%params)
   data = json.loads(resp.read())
   if (data.has_key('errorMessages')):
 raise Exception(data['errorMessages'])
@@ -407,6 +408,10 @@ def main():
   if (len(options.versions) <= 0):
 parser.error(At least one version needs to be supplied)
 
+  proxy = urllib2.ProxyHandler()
+  opener = urllib2.build_opener(proxy)
+  urllib2.install_opener(opener)
+
   projects = options.projects
 
   if (options.range is True):



hadoop git commit: HADOOP-12060. Fix ByteBuffer usage for raw erasure coders. Contributed by Kai Zheng.

2015-07-20 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 06394e376 -> 29495cb8f


HADOOP-12060. Fix ByteBuffer usage for raw erasure coders. Contributed by Kai 
Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29495cb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29495cb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29495cb8

Branch: refs/heads/HDFS-7285
Commit: 29495cb8f6b940caa9964c39a290ef233ce1ec7c
Parents: 06394e3
Author: Jing Zhao ji...@apache.org
Authored: Mon Jul 20 10:15:14 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon Jul 20 10:15:14 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  5 +-
 .../apache/hadoop/io/erasurecode/CodecUtil.java | 38 
 .../apache/hadoop/io/erasurecode/ECBlock.java   | 14 +--
 .../hadoop/io/erasurecode/ECBlockGroup.java |  6 +-
 .../apache/hadoop/io/erasurecode/ECChunk.java   |  6 +-
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 18 ++--
 .../hadoop/io/erasurecode/SchemaLoader.java |  3 +-
 .../rawcoder/AbstractRawErasureCoder.java   | 42 +
 .../rawcoder/AbstractRawErasureDecoder.java | 50 +--
 .../rawcoder/AbstractRawErasureEncoder.java | 27 +++---
 .../rawcoder/RawErasureCoderFactory.java|  8 +-
 .../erasurecode/rawcoder/RawErasureDecoder.java | 25 --
 .../erasurecode/rawcoder/RawErasureEncoder.java | 24 --
 .../hadoop/io/erasurecode/BufferAllocator.java  | 91 
 .../hadoop/io/erasurecode/TestCoderBase.java| 17 +++-
 .../erasurecode/coder/TestErasureCoderBase.java | 10 +--
 .../erasurecode/rawcoder/TestRawCoderBase.java  | 13 +--
 17 files changed, 268 insertions(+), 129 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29495cb8/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 9ccd3a7..1f3006e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -68,4 +68,7 @@
 
 HADOOP-12065. Using more meaningful keys in EC schema. (Kai Zheng)
 
-HDFS-8557. Allow to configure RS and XOR raw coders (Kai Zheng)
\ No newline at end of file
+HDFS-8557. Allow to configure RS and XOR raw coders (Kai Zheng)
+
+HADOOP-12060. Fix ByteBuffer usage for raw erasure coders. (Kai Zheng via
+jing9)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29495cb8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
index 5d22624..027d58b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
@@ -22,17 +22,17 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.erasurecode.rawcoder.*;
 
 /**
- * A codec utility.
+ * A codec & coder utility to help create raw coders conveniently.
  */
 public final class CodecUtil {
 
-  private CodecUtil() {}
+  private CodecUtil() { }
 
   /**
* Create RS raw encoder according to configuration.
-   * @param conf
-   * @param numDataUnits
-   * @param numParityUnits
+   * @param conf configuration possibly with some items to configure the coder
+   * @param numDataUnits number of data units in a coding group
+   * @param numParityUnits number of parity units in a coding group
* @return raw encoder
*/
   public static RawErasureEncoder createRSRawEncoder(
@@ -49,9 +49,9 @@ public final class CodecUtil {
 
   /**
* Create RS raw decoder according to configuration.
-   * @param conf
-   * @param numDataUnits
-   * @param numParityUnits
+   * @param conf configuration possibly with some items to configure the coder
+   * @param numDataUnits number of data units in a coding group
+   * @param numParityUnits number of parity units in a coding group
* @return raw decoder
*/
   public static RawErasureDecoder createRSRawDecoder(
@@ -68,9 +68,9 @@ public final class CodecUtil {
 
   /**
* Create XOR raw encoder according to configuration.
-   * @param conf
-   * @param numDataUnits
-   * @param numParityUnits
+   * @param conf configuration possibly with some items to configure the coder
+   * @param numDataUnits number of data units 

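A minimal sketch, assuming the HDFS-7285 branch where CodecUtil is available with the factory signatures documented above; the (6, 3) data/parity layout is an arbitrary example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

public class RawCoderSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // 6 data units and 3 parity units per Reed-Solomon coding group.
    RawErasureEncoder encoder = CodecUtil.createRSRawEncoder(conf, 6, 3);
    RawErasureDecoder decoder = CodecUtil.createRSRawDecoder(conf, 6, 3);
    System.out.println(encoder.getClass() + " / " + decoder.getClass());
  }
}
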
[14/17] hadoop git commit: HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString() over getMessage() in logging/span events. (Varun Saxena via stevel)

2015-07-20 Thread aw
HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()  over 
getMessage() in logging/span events. (Varun Saxena via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9431425
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9431425
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9431425

Branch: refs/heads/HADOOP-12111
Commit: a9431425d1aff657fc1ea501c706235f2ebc518f
Parents: 05fa336
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 13:13:09 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 13:13:23 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9431425/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index bfa9aac..1b643a9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -978,6 +978,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12088. KMSClientProvider uses equalsIgnoreCase(application/json).
 (Brahma Reddy Battula via stevel)
 
+HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
+over getMessage() in logging/span events. (Varun Saxena via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9431425/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index e75de15..cc75f5c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -238,7 +238,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 }
 if (Trace.isTracing()) {
   traceScope.getSpan().addTimelineAnnotation(
-          "Call got exception: " + e.getMessage());
+          "Call got exception: " + e.toString());
 }
 throw new ServiceException(e);
   } finally {



[15/17] hadoop git commit: HADOOP-11893. Mark org.apache.hadoop.security.token.Token as @InterfaceAudience.Public. (Brahma Reddy Battula via stevel)

2015-07-20 Thread aw
HADOOP-11893. Mark org.apache.hadoop.security.token.Token as 
@InterfaceAudience.Public. (Brahma Reddy Battula via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98c2bc87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98c2bc87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98c2bc87

Branch: refs/heads/HADOOP-12111
Commit: 98c2bc87b1445c533268c58d382ea4e4297303fd
Parents: a943142
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 13:22:03 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 13:22:14 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../main/java/org/apache/hadoop/security/token/SecretManager.java | 2 +-
 .../src/main/java/org/apache/hadoop/security/token/Token.java | 2 +-
 .../java/org/apache/hadoop/security/token/TokenIdentifier.java| 2 +-
 .../src/main/java/org/apache/hadoop/security/token/TokenInfo.java | 2 +-
 .../main/java/org/apache/hadoop/security/token/TokenRenewer.java  | 2 +-
 .../main/java/org/apache/hadoop/security/token/TokenSelector.java | 2 +-
 .../main/java/org/apache/hadoop/security/token/package-info.java  | 2 +-
 8 files changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1b643a9..a23a508 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -702,6 +702,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12153. ByteBufferReadable doesn't declare @InterfaceAudience and
 @InterfaceStability. (Brahma Reddy Battula via ozawa)
 
+HADOOP-11893. Mark org.apache.hadoop.security.token.Token as
+@InterfaceAudience.Public. (Brahma Reddy Battula via stevel)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
index 5fe0391..798c8c9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.ipc.StandbyException;
  * The server-side secret manager for each token type.
  * @param T The type of the token identifier
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.Public
 @InterfaceStability.Evolving
 public abstract class SecretManager<T extends TokenIdentifier> {
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
index bd254e6..2420155 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
@@ -36,7 +36,7 @@ import java.util.ServiceLoader;
 /**
  * The client-side form of the token.
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class Token<T extends TokenIdentifier> implements Writable {
   public static final Log LOG = LogFactory.getLog(Token.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java
index ebf9d58..0b111cc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java
+++ 
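
For context, a sketch of the kind of downstream code the widened audience legitimises: applications already hold Token<?> instances handed out by public APIs such as FileSystem.addDelegationTokens. The renewer name here is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class TokenUsageSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Credentials creds = new Credentials();
    // Collect delegation tokens on behalf of a hypothetical renewer.
    Token<?>[] tokens = fs.addDelegationTokens("yarn", creds);
    for (Token<?> token : tokens) {
      System.out.println(token.getKind() + " for " + token.getService());
    }
  }
}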

[11/17] hadoop git commit: HADOOP-12235 hadoop-openstack junit & mockito dependencies should be provided. (Ted Yu via stevel)

2015-07-20 Thread aw
HADOOP-12235 hadoop-openstack junit & mockito dependencies should be
provided. (Ted Yu via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05130e94
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05130e94
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05130e94

Branch: refs/heads/HADOOP-12111
Commit: 05130e94c5223a8ed70a7fb5d1398e5d536f5f03
Parents: 176131f
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 11:22:22 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 11:22:39 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 hadoop-tools/hadoop-openstack/pom.xml   | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05130e94/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 62703c3..481d7de 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -969,6 +969,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12240. Fix tests requiring native library to be skipped in 
non-native
 profile. (Masatake Iwasaki via ozawa)
 
+HADOOP-12235 hadoop-openstack junit & mockito dependencies should be
+provided. (Ted Yu via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05130e94/hadoop-tools/hadoop-openstack/pom.xml
--
diff --git a/hadoop-tools/hadoop-openstack/pom.xml 
b/hadoop-tools/hadoop-openstack/pom.xml
index afdda99..1b541e2 100644
--- a/hadoop-tools/hadoop-openstack/pom.xml
+++ b/hadoop-tools/hadoop-openstack/pom.xml
@@ -128,12 +128,12 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <scope>compile</scope>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-all</artifactId>
-      <scope>compile</scope>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>



[08/17] hadoop git commit: YARN-3905. Application History Server UI NPEs when accessing apps run after RM restart (Eric Payne via jeagles)

2015-07-20 Thread aw
YARN-3905. Application History Server UI NPEs when accessing apps run after RM 
restart (Eric Payne via jeagles)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7faae0e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7faae0e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7faae0e6

Branch: refs/heads/HADOOP-12111
Commit: 7faae0e6fe027a3886d9f4e290b6a488a2c55b3a
Parents: 9b272cc
Author: Jonathan Eagles jeag...@yahoo-inc.com
Authored: Fri Jul 17 11:02:11 2015 -0500
Committer: Jonathan Eagles jeag...@yahoo-inc.com
Committed: Fri Jul 17 11:02:11 2015 -0500

--
 hadoop-yarn-project/CHANGES.txt| 3 +++
 .../java/org/apache/hadoop/yarn/server/webapp/AppBlock.java| 6 ++
 2 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7faae0e6/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7a94e09..df27023 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -669,6 +669,9 @@ Release 2.7.2 - UNRELEASED
 
 YARN-3690. [JDK8] 'mvn site' fails. (Brahma Reddy Battula via aajisaka)
 
+YARN-3905. Application History Server UI NPEs when accessing apps run after
+RM restart (Eric Payne via jeagles)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7faae0e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index f46197e..eec32b2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -38,7 +38,6 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
@@ -255,10 +254,9 @@ public class AppBlock extends HtmlBlock {
   AppAttemptInfo appAttempt = new AppAttemptInfo(appAttemptReport);
   ContainerReport containerReport;
   try {
-// AM container is always the first container of the attempt
 final GetContainerReportRequest request =
-GetContainerReportRequest.newInstance(ContainerId.newContainerId(
-  appAttemptReport.getApplicationAttemptId(), 1));
+GetContainerReportRequest.newInstance(
+  appAttemptReport.getAMContainerId());
 if (callerUGI == null) {
   containerReport =
   appBaseProt.getContainerReport(request).getContainerReport();



[02/17] hadoop git commit: Move HDFS-7314 to 2.8 section in CHANGES.txt

2015-07-20 Thread aw
Move HDFS-7314 to 2.8 section in CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bda84fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bda84fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bda84fd

Branch: refs/heads/HADOOP-12111
Commit: 0bda84fd48681ac1748a4770cff2f23e8336d276
Parents: fbd88f10
Author: Ming Ma min...@apache.org
Authored: Thu Jul 16 12:52:27 2015 -0700
Committer: Ming Ma min...@apache.org
Committed: Thu Jul 16 12:52:27 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bda84fd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c6685e1..58491a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -165,9 +165,6 @@ Trunk (Unreleased)
 HDFS-5033. Bad error message for fs -put/copyFromLocal if user
 doesn't have permissions to read the source (Darrell Taylor via aw)
 
-HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write
-files rather than the entire DFSClient. (mingma)
-
   OPTIMIZATIONS
 
   BUG FIXES
@@ -725,6 +722,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8742. Inotify: Support event for OP_TRUNCATE.
 (Surendra Singh Lilhore via aajisaka)
 
+HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write
+files rather than the entire DFSClient. (mingma)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than



[05/17] hadoop git commit: YARN-3885. ProportionalCapacityPreemptionPolicy doesn't preempt if queue is more than 2 level. (Ajith S via wangda)

2015-07-20 Thread aw
YARN-3885. ProportionalCapacityPreemptionPolicy doesn't preempt if queue is 
more than 2 level. (Ajith S via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3540d5fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3540d5fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3540d5fe

Branch: refs/heads/HADOOP-12111
Commit: 3540d5fe4b1da942ea80c9e7ca1126b1abb8a68a
Parents: fa2b63e
Author: Wangda Tan wan...@apache.org
Authored: Thu Jul 16 16:13:32 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Thu Jul 16 16:13:32 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../ProportionalCapacityPreemptionPolicy.java   |  8 -
 ...estProportionalCapacityPreemptionPolicy.java | 32 
 3 files changed, 42 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3540d5fe/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cd0d132..9a6f4d2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -640,6 +640,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3930. FileSystemNodeLabelsStore should make sure edit log file closed 
when 
 exception is thrown. (Dian Fu via wangda)
 
+YARN-3885. ProportionalCapacityPreemptionPolicy doesn't preempt if queue 
is 
+more than 2 level. (Ajith S via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3540d5fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 6e661d4..1152cef 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -896,8 +896,10 @@ public class ProportionalCapacityPreemptionPolicy 
implements SchedulingEditPolic
   ret.untouchableExtra = Resource.newInstance(0, 0);
 } else {
   ret.untouchableExtra =
-Resources.subtractFrom(extra, childrensPreemptable);
+Resources.subtract(extra, childrensPreemptable);
 }
+ret.preemptableExtra = Resources.min(
+rc, partitionResource, childrensPreemptable, extra);
   }
 }
 addTempQueuePartition(ret);
@@ -1127,4 +1129,8 @@ public class ProportionalCapacityPreemptionPolicy 
implements SchedulingEditPolic
 }
   }
 
+  @VisibleForTesting
+  public Map<String, Map<String, TempQueuePerPartition>> getQueuePartitions() {
+return queueToPartitions;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3540d5fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
index 3057360..bc4d0dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
@@ -63,6 +63,7 

hadoop git commit: HDFS-8344. NameNode doesn't recover lease for files with missing blocks (raviprak)

2015-07-20 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d3c23c7b1 -> 3d58c7a70


HDFS-8344. NameNode doesn't recover lease for files with missing blocks 
(raviprak)

(cherry picked from commit e4f756260f16156179ba4adad974ec92279c2fac)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d58c7a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d58c7a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d58c7a7

Branch: refs/heads/branch-2
Commit: 3d58c7a7006991b46efe8a8f60b244f4f85b481a
Parents: d3c23c7
Author: Ravi Prakash ravip...@altiscale.com
Authored: Mon Jul 20 14:03:34 2015 -0700
Committer: Ravi Prakash ravip...@altiscale.com
Committed: Mon Jul 20 14:04:28 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  3 +
 .../BlockInfoUnderConstruction.java | 19 -
 .../server/blockmanagement/BlockManager.java| 14 +++-
 .../hdfs/server/namenode/FSNamesystem.java  | 10 +++
 .../src/main/resources/hdfs-default.xml |  9 +++
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   | 78 
 7 files changed, 132 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d58c7a7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bc01dde..c4ce009 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -714,6 +714,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8778. TestBlockReportRateLimiting#testLeaseExpiration can deadlock.
 (Arpit Agarwal)
 
+HDFS-8344. NameNode doesn't recover lease for files with missing blocks
+(raviprak)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d58c7a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0fafade..37e2d3d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -440,6 +440,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long    DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT = 10 * 1000;
   public static final String  DFS_BLOCK_INVALIDATE_LIMIT_KEY = "dfs.block.invalidate.limit";
   public static final int     DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT = 1000;
+  public static final String  DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS = "dfs.block.uc.max.recovery.attempts";
+  public static final int     DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS_DEFAULT = 5;
+
   public static final String  DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY = "dfs.corruptfilesreturned.max";
   public static final int DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED = 500;
   /* Maximum number of blocks to process for initializing replication queues */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d58c7a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index 9cd3987..28f1633 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
@@ -61,6 +60,11 @@ public abstract class BlockInfoUnderConstruction extends 
BlockInfo {
*/
   protected Block truncateBlock;
 
+  /** The number of times all replicas will be used to attempt recovery before
+   * giving up and marking the block under construction missing.
+   */
+  private int recoveryAttemptsBeforeMarkingBlockMissing;
+
   /**
* ReplicaUnderConstruction contains information about replicas while
* they are under construction.
@@ -174,6 +178,8 @@ 
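
A small sketch, not from the patch, of tuning the new limit programmatically; the key string is the one added to DFSConfigKeys above and the value 3 is arbitrary.

import org.apache.hadoop.conf.Configuration;

public class RecoveryAttemptsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Key added by HDFS-8344: how many times all replicas are tried before the
    // under-construction block is treated as missing (default 5).
    conf.setInt("dfs.block.uc.max.recovery.attempts", 3);
    System.out.println(conf.getInt("dfs.block.uc.max.recovery.attempts", 5));
  }
}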

hadoop git commit: HDFS-8344. NameNode doesn't recover lease for files with missing blocks (raviprak)

2015-07-20 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/trunk 98c2bc87b - e4f756260


HDFS-8344. NameNode doesn't recover lease for files with missing blocks 
(raviprak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4f75626
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4f75626
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4f75626

Branch: refs/heads/trunk
Commit: e4f756260f16156179ba4adad974ec92279c2fac
Parents: 98c2bc8
Author: Ravi Prakash ravip...@altiscale.com
Authored: Mon Jul 20 14:03:34 2015 -0700
Committer: Ravi Prakash ravip...@altiscale.com
Committed: Mon Jul 20 14:03:34 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  3 +
 .../BlockInfoUnderConstruction.java | 19 -
 .../server/blockmanagement/BlockManager.java| 14 +++-
 .../hdfs/server/namenode/FSNamesystem.java  | 10 +++
 .../src/main/resources/hdfs-default.xml |  9 +++
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   | 78 
 7 files changed, 132 insertions(+), 4 deletions(-)
--
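For context, the patch caps how many times the NameNode retries block recovery before it gives up and treats the under-construction block as missing, controlled by the new dfs.block.uc.max.recovery.attempts key. A minimal standalone sketch of that bookkeeping, assuming only the new key; the tracker class below is hypothetical and not part of the patch:

import org.apache.hadoop.conf.Configuration;

// Hypothetical illustration of the bounded-retry idea behind
// dfs.block.uc.max.recovery.attempts; not the actual NameNode code.
public class RecoveryAttemptTracker {
  private final int maxAttempts;
  private int attempts = 0;

  public RecoveryAttemptTracker(Configuration conf) {
    // Default of 5 matches DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS_DEFAULT.
    this.maxAttempts = conf.getInt("dfs.block.uc.max.recovery.attempts", 5);
  }

  /** @return true if one more recovery attempt may be scheduled. */
  public synchronized boolean tryScheduleRecovery() {
    if (attempts >= maxAttempts) {
      return false;  // give up instead of retrying forever
    }
    attempts++;
    return true;
  }
}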


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f75626/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 58491a6..13d9969 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1050,6 +1050,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8778. TestBlockReportRateLimiting#testLeaseExpiration can deadlock.
 (Arpit Agarwal)
 
+HDFS-8344. NameNode doesn't recover lease for files with missing blocks
+(raviprak)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f75626/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0e569f0..210d1e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -440,6 +440,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long    DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT = 10 * 1000;
   public static final String  DFS_BLOCK_INVALIDATE_LIMIT_KEY = "dfs.block.invalidate.limit";
   public static final int     DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT = 1000;
+  public static final String  DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS = "dfs.block.uc.max.recovery.attempts";
+  public static final int     DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS_DEFAULT = 5;
+
   public static final String  DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY = "dfs.corruptfilesreturned.max";
   public static final int     DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED = 500;
   /* Maximum number of blocks to process for initializing replication queues */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f75626/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index 9cd3987..28f1633 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
@@ -61,6 +60,11 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
    */
   protected Block truncateBlock;
 
+  /** The number of times all replicas will be used to attempt recovery before
+   * giving up and marking the block under construction missing.
+   */
+  private int recoveryAttemptsBeforeMarkingBlockMissing;
+
   /**
    * ReplicaUnderConstruction contains information about replicas while
    * they are under construction.
@@ -174,6 +178,8 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
 

[2/2] hadoop git commit: HDFS-8788. Implement unit tests for remote block reader in libhdfspp. Contributed by Haohui Mai.

2015-07-20 Thread wheat9
HDFS-8788. Implement unit tests for remote block reader in libhdfspp. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/172623de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/172623de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/172623de

Branch: refs/heads/HDFS-8707
Commit: 172623de91dcc6e27d2b97d37ccdd8fb9089ab0d
Parents: 928a9a1
Author: Haohui Mai whe...@apache.org
Authored: Wed Jul 15 16:58:42 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Jul 15 16:59:05 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |  13 ++
 .../src/main/native/CMakeLists.txt  |   2 +
 .../src/main/native/libhdfspp/CMakeLists.txt|   1 +
 .../main/native/libhdfspp/tests/CMakeLists.txt  |  22 ++
 .../native/libhdfspp/tests/mock_connection.cc   |  25 +++
 .../native/libhdfspp/tests/mock_connection.h|  64 ++
 .../libhdfspp/tests/remote_block_reader_test.cc | 213 +++
 7 files changed, 340 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/172623de/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 1a03cbd..9f7070e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -137,6 +137,19 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
           </target>
         </configuration>
       </execution>
+      <execution>
+        <id>native_tests</id>
+        <phase>test</phase>
+        <goals><goal>run</goal></goals>
+        <configuration>
+          <skip>${skipTests}</skip>
+          <target>
+            <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
+              <arg line="test"/>
+            </exec>
+          </target>
+        </configuration>
+      </execution>
     </executions>
   </plugin>
 </plugins>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/172623de/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/CMakeLists.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/CMakeLists.txt
index ef14183..309e99f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/CMakeLists.txt
@@ -18,4 +18,6 @@
 
 cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
 
+enable_testing()
+
 add_subdirectory(libhdfspp)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/172623de/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/CMakeLists.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/CMakeLists.txt
index cae786c..51e3122 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/CMakeLists.txt
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/CMakeLists.txt
@@ -51,3 +51,4 @@ include_directories(
 
 add_subdirectory(third_party/gmock-1.7.0)
 add_subdirectory(lib)
+add_subdirectory(tests)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/172623de/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/tests/CMakeLists.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/tests/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/tests/CMakeLists.txt
new file mode 100644
index 000..cd5e1b1
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/tests/CMakeLists.txt
@@ -0,0 +1,22 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

[04/17] hadoop git commit: YARN-3930. FileSystemNodeLabelsStore should make sure edit log file closed when exception is thrown. (Dian Fu via wangda)

2015-07-20 Thread aw
YARN-3930. FileSystemNodeLabelsStore should make sure edit log file closed when 
exception is thrown. (Dian Fu via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa2b63ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa2b63ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa2b63ed

Branch: refs/heads/HADOOP-12111
Commit: fa2b63ed162410ba05eadf211a1da068351b293a
Parents: d96bbe1
Author: Wangda Tan wan...@apache.org
Authored: Thu Jul 16 16:06:20 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Thu Jul 16 16:06:20 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../nodelabels/FileSystemNodeLabelsStore.java   | 39 
 2 files changed, 27 insertions(+), 15 deletions(-)
--
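The fix itself is the plain try/finally idiom around the edit-log writes, so the stream is closed even when serialization throws. A minimal sketch of the pattern; the stream and record types here are illustrative, not the YARN classes:

import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative only: close the log on both the success and the failure path.
public class SafeLogWriter {
  private final DataOutputStream editlogOs;

  public SafeLogWriter(DataOutputStream editlogOs) {
    this.editlogOs = editlogOs;
  }

  public void writeRecord(int type, byte[] payload) throws IOException {
    try {
      editlogOs.writeInt(type);   // may throw
      editlogOs.write(payload);   // may throw
    } finally {
      editlogOs.close();          // always runs, mirroring ensureCloseEditlogFile()
    }
  }
}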


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa2b63ed/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a098a64..cd0d132 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -637,6 +637,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3805. Update the documentation of Disk Checker based on YARN-90.
 (Masatake Iwasaki via ozawa)
 
+YARN-3930. FileSystemNodeLabelsStore should make sure edit log file closed 
when 
+exception is thrown. (Dian Fu via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa2b63ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
index f26e204..abf07e8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
@@ -127,31 +127,40 @@ public class FileSystemNodeLabelsStore extends NodeLabelsStore {
   @Override
   public void updateNodeToLabelsMappings(
       Map<NodeId, Set<String>> nodeToLabels) throws IOException {
-    ensureAppendEditlogFile();
-    editlogOs.writeInt(SerializedLogType.NODE_TO_LABELS.ordinal());
-    ((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest
-        .newInstance(nodeToLabels)).getProto().writeDelimitedTo(editlogOs);
-    ensureCloseEditlogFile();
+    try {
+      ensureAppendEditlogFile();
+      editlogOs.writeInt(SerializedLogType.NODE_TO_LABELS.ordinal());
+      ((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest
+          .newInstance(nodeToLabels)).getProto().writeDelimitedTo(editlogOs);
+    } finally {
+      ensureCloseEditlogFile();
+    }
   }
 
   @Override
   public void storeNewClusterNodeLabels(List<NodeLabel> labels)
       throws IOException {
-    ensureAppendEditlogFile();
-    editlogOs.writeInt(SerializedLogType.ADD_LABELS.ordinal());
-    ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequest
-        .newInstance(labels)).getProto().writeDelimitedTo(editlogOs);
-    ensureCloseEditlogFile();
+    try {
+      ensureAppendEditlogFile();
+      editlogOs.writeInt(SerializedLogType.ADD_LABELS.ordinal());
+      ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequest
+          .newInstance(labels)).getProto().writeDelimitedTo(editlogOs);
+    } finally {
+      ensureCloseEditlogFile();
+    }
   }
 
   @Override
   public void removeClusterNodeLabels(Collection<String> labels)
       throws IOException {
-    ensureAppendEditlogFile();
-    editlogOs.writeInt(SerializedLogType.REMOVE_LABELS.ordinal());
-    ((RemoveFromClusterNodeLabelsRequestPBImpl) RemoveFromClusterNodeLabelsRequest.newInstance(Sets
-        .newHashSet(labels.iterator()))).getProto().writeDelimitedTo(editlogOs);
-    ensureCloseEditlogFile();
+    try {
+      ensureAppendEditlogFile();
+      editlogOs.writeInt(SerializedLogType.REMOVE_LABELS.ordinal());
+      ((RemoveFromClusterNodeLabelsRequestPBImpl) RemoveFromClusterNodeLabelsRequest.newInstance(Sets
+          .newHashSet(labels.iterator()))).getProto().writeDelimitedTo(editlogOs);
+    } finally {
+      ensureCloseEditlogFile();
+    }
   }
 
   /* (non-Javadoc)



[07/17] hadoop git commit: YARN-3535. Scheduler must re-request container resources when RMContainer transitions from ALLOCATED to KILLED (rohithsharma and peng.zhang via asuresh)

2015-07-20 Thread aw
YARN-3535. Scheduler must re-request container resources when RMContainer 
transitions from ALLOCATED to KILLED (rohithsharma and peng.zhang via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b272cca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b272cca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b272cca

Branch: refs/heads/HADOOP-12111
Commit: 9b272ccae78918e7d756d84920a9322187d61eed
Parents: ee36f4f
Author: Arun Suresh asur...@apache.org
Authored: Fri Jul 17 04:31:34 2015 -0700
Committer: Arun Suresh asur...@apache.org
Committed: Fri Jul 17 04:31:34 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../rmcontainer/RMContainerImpl.java| 14 ++-
 .../scheduler/capacity/CapacityScheduler.java   | 10 ++-
 .../event/ContainerRescheduledEvent.java| 35 
 .../scheduler/event/SchedulerEventType.java |  3 +
 .../scheduler/fair/FairScheduler.java   | 11 ++-
 .../scheduler/fifo/FifoScheduler.java   |  9 ++
 .../applicationsmanager/TestAMRestart.java  |  2 +-
 .../scheduler/TestAbstractYarnScheduler.java| 90 
 .../scheduler/fair/TestFairScheduler.java   |  4 +
 10 files changed, 177 insertions(+), 4 deletions(-)
--
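In essence, a KILL arriving while a container is still ALLOCATED now also tells the scheduler to re-request the container's resources instead of only running the normal finish handling. A toy state-machine sketch of that idea; the enum and handlers below are invented for illustration and are not the RM classes:

import java.util.function.Consumer;

// Toy model of the ALLOCATED + KILL -> KILLED transition that also
// asks the scheduler to recover the container's resource request.
public class ToyContainer {
  enum State { ALLOCATED, KILLED }
  private State state = State.ALLOCATED;

  public void onKill(Consumer<ToyContainer> rescheduleHandler,
                     Consumer<ToyContainer> finishHandler) {
    if (state == State.ALLOCATED) {
      rescheduleHandler.accept(this);  // re-request the resources for the app
    }
    finishHandler.accept(this);        // usual cleanup, as in FinishedTransition
    state = State.KILLED;
  }
}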


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b272cca/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9a6f4d2..7a94e09 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -643,6 +643,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3885. ProportionalCapacityPreemptionPolicy doesn't preempt if queue 
is 
 more than 2 level. (Ajith S via wangda)
 
+YARN-3535. Scheduler must re-request container resources when RMContainer 
transitions
+from ALLOCATED to KILLED (rohithsharma and peng.zhang via asuresh)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b272cca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 0ad63b4..f7d3f56 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -47,6 +47,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAllocatedEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerRescheduledEvent;
 import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
@@ -94,7 +95,7 @@ public class RMContainerImpl implements RMContainer, Comparable<RMContainer> {
 .addTransition(RMContainerState.ALLOCATED, RMContainerState.EXPIRED,
 RMContainerEventType.EXPIRE, new FinishedTransition())
 .addTransition(RMContainerState.ALLOCATED, RMContainerState.KILLED,
-RMContainerEventType.KILL, new FinishedTransition())
+RMContainerEventType.KILL, new ContainerRescheduledTransition())
 
 // Transitions from ACQUIRED state
 .addTransition(RMContainerState.ACQUIRED, RMContainerState.RUNNING,
@@ -495,6 +496,17 @@ public class RMContainerImpl implements RMContainer, Comparable<RMContainer> {
 }
   }
 
+  private static final class ContainerRescheduledTransition extends
+  FinishedTransition {
+
+@Override
+public void transition(RMContainerImpl container, RMContainerEvent event) {
+  // Tell scheduler to recover request of this container to app
+  container.eventHandler.handle(new 

[01/17] hadoop git commit: HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write files rather than the entire DFSClient. (mingma)

2015-07-20 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-12111 840e0e5f7 - d84da00b1


HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write 
files rather than the entire DFSClient. (mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbd88f10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbd88f10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbd88f10

Branch: refs/heads/HADOOP-12111
Commit: fbd88f1062f3c4b208724d208e3f501eb196dfab
Parents: 1ba2986
Author: Ming Ma min...@apache.org
Authored: Thu Jul 16 12:33:57 2015 -0700
Committer: Ming Ma min...@apache.org
Committed: Thu Jul 16 12:33:57 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 16 +
 .../hadoop/hdfs/client/impl/LeaseRenewer.java   | 12 +++-
 .../hadoop/hdfs/TestDFSClientRetries.java   | 66 +++-
 4 files changed, 79 insertions(+), 18 deletions(-)
--
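Conceptually, on a renewal failure the renewer now aborts each client's open-for-write files and unregisters that client, rather than aborting the whole DFSClient. A rough sketch of that loop under stand-in types; ClientLike is not the real DFSClient API:

import java.util.List;

// Stand-in type to illustrate per-client handling on renewal failure.
interface ClientLike {
  void closeAllFilesBeingWritten(boolean abort);
}

public class RenewFailureHandler {
  /** Abort each client's writers, then drop the client from the renewer. */
  public void handleRenewalFailure(List<ClientLike> clients) {
    while (!clients.isEmpty()) {
      ClientLike c = clients.get(0);
      c.closeAllFilesBeingWritten(true);  // abort only the open output streams
      clients.remove(c);                  // analogous to closeClient(dfsClient)
    }
  }
}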


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbd88f10/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8f6dd41..c6685e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -165,6 +165,9 @@ Trunk (Unreleased)
 HDFS-5033. Bad error message for fs -put/copyFromLocal if user
 doesn't have permissions to read the source (Darrell Taylor via aw)
 
+HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write
+files rather than the entire DFSClient. (mingma)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbd88f10/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 6629a83..6f9e613 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -567,23 +567,9 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   void closeConnectionToNamenode() {
 RPC.stopProxy(namenode);
   }
-  
-  /** Abort and release resources held.  Ignore all errors. */
-  public void abort() {
-clientRunning = false;
-closeAllFilesBeingWritten(true);
-try {
-  // remove reference to this client and stop the renewer,
-  // if there is no more clients under the renewer.
-  getLeaseRenewer().closeClient(this);
-} catch (IOException ioe) {
-   LOG.info("Exception occurred while aborting the client " + ioe);
-}
-closeConnectionToNamenode();
-  }
 
   /** Close/abort all files being written. */
-  private void closeAllFilesBeingWritten(final boolean abort) {
+  public void closeAllFilesBeingWritten(final boolean abort) {
 for(;;) {
   final long inodeId;
   final DFSOutputStream out;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbd88f10/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
index 99323bb..c689b73 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
@@ -215,6 +215,12 @@ public class LeaseRenewer {
 return renewal;
   }
 
+  /** Used for testing only. */
+  @VisibleForTesting
+  public synchronized void setRenewalTime(final long renewal) {
+this.renewal = renewal;
+  }
+
   /** Add a client. */
   private synchronized void addClient(final DFSClient dfsc) {
 for(DFSClient c : dfsclients) {
@@ -453,8 +459,12 @@ public class LeaseRenewer {
   + (elapsed/1000) + " seconds.  Aborting ...", ie);
   synchronized (this) {
 while (!dfsclients.isEmpty()) {
-  dfsclients.get(0).abort();
+  DFSClient dfsClient = dfsclients.get(0);
+  dfsClient.closeAllFilesBeingWritten(true);
+  closeClient(dfsClient);
 }
+//Expire the current LeaseRenewer thread.
+emptyTime = 0;
   

[12/17] hadoop git commit: HADOOP-12209 Comparable type should be in FileStatus. (Yong Zhang via stevel)

2015-07-20 Thread aw
HADOOP-12209 Comparable type should be in FileStatus.   (Yong Zhang via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9141e1aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9141e1aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9141e1aa

Branch: refs/heads/HADOOP-12111
Commit: 9141e1aa16561e44f73e00b349735f530c94acc3
Parents: 05130e9
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 12:32:32 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 12:32:44 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/fs/FileStatus.java   | 15 +
 .../org/apache/hadoop/fs/LocatedFileStatus.java | 10 +++--
 .../fs/viewfs/ViewFsLocatedFileStatus.java  |  3 ++-
 .../org/apache/hadoop/fs/TestFileStatus.java| 22 
 5 files changed, 35 insertions(+), 18 deletions(-)
--
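With the typed Comparable<FileStatus>, callers get compile-time checking and can sort listings without casts. A small usage sketch; the paths are made up:

import java.util.Arrays;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

public class SortStatuses {
  public static void main(String[] args) {
    FileStatus a = new FileStatus();
    a.setPath(new Path("/tmp/b"));
    FileStatus b = new FileStatus();
    b.setPath(new Path("/tmp/a"));

    FileStatus[] statuses = { a, b };
    Arrays.sort(statuses);                      // uses compareTo(FileStatus), ordered by path
    System.out.println(statuses[0].getPath());  // /tmp/a
  }
}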


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 481d7de..18475b9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -972,6 +972,9 @@ Release 2.8.0 - UNRELEASED
+HADOOP-12235 hadoop-openstack junit & mockito dependencies should be
 provided. (Ted Yu via stevel)
 
+HADOOP-12209 Comparable type should be in FileStatus.
+(Yong Zhang via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 98757a7..6a79768 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.io.Writable;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class FileStatus implements Writable, Comparable {
+public class FileStatus implements Writable, Comparable<FileStatus> {
 
   private Path path;
   private long length;
@@ -323,19 +323,14 @@ public class FileStatus implements Writable, Comparable {
   }
 
   /**
-   * Compare this object to another object
-   * 
-   * @param   o the object to be compared.
+   * Compare this FileStatus to another FileStatus
+   * @param   o the FileStatus to be compared.
* @return  a negative integer, zero, or a positive integer as this object
*   is less than, equal to, or greater than the specified object.
-   * 
-   * @throws ClassCastException if the specified object's is not of 
-   * type FileStatus
*/
   @Override
-  public int compareTo(Object o) {
-FileStatus other = (FileStatus)o;
-return this.getPath().compareTo(other.getPath());
+  public int compareTo(FileStatus o) {
+return this.getPath().compareTo(o.getPath());
   }
   
   /** Compare if this object is equal to another object

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
index 9e920c5..588fd6a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
@@ -90,17 +90,13 @@ public class LocatedFileStatus extends FileStatus {
   }
   
   /**
-   * Compare this object to another object
-   * 
-   * @param   o the object to be compared.
+   * Compare this FileStatus to another FileStatus
+   * @param   o the FileStatus to be compared.
* @return  a negative integer, zero, or a positive integer as this object
*   is less than, equal to, or greater than the specified object.
-   * 
-   * @throws ClassCastException if the specified object's is not of 
-   * type FileStatus
*/
   @Override
-  public int compareTo(Object o) {
+  public int compareTo(FileStatus o) {
 return super.compareTo(o);
   }
   


[10/17] hadoop git commit: Pulling in YARN-3535 to branch 2.7.x

2015-07-20 Thread aw
Pulling in YARN-3535 to branch 2.7.x


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/176131f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/176131f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/176131f1

Branch: refs/heads/HADOOP-12111
Commit: 176131f12bc0d467e9caaa6a94b4ba96e09a4539
Parents: 419c51d
Author: Arun Suresh asur...@apache.org
Authored: Sat Jul 18 10:05:54 2015 -0700
Committer: Arun Suresh asur...@apache.org
Committed: Sat Jul 18 10:05:54 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/176131f1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8f7a365..e6a3343 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -646,9 +646,6 @@ Release 2.8.0 - UNRELEASED
 YARN-3885. ProportionalCapacityPreemptionPolicy doesn't preempt if queue 
is 
 more than 2 level. (Ajith S via wangda)
 
-YARN-3535. Scheduler must re-request container resources when RMContainer 
transitions
-from ALLOCATED to KILLED (rohithsharma and peng.zhang via asuresh)
-
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -675,6 +672,9 @@ Release 2.7.2 - UNRELEASED
 YARN-3905. Application History Server UI NPEs when accessing apps run after
 RM restart (Eric Payne via jeagles)
 
+YARN-3535. Scheduler must re-request container resources when RMContainer 
transitions
+from ALLOCATED to KILLED (rohithsharma and peng.zhang via asuresh)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES



[13/17] hadoop git commit: HADOOP-12088. KMSClientProvider uses equalsIgnoreCase("application/json"). (Brahma Reddy Battula via stevel)

2015-07-20 Thread aw
HADOOP-12088. KMSClientProvider uses equalsIgnoreCase("application/json").
(Brahma Reddy Battula via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05fa3368
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05fa3368
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05fa3368

Branch: refs/heads/HADOOP-12111
Commit: 05fa3368f12d189a95a2d6cd8eebc6f7e3a719ee
Parents: 9141e1a
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 13:02:51 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 13:03:03 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--
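The point of the change is that servers commonly return a parameterized content type such as application/json;charset=utf-8, which a strict equalsIgnoreCase rejects. A prefix check of the kind the patch switches to can be sketched like this, standalone and not the KMS code:

public class ContentTypeCheck {
  private static final String APPLICATION_JSON_MIME = "application/json";

  /** True for "application/json" and variants like "application/json; charset=UTF-8". */
  static boolean isJson(String contentType) {
    return contentType != null
        && contentType.trim().toLowerCase().startsWith(APPLICATION_JSON_MIME);
  }

  public static void main(String[] args) {
    System.out.println(isJson("application/json; charset=UTF-8"));  // true
    System.out.println(isJson("text/html"));                        // false
    System.out.println(isJson(null));                               // false
  }
}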


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05fa3368/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 18475b9..bfa9aac 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -975,6 +975,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12209 Comparable type should be in FileStatus.
 (Yong Zhang via stevel)
 
+HADOOP-12088. KMSClientProvider uses equalsIgnoreCase("application/json").
+(Brahma Reddy Battula via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05fa3368/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 223e69a..1ffc44d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -544,7 +544,9 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       // AuthenticatedURL properly to set authToken post initialization)
     }
     HttpExceptionUtils.validateResponse(conn, expectedResponse);
-    if (APPLICATION_JSON_MIME.equalsIgnoreCase(conn.getContentType())
+    if (conn.getContentType() != null
+        && conn.getContentType().trim().toLowerCase()
+            .startsWith(APPLICATION_JSON_MIME)
         && klass != null) {
   ObjectMapper mapper = new ObjectMapper();
   InputStream is = null;



[16/17] hadoop git commit: Merge branch 'trunk' into HADOOP-12111

2015-07-20 Thread aw
Merge branch 'trunk' into HADOOP-12111


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbe6a692
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbe6a692
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbe6a692

Branch: refs/heads/HADOOP-12111
Commit: fbe6a692fd4a8229491c0718407d06ecbd249665
Parents: 840e0e5 98c2bc8
Author: Allen Wittenauer a...@apache.org
Authored: Mon Jul 20 09:39:55 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Mon Jul 20 09:39:55 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 20 +
 .../hadoop-common/src/main/bin/hadoop   | 52 +--
 .../src/main/bin/hadoop-functions.sh| 90 
 .../crypto/key/kms/KMSClientProvider.java   |  4 +-
 .../java/org/apache/hadoop/fs/FileStatus.java   | 15 ++--
 .../org/apache/hadoop/fs/LocatedFileStatus.java | 10 +--
 .../apache/hadoop/fs/RawLocalFileSystem.java| 53 ++--
 .../fs/viewfs/ViewFsLocatedFileStatus.java  |  3 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  2 +-
 .../hadoop/security/token/SecretManager.java|  2 +-
 .../org/apache/hadoop/security/token/Token.java |  2 +-
 .../hadoop/security/token/TokenIdentifier.java  |  2 +-
 .../apache/hadoop/security/token/TokenInfo.java |  2 +-
 .../hadoop/security/token/TokenRenewer.java |  2 +-
 .../hadoop/security/token/TokenSelector.java|  2 +-
 .../hadoop/security/token/package-info.java |  2 +-
 .../org/apache/hadoop/fs/TestFileStatus.java| 22 +
 .../apache/hadoop/fs/TestLocalFileSystem.java   | 22 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hadoop-hdfs/src/main/bin/hdfs   | 82 --
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 16 +---
 .../hadoop/hdfs/client/impl/LeaseRenewer.java   | 12 ++-
 .../hadoop/hdfs/TestDFSClientRetries.java   | 66 +-
 hadoop-mapreduce-project/bin/mapred | 33 +++
 hadoop-tools/hadoop-openstack/pom.xml   |  4 +-
 hadoop-yarn-project/CHANGES.txt | 15 
 hadoop-yarn-project/hadoop-yarn/bin/yarn| 54 +---
 .../nodelabels/FileSystemNodeLabelsStore.java   | 39 +
 .../hadoop/yarn/server/webapp/AppBlock.java |  6 +-
 .../container-executor/impl/configuration.c |  9 +-
 .../impl/container-executor.c   | 20 +++--
 .../impl/container-executor.h   |  4 +-
 .../test/test-container-executor.c  | 32 +++
 .../ProportionalCapacityPreemptionPolicy.java   |  8 +-
 .../rmcontainer/RMContainerImpl.java| 14 ++-
 .../scheduler/capacity/CapacityScheduler.java   | 10 ++-
 .../event/ContainerRescheduledEvent.java| 35 
 .../scheduler/event/SchedulerEventType.java |  3 +
 .../scheduler/fair/FairScheduler.java   | 11 ++-
 .../scheduler/fifo/FifoScheduler.java   |  9 ++
 .../applicationsmanager/TestAMRestart.java  |  2 +-
 ...estProportionalCapacityPreemptionPolicy.java | 32 +++
 .../scheduler/TestAbstractYarnScheduler.java| 90 
 .../scheduler/fair/TestFairScheduler.java   |  4 +
 44 files changed, 667 insertions(+), 253 deletions(-)
--




[09/17] hadoop git commit: YARN-3844. Make hadoop-yarn-project Native code -Wall-clean (Alan Burlison via Colin P. McCabe)

2015-07-20 Thread aw
YARN-3844. Make hadoop-yarn-project Native code -Wall-clean (Alan Burlison via 
Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/419c51d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/419c51d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/419c51d2

Branch: refs/heads/HADOOP-12111
Commit: 419c51d233bd124eadb38ff013693576ec02c4f1
Parents: 7faae0e
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Fri Jul 17 11:38:59 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Fri Jul 17 11:38:59 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../container-executor/impl/configuration.c |  9 +++---
 .../impl/container-executor.c   | 20 ++--
 .../impl/container-executor.h   |  4 +--
 .../test/test-container-executor.c  | 32 +++-
 5 files changed, 38 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/419c51d2/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index df27023..8f7a365 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -332,6 +332,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3069. Document missing properties in yarn-default.xml.
 (Ray Chiang via aajisaka)
 
+YARN-3844. Make hadoop-yarn-project Native code -Wall-clean (Alan Burlison
+via Colin P. McCabe)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/419c51d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
index 51adc97..eaa1f19 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
@@ -22,6 +22,7 @@
 #include "configuration.h"
 #include "container-executor.h"
 
+#include <inttypes.h>
 #include <errno.h>
 #include <unistd.h>
 #include <stdio.h>
@@ -74,14 +75,14 @@ static int is_only_root_writable(const char *file) {
     return 0;
   }
   if (file_stat.st_uid != 0) {
-    fprintf(ERRORFILE, "File %s must be owned by root, but is owned by %d\n",
-            file, file_stat.st_uid);
+    fprintf(ERRORFILE, "File %s must be owned by root, but is owned by %" PRId64 "\n",
+            file, (int64_t)file_stat.st_uid);
     return 0;
   }
   if ((file_stat.st_mode & (S_IWGRP | S_IWOTH)) != 0) {
     fprintf(ERRORFILE, 
-            "File %s must not be world or group writable, but is %03o\n",
-            file, file_stat.st_mode & (~S_IFMT));
+            "File %s must not be world or group writable, but is %03lo\n",
+            file, (unsigned long)file_stat.st_mode & (~S_IFMT));
     return 0;
   }
   return 1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/419c51d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index ff28d30..0663166 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -19,6 +19,7 @@
 #include "configuration.h"
 #include "container-executor.h"
 
+#include <inttypes.h>
 #include <libgen.h>
 #include <dirent.h>
 #include <fcntl.h>
@@ -68,7 +69,7 @@ void set_nm_uid(uid_t user, gid_t group) {
  */
 char* get_executable() {
   char buffer[PATH_MAX];
-  snprintf(buffer, PATH_MAX, "/proc/%u/exe", getpid());
+  snprintf(buffer, PATH_MAX, "/proc/%" PRId64 "/exe", (int64_t)getpid());
   char *filename = 

[03/17] hadoop git commit: HDFS-8767. RawLocalFileSystem.listStatus() returns null for UNIX pipefile. Contributed by kanaka kumar avvaru.

2015-07-20 Thread aw
HDFS-8767. RawLocalFileSystem.listStatus() returns null for UNIX pipefile. 
Contributed by kanaka kumar avvaru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d96bbe15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d96bbe15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d96bbe15

Branch: refs/heads/HADOOP-12111
Commit: d96bbe152cf536304208f2e8f35deb3b2aa91d2b
Parents: 0bda84f
Author: Haohui Mai whe...@apache.org
Authored: Thu Jul 16 15:21:53 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Thu Jul 16 15:21:53 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../apache/hadoop/fs/RawLocalFileSystem.java| 53 +++-
 .../apache/hadoop/fs/TestLocalFileSystem.java   | 22 
 3 files changed, 53 insertions(+), 25 deletions(-)
--
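The bug was that listStatus() only special-cased isFile() and directories, so a special file such as a UNIX pipe fell through and produced null; the fix branches on isDirectory() so anything that is not a directory yields a single entry. A simplified sketch of that control flow, with FileStatus creation reduced to a string for brevity (not the real RawLocalFileSystem code):

import java.io.File;
import java.io.FileNotFoundException;

// Simplified illustration of the new control flow: branch on isDirectory()
// so pipes, sockets and devices still yield a single entry instead of null.
public class ListStatusSketch {
  static String[] listPaths(File localf) throws FileNotFoundException {
    if (!localf.exists()) {
      throw new FileNotFoundException("File " + localf + " does not exist");
    }
    if (localf.isDirectory()) {
      return localf.list();  // may still be null on an I/O error, as before
    }
    // Regular files and special files (e.g. a UNIX pipe): one entry for the path itself.
    return new String[] { localf.getPath() };
  }

  public static void main(String[] args) throws FileNotFoundException {
    for (String p : listPaths(new File("/tmp"))) {
      System.out.println(p);
    }
  }
}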


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d96bbe15/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index cf79bab..b54688f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -987,6 +987,9 @@ Release 2.7.2 - UNRELEASED
 HADOOP-12191. Bzip2Factory is not thread safe. (Brahma Reddy Battula
 via ozawa)
 
+HDFS-8767. RawLocalFileSystem.listStatus() returns null for UNIX pipefile.
+(kanaka kumar avvaru via wheat9)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d96bbe15/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index ac65b62..4728dbe 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -459,35 +459,38 @@ public class RawLocalFileSystem extends FileSystem {
     if (!localf.exists()) {
       throw new FileNotFoundException("File " + f + " does not exist");
     }
-    if (localf.isFile()) {
-      if (!useDeprecatedFileStatus) {
-        return new FileStatus[] { getFileStatus(f) };
-      }
-      return new FileStatus[] {
-        new DeprecatedRawLocalFileStatus(localf, getDefaultBlockSize(f), this)};
-    }
 
-    String[] names = localf.list();
-    if (names == null) {
-      return null;
-    }
-    results = new FileStatus[names.length];
-    int j = 0;
-    for (int i = 0; i < names.length; i++) {
-      try {
-        // Assemble the path using the Path 3 arg constructor to make sure
-        // paths with colon are properly resolved on Linux
-        results[j] = getFileStatus(new Path(f, new Path(null, null, names[i])));
-        j++;
-      } catch (FileNotFoundException e) {
-        // ignore the files not found since the dir list may have have changed
-        // since the names[] list was generated.
+    if (localf.isDirectory()) {
+      String[] names = localf.list();
+      if (names == null) {
+        return null;
       }
+      results = new FileStatus[names.length];
+      int j = 0;
+      for (int i = 0; i < names.length; i++) {
+        try {
+          // Assemble the path using the Path 3 arg constructor to make sure
+          // paths with colon are properly resolved on Linux
+          results[j] = getFileStatus(new Path(f, new Path(null, null,
+              names[i])));
+          j++;
+        } catch (FileNotFoundException e) {
+          // ignore the files not found since the dir list may have have
+          // changed since the names[] list was generated.
+        }
+      }
+      if (j == names.length) {
+        return results;
+      }
+      return Arrays.copyOf(results, j);
     }
-    if (j == names.length) {
-      return results;
+
+    if (!useDeprecatedFileStatus) {
+      return new FileStatus[] { getFileStatus(f) };
     }
-    return Arrays.copyOf(results, j);
+    return new FileStatus[] {
+        new DeprecatedRawLocalFileStatus(localf,
+            getDefaultBlockSize(f), this) };
   }
   
   protected boolean mkOneDir(File p2f) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d96bbe15/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
--
diff --git 

hadoop git commit: HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)

2015-07-20 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3d58c7a70 - cadd02ad1


HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cadd02ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cadd02ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cadd02ad

Branch: refs/heads/branch-2
Commit: cadd02ad1ddde1644f7241333c8ae446cb8c5f7f
Parents: 3d58c7a
Author: yliu y...@apache.org
Authored: Tue Jul 21 09:18:43 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Jul 21 09:18:43 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  2 ++
 .../blockmanagement/CorruptReplicasMap.java  | 19 ++-
 .../blockmanagement/TestCorruptReplicaInfo.java  | 12 ++--
 3 files changed, 22 insertions(+), 11 deletions(-)
--
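The idea is that the map is only walked in sorted order by a test helper, so the hot path can use a HashMap and a TreeMap view can be built on demand when ordered iteration is actually needed. A generic sketch of that trade-off with simplified types (not the HDFS classes):

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;

public class OnDemandSortedView {
  // O(1) insert/lookup on the common path.
  private final Map<Long, String> corrupt = new HashMap<>();

  void markCorrupt(long blockId, String reason) {
    corrupt.put(blockId, reason);
  }

  // Sorted order only when a caller (e.g. a test) really needs it.
  Iterator<Long> sortedBlockIds() {
    return new TreeMap<>(corrupt).keySet().iterator();
  }
}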


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadd02ad/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c4ce009..f9b365b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -386,6 +386,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write
 files rather than the entire DFSClient. (mingma)
 
+HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadd02ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index fc2e234..f83cbaf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -17,12 +17,19 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.Server;
 
-import java.util.*;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Stores information about all corrupt blocks in the File System.
@@ -46,8 +53,8 @@ public class CorruptReplicasMap{
 CORRUPTION_REPORTED  // client or datanode reported the corruption
   }
 
-  private final SortedMap<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
-    new TreeMap<Block, Map<DatanodeDescriptor, Reason>>();
+  private final Map<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
+    new HashMap<Block, Map<DatanodeDescriptor, Reason>>();
 
   /**
* Mark the block belonging to datanode as corrupt.
@@ -181,13 +188,15 @@ public class CorruptReplicasMap{
   * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
   *
   */
-  long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
+  @VisibleForTesting
+  long[] getCorruptReplicaBlockIdsForTesting(int numExpectedBlocks,
                                    Long startingBlockId) {
     if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
       return null;
     }
 
-    Iterator<Block> blockIt = corruptReplicasMap.keySet().iterator();
+    Iterator<Block> blockIt = 
+        new TreeMap<>(corruptReplicasMap).keySet().iterator();
 
 // if the starting block id was specified, iterate over keys until
 // we find the matching block. If we find a matching block, break

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadd02ad/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index 21fb54e..4bdaaac 100644
--- 

hadoop git commit: HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)

2015-07-20 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk ed01dc70b - d6d58606b


HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6d58606
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6d58606
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6d58606

Branch: refs/heads/trunk
Commit: d6d58606b8adf94b208aed5fc2d054b9dd081db1
Parents: ed01dc7
Author: yliu y...@apache.org
Authored: Tue Jul 21 09:20:22 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Jul 21 09:20:22 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  2 ++
 .../blockmanagement/CorruptReplicasMap.java  | 19 ++-
 .../blockmanagement/TestCorruptReplicaInfo.java  | 12 ++--
 3 files changed, 22 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d58606/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cd32c0e..388b553 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -727,6 +727,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write
 files rather than the entire DFSClient. (mingma)
 
+HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d58606/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index fc2e234..f83cbaf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -17,12 +17,19 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.Server;
 
-import java.util.*;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Stores information about all corrupt blocks in the File System.
@@ -46,8 +53,8 @@ public class CorruptReplicasMap{
 CORRUPTION_REPORTED  // client or datanode reported the corruption
   }
 
-  private final SortedMap<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
-    new TreeMap<Block, Map<DatanodeDescriptor, Reason>>();
+  private final Map<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
+    new HashMap<Block, Map<DatanodeDescriptor, Reason>>();
 
   /**
* Mark the block belonging to datanode as corrupt.
@@ -181,13 +188,15 @@ public class CorruptReplicasMap{
   * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
   *
   */
-  long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
+  @VisibleForTesting
+  long[] getCorruptReplicaBlockIdsForTesting(int numExpectedBlocks,
                                    Long startingBlockId) {
     if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
       return null;
     }
 
-    Iterator<Block> blockIt = corruptReplicasMap.keySet().iterator();
+    Iterator<Block> blockIt = 
+        new TreeMap<>(corruptReplicasMap).keySet().iterator();
 
 // if the starting block id was specified, iterate over keys until
 // we find the matching block. If we find a matching block, break

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d58606/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index 21fb54e..4bdaaac 100644
--- 

hadoop git commit: HADOOP-12081. Fix UserGroupInformation.java to support 64-bit zLinux. (aajisaka)

2015-07-20 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk d6d58606b - 773c67094


HADOOP-12081. Fix UserGroupInformation.java to support 64-bit zLinux. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/773c6709
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/773c6709
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/773c6709

Branch: refs/heads/trunk
Commit: 773c670943757681feeafb227a2d0c29d48f38f1
Parents: d6d5860
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue Jul 21 11:21:49 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue Jul 21 11:21:49 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/security/UserGroupInformation.java | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--
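On 64-bit zLinux the os.arch property is the string s390x, which does not contain "64", so the old check misclassified the platform. A tiny sketch of the adjusted detection; the property values below are just examples:

public class ArchCheck {
  static boolean is64Bit(String osArch) {
    return osArch.contains("64") || osArch.contains("s390x");
  }

  public static void main(String[] args) {
    System.out.println(is64Bit("amd64"));   // true
    System.out.println(is64Bit("s390x"));   // true, previously missed
    System.out.println(is64Bit("i386"));    // false
  }
}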


http://git-wip-us.apache.org/repos/asf/hadoop/blob/773c6709/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a23a508..ef8e238 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -705,6 +705,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11893. Mark org.apache.hadoop.security.token.Token as
 @InterfaceAudience.Public. (Brahma Reddy Battula via stevel)
 
+HADOOP-12081. Fix UserGroupInformation.java to support 64-bit zLinux.
+(aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/773c6709/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index be3d60d..80a0898 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -369,7 +369,8 @@ public class UserGroupInformation {
   private static final boolean windows =
       System.getProperty("os.name").startsWith("Windows");
   private static final boolean is64Bit =
-      System.getProperty("os.arch").contains("64");
+      System.getProperty("os.arch").contains("64") ||
+      System.getProperty("os.arch").contains("s390x");
   private static final boolean aix = System.getProperty("os.name").equals("AIX");
 
   /* Return the OS login module class name */



hadoop git commit: HDFS-8657. Update docs for mSNN. Contributed by Jesse Yates.

2015-07-20 Thread atm
Repository: hadoop
Updated Branches:
  refs/heads/trunk e4f756260 - ed01dc70b


HDFS-8657. Update docs for mSNN. Contributed by Jesse Yates.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed01dc70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed01dc70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed01dc70

Branch: refs/heads/trunk
Commit: ed01dc70b2f4ff4bdcaf71c19acf244da0868a82
Parents: e4f7562
Author: Aaron T. Myers a...@apache.org
Authored: Mon Jul 20 16:40:06 2015 -0700
Committer: Aaron T. Myers a...@apache.org
Committed: Mon Jul 20 16:40:06 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../markdown/HDFSHighAvailabilityWithNFS.md | 40 +++-
 .../markdown/HDFSHighAvailabilityWithQJM.md | 32 ++--
 3 files changed, 45 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed01dc70/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 13d9969..cd32c0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -341,6 +341,8 @@ Trunk (Unreleased)
 HDFS-8627. NPE thrown if unable to fetch token from Namenode
 (J.Andreina via vinayakumarb)
 
+HDFS-8657. Update docs for mSNN. (Jesse Yates via atm)
+
 Release 2.8.0 - UNRELEASED
 
   NEW FEATURES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed01dc70/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
index 626a473..cc53a38 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
@@ -65,18 +65,18 @@ This impacted the total availability of the HDFS cluster in 
two major ways:
 * Planned maintenance events such as software or hardware upgrades on the
   NameNode machine would result in windows of cluster downtime.
 
-The HDFS High Availability feature addresses the above problems by providing 
the option of running two redundant NameNodes in the same cluster in an 
Active/Passive configuration with a hot standby. This allows a fast failover to 
a new NameNode in the case that a machine crashes, or a graceful 
administrator-initiated failover for the purpose of planned maintenance.
+The HDFS High Availability feature addresses the above problems by providing 
the option of running two (or more, as of Hadoop 3.0.0) redundant NameNodes in 
the same cluster in an Active/Passive configuration with a hot standby(s). This 
allows a fast failover to a new NameNode in the case that a machine crashes, or 
a graceful administrator-initiated failover for the purpose of planned 
maintenance.
 
 Architecture
 
 
-In a typical HA cluster, two separate machines are configured as NameNodes. At 
any point in time, exactly one of the NameNodes is in an *Active* state, and 
the other is in a *Standby* state. The Active NameNode is responsible for all 
client operations in the cluster, while the Standby is simply acting as a 
slave, maintaining enough state to provide a fast failover if necessary.
+In a typical HA cluster, two or more separate machines are configured as 
NameNodes. At any point in time, exactly one of the NameNodes is in an *Active* 
state, and the others are in a *Standby* state. The Active NameNode is 
responsible for all client operations in the cluster, while the Standby is 
simply acting as a slave, maintaining enough state to provide a fast failover 
if necessary.
 
-In order for the Standby node to keep its state synchronized with the Active 
node, the current implementation requires that the two nodes both have access 
to a directory on a shared storage device (eg an NFS mount from a NAS). This 
restriction will likely be relaxed in future versions.
+In order for the Standby nodes to keep their state synchronized with the 
Active node, the current implementation requires that the nodes have access to 
a directory on a shared storage device (eg an NFS mount from a NAS). This 
restriction will likely be relaxed in future versions.
 
-When any namespace modification is performed by the Active node, it durably 
logs a record of the modification to an edit log file stored in the shared 
directory. The Standby node is constantly watching this directory for edits, 
and as it sees the edits, it applies 
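To make the multi-NameNode layout described in this doc change concrete, a minimal client-side configuration sketch follows. It assumes the standard HA keys documented alongside this page (dfs.nameservices, dfs.ha.namenodes.*, dfs.namenode.rpc-address.*, dfs.namenode.shared.edits.dir); the nameservice id, host names and shared-edits path are placeholders, not values taken from this commit.

// A sketch only: three NameNodes in one nameservice, with an NFS-backed shared
// edits directory as described above. All ids, hosts and paths are hypothetical.
import org.apache.hadoop.conf.Configuration;

public class MultiNameNodeHaSketch {
  public static Configuration build() {
    Configuration conf = new Configuration();
    conf.set("dfs.nameservices", "mycluster");
    // More than two NameNode ids may be listed as of Hadoop 3.0.0.
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2,nn3");
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "machine1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "machine2.example.com:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn3", "machine3.example.com:8020");
    // Shared storage (eg an NFS mount) that every NameNode can reach.
    conf.set("dfs.namenode.shared.edits.dir", "file:///mnt/filer1/dfs/ha-name-dir-shared");
    return conf;
  }
}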

hadoop git commit: HDFS-6945. BlockManager should remove a block from excessReplicateMap and decrement ExcessBlocks metric when the block is removed. (aajisaka)

2015-07-20 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 63e4ada51 -> 3e793224f


HDFS-6945. BlockManager should remove a block from excessReplicateMap and 
decrement ExcessBlocks metric when the block is removed. (aajisaka)

(cherry picked from commit 18a91fe4df0448d9f7de91602646ecf5a51c52e4)
(cherry picked from commit b85bbca74565b18dfa6689c9545d07bff5d31f83)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e793224
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e793224
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e793224

Branch: refs/heads/branch-2.7
Commit: 3e793224f929bfc272dbfdb0f580208c8703d31f
Parents: 63e4ada
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Apr 1 09:07:28 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue Jul 21 11:42:21 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../server/blockmanagement/BlockManager.java| 22 ++--
 .../namenode/metrics/TestNameNodeMetrics.java   |  9 ++--
 3 files changed, 30 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e793224/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5cfb9a5..8aab98a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -16,6 +16,9 @@ Release 2.7.2 - UNRELEASED
 
   BUG FIXES
 
+HDFS-6945. BlockManager should remove a block from excessReplicateMap and
+decrement ExcessBlocks metric when the block is removed. (aajisaka)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e793224/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 09e5748..d770346 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3382,8 +3382,7 @@ public class BlockManager {
 // file already removes them from the block map below.
 block.setNumBytes(BlockCommand.NO_ACK);
 addToInvalidates(block);
-corruptReplicas.removeFromCorruptReplicasMap(block);
-blocksMap.removeBlock(block);
+removeBlockFromMap(block);
 // Remove the block from pendingReplications and neededReplications
 pendingReplications.remove(block);
 neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
@@ -3559,11 +3558,30 @@ public class BlockManager {
   }
 
   public void removeBlockFromMap(Block block) {
+removeFromExcessReplicateMap(block);
 blocksMap.removeBlock(block);
 // If block is removed from blocksMap remove it from corruptReplicasMap
 corruptReplicas.removeFromCorruptReplicasMap(block);
   }
 
+  /**
+   * If a block is removed from blocksMap, remove it from excessReplicateMap.
+   */
+  private void removeFromExcessReplicateMap(Block block) {
+for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
+  String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
+      LightWeightLinkedSet<Block> excessReplicas = excessReplicateMap.get(uuid);
+  if (excessReplicas != null) {
+if (excessReplicas.remove(block)) {
+  excessBlocksCount.decrementAndGet();
+  if (excessReplicas.isEmpty()) {
+excessReplicateMap.remove(uuid);
+  }
+}
+  }
+}
+  }
+
   public int getCapacity() {
 return blocksMap.getCapacity();
   }
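Shown in isolation, the invariant the hunk above restores is that a counter tracking the total number of entries in a map of per-node sets must be decremented on every removal, with empty sets dropped. The sketch below is not Hadoop code; the names are made up to mirror excessReplicateMap and excessBlocksCount.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative only: the remove-and-decrement pattern applied to
// excessReplicateMap / excessBlocksCount, with hypothetical names and types.
class ExcessTracker<K, V> {
  private final Map<K, Set<V>> excess = new HashMap<>();
  private final AtomicLong excessCount = new AtomicLong();

  void add(K node, V block) {
    if (excess.computeIfAbsent(node, n -> new HashSet<>()).add(block)) {
      excessCount.incrementAndGet();
    }
  }

  // Remove the block for one node, keep the counter in sync, and drop the
  // per-node set once it is empty, as removeFromExcessReplicateMap does.
  void remove(K node, V block) {
    Set<V> blocks = excess.get(node);
    if (blocks != null && blocks.remove(block)) {
      excessCount.decrementAndGet();
      if (blocks.isEmpty()) {
        excess.remove(node);
      }
    }
  }

  long count() {
    return excessCount.get();
  }
}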

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e793224/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 63ab395..1fbb62c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ 

hadoop git commit: Move HDFS-6945 to 2.7.2 section in CHANGES.txt.

2015-07-20 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 773c67094 -> a628f6759


Move HDFS-6945 to 2.7.2 section in CHANGES.txt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a628f675
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a628f675
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a628f675

Branch: refs/heads/trunk
Commit: a628f675900d2533ddf86fb3d3e601238ecd68c3
Parents: 773c670
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue Jul 21 11:45:00 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue Jul 21 11:45:00 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a628f675/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 388b553..1293388 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -762,9 +762,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-7997. The first non-existing xattr should also throw IOException.
 (zhouyingchao via yliu)
 
-HDFS-6945. BlockManager should remove a block from excessReplicateMap and
-decrement ExcessBlocks metric when the block is removed. (aajisaka)
-
 HDFS-7922. ShortCircuitCache#close is not releasing
 ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)
 
@@ -1072,8 +1069,11 @@ Release 2.7.2 - UNRELEASED
   HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
 
   BUG FIXES
-
-Release 2.7.1 - 2015-07-06 
+
+HDFS-6945. BlockManager should remove a block from excessReplicateMap and
+decrement ExcessBlocks metric when the block is removed. (aajisaka)
+
+Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES
 



hadoop git commit: Move HDFS-6945 to 2.7.2 section in CHANGES.txt.

2015-07-20 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d591f2af5 -> e78ee1864


Move HDFS-6945 to 2.7.2 section in CHANGES.txt.

(cherry picked from commit a628f675900d2533ddf86fb3d3e601238ecd68c3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e78ee186
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e78ee186
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e78ee186

Branch: refs/heads/branch-2
Commit: e78ee186420620b97cb75a4e2cb7ac72f25ead86
Parents: d591f2a
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue Jul 21 11:45:00 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue Jul 21 11:45:53 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e78ee186/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f9b365b..095f774 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -424,9 +424,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-7997. The first non-existing xattr should also throw IOException.
 (zhouyingchao via yliu)
 
-HDFS-6945. BlockManager should remove a block from excessReplicateMap and
-decrement ExcessBlocks metric when the block is removed. (aajisaka)
-
 HDFS-7922. ShortCircuitCache#close is not releasing
 ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)
 
@@ -734,8 +731,11 @@ Release 2.7.2 - UNRELEASED
   HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
 
   BUG FIXES
-
-Release 2.7.1 - 2015-07-06 
+
+HDFS-6945. BlockManager should remove a block from excessReplicateMap and
+decrement ExcessBlocks metric when the block is removed. (aajisaka)
+
+Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES
 



hadoop git commit: HDFS-7483. Display information per tier on the Namenode UI. Contributed by Benoy Antony and Haohui Mai.

2015-07-20 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk a628f6759 -> df1e8ce44


HDFS-7483. Display information per tier on the Namenode UI. Contributed by 
Benoy Antony and Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df1e8ce4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df1e8ce4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df1e8ce4

Branch: refs/heads/trunk
Commit: df1e8ce44a4716b2cb4ff3d161d7df8081572290
Parents: a628f67
Author: Haohui Mai whe...@apache.org
Authored: Mon Jul 20 20:10:53 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Mon Jul 20 20:13:12 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../src/main/webapps/hdfs/dfshealth.html| 25 
 .../src/main/webapps/hdfs/dfshealth.js  |  8 +++
 .../blockmanagement/TestBlockStatsMXBean.java   |  1 -
 4 files changed, 36 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df1e8ce4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1293388..f38a870 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -729,6 +729,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)
 
+HDFS-7483. Display information per tier on the Namenode UI.
+(Benoy Antony and wheat9 via wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df1e8ce4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 5a3a309..8cdff84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -199,6 +199,31 @@
   {#failed}{#helper_dir_status type="Failed"/}{/failed}
   {/nn.NameDirStatuses}
 </table>
+<div class="page-header"><h1>DFS Storage Types</h1></div>
+<small>
+<table class="table">
+  <thead>
+    <tr>
+      <th>Storage Type</th>
+      <th>Configured Capacity</th>
+      <th>Capacity Used</th>
+      <th>Capacity Remaining</th>
+      <th>Block Pool Used</th>
+      <th>Nodes In Service</th>
+    </tr>
+  </thead>
+  {#blockstats.StorageTypeStats}
+  <tr>
+    <td>{key}</td>
+    <td>{value.capacityTotal|fmt_bytes}</td>
+    <td>{value.capacityUsed|fmt_bytes} ({value.capacityUsedPercentage|fmt_percentage})</td>
+    <td>{value.capacityRemaining|fmt_bytes} ({value.capacityRemainingPercentage|fmt_percentage})</td>
+    <td>{value.blockPoolUsed|fmt_bytes}</td>
+    <td>{value.nodesInService}</td>
+  </tr>
+  {/blockstats.StorageTypeStats}
+</table>
+</small>
 </script>

 <script type="text/x-dust-template" id="tmpl-snapshot">

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df1e8ce4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
index a045e42..1c13493 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
@@ -29,6 +29,7 @@
   {"name": "nn",      "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo"},
   {"name": "nnstat",  "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"},
   {"name": "fs",      "url": "/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState"},
+  {"name": "blockstats",  "url": "/jmx?qry=Hadoop:service=NameNode,name=BlockStats"},
   {"name": "mem",     "url": "/jmx?qry=java.lang:type=Memory"}
 ];
 
@@ -88,6 +89,13 @@
 for (var k in d) {
   data[k] = k === 'nn' ? workaround(d[k].beans[0]) : d[k].beans[0];
 }
+
+var blockstats = data['blockstats'];
+for (var k in blockstats.StorageTypeStats) {
+  var b = blockstats.StorageTypeStats[k].value;
+  b.capacityUsedPercentage = b.capacityUsed * 100.0 / b.capacityTotal;
+  b.capacityRemainingPercentage = b.capacityRemaining * 100.0 / 
b.capacityTotal;
+}
 render();
   }),
   function (url, jqxhr, text, err) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df1e8ce4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java

hadoop git commit: HDFS-7483. Display information per tier on the Namenode UI. Contributed by Benoy Antony and Haohui Mai.

2015-07-20 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e78ee1864 -> 428383134


HDFS-7483. Display information per tier on the Namenode UI. Contributed by 
Benoy Antony and Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42838313
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42838313
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42838313

Branch: refs/heads/branch-2
Commit: 42838313488ea264ff266348c7936cfd4d4e
Parents: e78ee18
Author: Haohui Mai whe...@apache.org
Authored: Mon Jul 20 20:10:53 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Mon Jul 20 20:13:16 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../src/main/webapps/hdfs/dfshealth.html| 25 
 .../src/main/webapps/hdfs/dfshealth.js  |  8 +++
 .../blockmanagement/TestBlockStatsMXBean.java   |  1 -
 4 files changed, 36 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42838313/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 095f774..c502dbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -388,6 +388,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)
 
+HDFS-7483. Display information per tier on the Namenode UI.
+(Benoy Antony and wheat9 via wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42838313/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 5a3a309..8cdff84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -199,6 +199,31 @@
   {#failed}{#helper_dir_status type="Failed"/}{/failed}
   {/nn.NameDirStatuses}
 </table>
+<div class="page-header"><h1>DFS Storage Types</h1></div>
+<small>
+<table class="table">
+  <thead>
+    <tr>
+      <th>Storage Type</th>
+      <th>Configured Capacity</th>
+      <th>Capacity Used</th>
+      <th>Capacity Remaining</th>
+      <th>Block Pool Used</th>
+      <th>Nodes In Service</th>
+    </tr>
+  </thead>
+  {#blockstats.StorageTypeStats}
+  <tr>
+    <td>{key}</td>
+    <td>{value.capacityTotal|fmt_bytes}</td>
+    <td>{value.capacityUsed|fmt_bytes} ({value.capacityUsedPercentage|fmt_percentage})</td>
+    <td>{value.capacityRemaining|fmt_bytes} ({value.capacityRemainingPercentage|fmt_percentage})</td>
+    <td>{value.blockPoolUsed|fmt_bytes}</td>
+    <td>{value.nodesInService}</td>
+  </tr>
+  {/blockstats.StorageTypeStats}
+</table>
+</small>
 </script>

 <script type="text/x-dust-template" id="tmpl-snapshot">

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42838313/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
index a045e42..1c13493 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
@@ -29,6 +29,7 @@
   {"name": "nn",      "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo"},
   {"name": "nnstat",  "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"},
   {"name": "fs",      "url": "/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState"},
+  {"name": "blockstats",  "url": "/jmx?qry=Hadoop:service=NameNode,name=BlockStats"},
   {"name": "mem",     "url": "/jmx?qry=java.lang:type=Memory"}
 ];
 
@@ -88,6 +89,13 @@
 for (var k in d) {
   data[k] = k === 'nn' ? workaround(d[k].beans[0]) : d[k].beans[0];
 }
+
+var blockstats = data['blockstats'];
+for (var k in blockstats.StorageTypeStats) {
+  var b = blockstats.StorageTypeStats[k].value;
+  b.capacityUsedPercentage = b.capacityUsed * 100.0 / b.capacityTotal;
+  b.capacityRemainingPercentage = b.capacityRemaining * 100.0 / 
b.capacityTotal;
+}
 render();
   }),
   function (url, jqxhr, text, err) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42838313/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java