[1/2] hadoop git commit: Merge branch 'trunk' into hdfs-7240

2015-07-28 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/hdfs-7240 [created] be57a1345


Merge branch 'trunk' into hdfs-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b3866d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b3866d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b3866d5

Branch: refs/heads/hdfs-7240
Commit: 6b3866d5299ad305fad4cbb576bb54dfea71e813
Parents: 2ebe8c7 f170934
Author: Anu Engineer aengin...@apache.org
Authored: Tue Jul 28 10:55:34 2015 -0700
Committer: Anu Engineer aengin...@apache.org
Committed: Tue Jul 28 10:55:34 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/io/retry/MultiException.java  |  49 +++
 .../hadoop/io/retry/RetryInvocationHandler.java |  99 +-
 .../src/site/markdown/FileSystemShell.md|   2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../ha/ConfiguredFailoverProxyProvider.java |  52 ++-
 .../ha/RequestHedgingProxyProvider.java | 186 ++
 .../markdown/HDFSHighAvailabilityWithNFS.md |   9 +-
 .../markdown/HDFSHighAvailabilityWithQJM.md |  10 +-
 .../ha/TestRequestHedgingProxyProvider.java | 350 +++
 hadoop-yarn-project/CHANGES.txt |   6 +
 .../container-executor/impl/configuration.c |   4 +-
 .../test/test-container-executor.c  |  22 +-
 .../webapp/CapacitySchedulerPage.java   |   5 +-
 14 files changed, 755 insertions(+), 44 deletions(-)
--




[2/2] hadoop git commit: HDFS-8695. OzoneHandler : Add Bucket REST Interface. (aengineer)

2015-07-28 Thread aengineer
HDFS-8695. OzoneHandler : Add Bucket REST Interface. (aengineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be57a134
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be57a134
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be57a134

Branch: refs/heads/hdfs-7240
Commit: be57a1345fce613093692b3da0e8615412cc0fdc
Parents: 6b3866d
Author: Anu Engineer aengin...@apache.org
Authored: Tue Jul 28 10:56:11 2015 -0700
Committer: Anu Engineer aengin...@apache.org
Committed: Tue Jul 28 10:56:11 2015 -0700

--
 .../hadoop/ozone/web/exceptions/ErrorTable.java |   5 +
 .../hadoop/ozone/web/handlers/BucketArgs.java   |  10 -
 .../ozone/web/handlers/BucketHandler.java   | 193 +
 .../web/handlers/BucketProcessTemplate.java | 278 +++
 .../apache/hadoop/ozone/web/headers/Header.java |  14 +-
 .../hadoop/ozone/web/interfaces/Bucket.java | 133 +
 .../ozone/web/interfaces/StorageHandler.java|  85 ++
 .../web/localstorage/LocalStorageHandler.java   | 109 
 8 files changed, 815 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be57a134/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
index a51dac5..7e75cf0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
@@ -58,6 +58,7 @@ public final class ErrorTable {
  new OzoneException(HTTP_BAD_REQUEST, "malformedACL",
 "Invalid ACL specified.");
 
+
  public static final OzoneException INVALID_VOLUME_NAME =
  new OzoneException(HTTP_BAD_REQUEST, "invalidVolumeName",
 "Invalid volume name.");
@@ -81,6 +82,10 @@ public final class ErrorTable {
  new OzoneException(HTTP_BAD_REQUEST, "malformedBucketVersion",
 "Malformed bucket version or version not unique.");
 
+  public static final OzoneException MALFORMED_STORAGE_TYPE =
+  new OzoneException(HTTP_BAD_REQUEST, "malformedStorageType",
+ "Invalid storage Type specified.");
+
  public static final OzoneException MALFORMED_STORAGE_CLASS =
  new OzoneException(HTTP_BAD_REQUEST, "malformedStorageClass",
 "Invalid storage class specified.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be57a134/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
index 315ae3f..d62c72d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
@@ -164,16 +164,6 @@ public class BucketArgs extends VolumeArgs {
 return versioning;
   }
 
-  /**
-   * Converts a valid String to Enum for ease of use.
-   *
-   * @param version version string.
-   */
-  public void setVersioning(String version) {
-if (version != null) {
-  this.versioning = OzoneConsts.Versioning.valueOf(version.toUpperCase());
-}
-  }
 
   /**
* SetVersioning Info.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be57a134/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
new file mode 100644
index 000..2005367
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
@@ -0,0 +1,193 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may

[2/6] hadoop git commit: HADOOP-12245. References to misspelled REMAINING_QUATA in FileSystemShell.md. Contributed by Gabor Liptak.

2015-07-28 Thread aengineer
HADOOP-12245. References to misspelled REMAINING_QUATA in FileSystemShell.md. 
Contributed by Gabor Liptak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e21dde50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e21dde50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e21dde50

Branch: refs/heads/HDFS-7240
Commit: e21dde501aa9323b7f34b4bc4ba9d282ec4f2707
Parents: 3572ebd
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue Jul 28 11:33:10 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue Jul 28 11:33:10 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/site/markdown/FileSystemShell.md| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e21dde50/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index baf39e3..aeaa5b9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1017,6 +1017,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12239. StorageException complaining  no lease ID when updating
 FolderLastModifiedTime in WASB. (Duo Xu via cnauroth)
 
+HADOOP-12245. References to misspelled REMAINING_QUATA in
+FileSystemShell.md. (Gabor Liptak via aajisaka)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e21dde50/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 144cb73..fb89ca1 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -174,7 +174,7 @@ Usage: `hadoop fs -count [-q] [-h] [-v] paths `
 
 Count the number of directories, files and bytes under the paths that match 
the specified file pattern. The output columns with -count are: DIR\_COUNT, 
FILE\_COUNT, CONTENT\_SIZE, PATHNAME
 
-The output columns with -count -q are: QUOTA, REMAINING\_QUATA, SPACE\_QUOTA, 
REMAINING\_SPACE\_QUOTA, DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME
+The output columns with -count -q are: QUOTA, REMAINING\_QUOTA, SPACE\_QUOTA, 
REMAINING\_SPACE\_QUOTA, DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME
 
 The -h option shows sizes in human readable format.
 



[4/6] hadoop git commit: YARN-3982. container-executor parsing of container-executor.cfg broken in trunk and branch-2. Contributed by Varun Vasudev

2015-07-28 Thread aengineer
YARN-3982. container-executor parsing of container-executor.cfg broken
in trunk and branch-2. Contributed by Varun Vasudev


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1709342
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1709342
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1709342

Branch: refs/heads/HDFS-7240
Commit: f17093421521efcbdc813f6f2b8411e45ecc7863
Parents: 030fcfa
Author: Xuan xg...@apache.org
Authored: Mon Jul 27 23:45:58 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Mon Jul 27 23:45:58 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../container-executor/impl/configuration.c |  4 ++--
 .../test/test-container-executor.c  | 22 +---
 3 files changed, 20 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1709342/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4f8484a..b4666e8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -698,6 +698,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3846. RM Web UI queue filter is not working for sub queue.
 (Mohammad Shahid Khan via jianhe)
 
+YARN-3982. container-executor parsing of container-executor.cfg broken in
+trunk and branch-2. (Varun Vasudev via xgong)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1709342/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
index 2825367..373dbfd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
@@ -284,11 +284,11 @@ char * get_value(const char* key) {
 
 /**
  * Function to return an array of values for a key.
- * Value delimiter is assumed to be a '%'.
+ * Value delimiter is assumed to be a ','.
  */
 char ** get_values(const char * key) {
   char *value = get_value(key);
-  return extract_values(value);
+  return extract_values_delim(value, ",");
 }
 
 char ** extract_values_delim(char *value, const char *delim) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1709342/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 99bcf34..001a37d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -145,7 +145,7 @@ void check_pid_file(const char* pid_file, pid_t mypid) {
   }
 
   char myPidBuf[33];
-  snprintf(myPidBuf, 33, "%" PRId64, (int64_t)mypid);
+  snprintf(myPidBuf, 33, "%" PRId64, (int64_t)(mypid + 1));
   if (strncmp(pidBuf, myPidBuf, strlen(myPidBuf)) != 0) {
 printf("FAIL: failed to find matching pid in pid file\n");
 printf("FAIL: Expected pid %" PRId64 " : Got %.*s", (int64_t)mypid,
@@ -212,15 +212,15 @@ void test_get_app_log_dir() {
   free(logdir);
 }
 
-void test_check_user() {
+void test_check_user(int expectedFailure) {
  printf("\nTesting test_check_user\n");
  struct passwd *user = check_user(username);
-  if (user == NULL) {
+  if (user == NULL && !expectedFailure) {
 printf("FAIL: failed check for user %s\n", username);
 exit(1);
   }
   free(user);
-  if (check_user("lp") != NULL) {
+  if (check_user("lp") != NULL && !expectedFailure) {
 printf("FAIL: failed check for system user lp\n");
 exit(1);
   }
@@ -228,7 +228,7 @@ void test_check_user() {
 printf(FAIL: failed check for system user 

Git Push Summary

2015-07-28 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/hdfs-7240 [deleted] be57a1345


[1/6] hadoop git commit: YARN-3846. RM Web UI queue filter is not working for sub queue. Contributed by Mohammad Shahid Khan

2015-07-28 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 2ebe8c7cb -> c78518749


YARN-3846. RM Web UI queue filter is not working for sub queue. Contributed by 
Mohammad Shahid Khan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3572ebd7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3572ebd7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3572ebd7

Branch: refs/heads/HDFS-7240
Commit: 3572ebd738aa5fa8b0906d75fb12cc6cbb991573
Parents: 3e6fce9
Author: Jian He jia...@apache.org
Authored: Mon Jul 27 16:57:11 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Mon Jul 27 17:12:05 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../server/resourcemanager/webapp/CapacitySchedulerPage.java| 5 -
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3572ebd7/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 534c55a..4f8484a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -695,6 +695,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3958. TestYarnConfigurationFields should be moved to hadoop-yarn-api
 module. (Varun Saxena via aajisaka)
 
+YARN-3846. RM Web UI queue filter is not working for sub queue.
+(Mohammad Shahid Khan via jianhe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3572ebd7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 12a3013..d8971b7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -516,7 +516,10 @@ class CapacitySchedulerPage extends RmView {
 $('#cs').bind('select_node.jstree', function(e, data) {,
   var q = $('.q', data.rslt.obj).first().text();,
   if (q == 'Queue: root') q = '';,
-  else q = '^' + q.substr(q.lastIndexOf(':') + 2) + '$';,
+  else {,
+q = q.substr(q.lastIndexOf(':') + 2);,
+q = '^' + q.substr(q.lastIndexOf('.') + 1) + '$';,
+  },
   $('#apps').dataTable().fnFilter(q, 4, true);,
 });,
 $('#cs').show();,



[5/6] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2015-07-28 Thread aengineer
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/188d283a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/188d283a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/188d283a

Branch: refs/heads/HDFS-7240
Commit: 188d283a4debe2151ee3327ac23bad86b9b6aba3
Parents: 2ebe8c7 f170934
Author: Anu Engineer aengin...@apache.org
Authored: Tue Jul 28 11:12:08 2015 -0700
Committer: Anu Engineer aengin...@apache.org
Committed: Tue Jul 28 11:12:08 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/io/retry/MultiException.java  |  49 +++
 .../hadoop/io/retry/RetryInvocationHandler.java |  99 +-
 .../src/site/markdown/FileSystemShell.md|   2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../ha/ConfiguredFailoverProxyProvider.java |  52 ++-
 .../ha/RequestHedgingProxyProvider.java | 186 ++
 .../markdown/HDFSHighAvailabilityWithNFS.md |   9 +-
 .../markdown/HDFSHighAvailabilityWithQJM.md |  10 +-
 .../ha/TestRequestHedgingProxyProvider.java | 350 +++
 hadoop-yarn-project/CHANGES.txt |   6 +
 .../container-executor/impl/configuration.c |   4 +-
 .../test/test-container-executor.c  |  22 +-
 .../webapp/CapacitySchedulerPage.java   |   5 +-
 14 files changed, 755 insertions(+), 44 deletions(-)
--




[6/6] hadoop git commit: HDFS-8695. OzoneHandler : Add Bucket REST Interface. (aengineer)

2015-07-28 Thread aengineer
HDFS-8695. OzoneHandler : Add Bucket REST Interface. (aengineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7851874
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7851874
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7851874

Branch: refs/heads/HDFS-7240
Commit: c78518749e431684cfc7685e1cf3299751b130e6
Parents: 188d283
Author: Anu Engineer aengin...@apache.org
Authored: Tue Jul 28 11:13:13 2015 -0700
Committer: Anu Engineer aengin...@apache.org
Committed: Tue Jul 28 11:13:13 2015 -0700

--
 .../hadoop/ozone/web/exceptions/ErrorTable.java |   5 +
 .../hadoop/ozone/web/handlers/BucketArgs.java   |  10 -
 .../ozone/web/handlers/BucketHandler.java   | 193 +
 .../web/handlers/BucketProcessTemplate.java | 278 +++
 .../apache/hadoop/ozone/web/headers/Header.java |  14 +-
 .../hadoop/ozone/web/interfaces/Bucket.java | 133 +
 .../ozone/web/interfaces/StorageHandler.java|  85 ++
 .../web/localstorage/LocalStorageHandler.java   | 109 
 8 files changed, 815 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7851874/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
index a51dac5..7e75cf0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
@@ -58,6 +58,7 @@ public final class ErrorTable {
  new OzoneException(HTTP_BAD_REQUEST, "malformedACL",
 "Invalid ACL specified.");
 
+
  public static final OzoneException INVALID_VOLUME_NAME =
  new OzoneException(HTTP_BAD_REQUEST, "invalidVolumeName",
 "Invalid volume name.");
@@ -81,6 +82,10 @@ public final class ErrorTable {
  new OzoneException(HTTP_BAD_REQUEST, "malformedBucketVersion",
 "Malformed bucket version or version not unique.");
 
+  public static final OzoneException MALFORMED_STORAGE_TYPE =
+  new OzoneException(HTTP_BAD_REQUEST, "malformedStorageType",
+ "Invalid storage Type specified.");
+
  public static final OzoneException MALFORMED_STORAGE_CLASS =
  new OzoneException(HTTP_BAD_REQUEST, "malformedStorageClass",
 "Invalid storage class specified.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7851874/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
index 315ae3f..d62c72d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
@@ -164,16 +164,6 @@ public class BucketArgs extends VolumeArgs {
 return versioning;
   }
 
-  /**
-   * Converts a valid String to Enum for ease of use.
-   *
-   * @param version version string.
-   */
-  public void setVersioning(String version) {
-if (version != null) {
-  this.versioning = OzoneConsts.Versioning.valueOf(version.toUpperCase());
-}
-  }
 
   /**
* SetVersioning Info.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7851874/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
new file mode 100644
index 000..2005367
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
@@ -0,0 +1,193 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may

[3/6] hadoop git commit: HDFS-7858. Improve HA Namenode Failover detection on the client. (asuresh)

2015-07-28 Thread aengineer
HDFS-7858. Improve HA Namenode Failover detection on the client. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/030fcfa9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/030fcfa9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/030fcfa9

Branch: refs/heads/HDFS-7240
Commit: 030fcfa99c345ad57625486eeabedebf2fd4411f
Parents: e21dde5
Author: Arun Suresh asur...@apache.org
Authored: Mon Jul 27 23:02:03 2015 -0700
Committer: Arun Suresh asur...@apache.org
Committed: Mon Jul 27 23:02:03 2015 -0700

--
 .../apache/hadoop/io/retry/MultiException.java  |  49 +++
 .../hadoop/io/retry/RetryInvocationHandler.java |  99 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../ha/ConfiguredFailoverProxyProvider.java |  52 ++-
 .../ha/RequestHedgingProxyProvider.java | 186 ++
 .../markdown/HDFSHighAvailabilityWithNFS.md |   9 +-
 .../markdown/HDFSHighAvailabilityWithQJM.md |  10 +-
 .../ha/TestRequestHedgingProxyProvider.java | 350 +++
 8 files changed, 724 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/MultiException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/MultiException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/MultiException.java
new file mode 100644
index 000..4963a2d
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/MultiException.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.io.retry;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Holder class that clients can use to return multiple exceptions.
+ */
+public class MultiException extends IOException {
+
+  private final Map<String, Exception> exes;
+
+  public MultiException(Map<String, Exception> exes) {
+    this.exes = exes;
+  }
+
+  public Map<String, Exception> getExceptions() {
+    return exes;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("{");
+    for (Exception e : exes.values()) {
+      sb.append(e.toString()).append(", ");
+    }
+    sb.append("}");
+    return "MultiException[" + sb.toString() + "]";
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index 543567e..9256356 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -23,6 +23,8 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
@@ -101,7 +103,7 @@ public class RetryInvocationHandler<T> implements 
RpcInvocationHandler {
 Object ret = invokeMethod(method, args);
 hasMadeASuccessfulCall = true;
 return ret;
-  } catch (Exception e) {
+  } catch (Exception ex) {
 boolean isIdempotentOrAtMostOnce = proxyProvider.getInterface()
 .getMethod(method.getName(), method.getParameterTypes())
 .isAnnotationPresent(Idempotent.class);
@@ -110,15 +112,16 @@ public class RetryInvocationHandlerT implements 
RpcInvocationHandler {
   

[2/9] hadoop git commit: HDFS-8721. Add a metric for number of encryption zones. Contributed by Rakesh R.

2015-07-22 Thread aengineer
HDFS-8721. Add a metric for number of encryption zones. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb03768b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb03768b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb03768b

Branch: refs/heads/HDFS-7240
Commit: cb03768b1b2250b9b5a7944cf6ef918e8a974e20
Parents: 5137b38
Author: cnauroth cnaur...@apache.org
Authored: Tue Jul 21 13:55:58 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Tue Jul 21 13:55:58 2015 -0700

--
 .../hadoop-common/src/site/markdown/Metrics.md| 1 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop/hdfs/server/namenode/EncryptionZoneManager.java| 7 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 6 ++
 .../hdfs/server/namenode/metrics/FSNamesystemMBean.java   | 5 +
 .../test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java | 6 ++
 .../hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java| 5 +
 7 files changed, 33 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index ca89745..2b23508 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -216,6 +216,7 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `TotalLoad` | Current number of connections |
 | `SnapshottableDirectories` | Current number of snapshottable directories |
 | `Snapshots` | Current number of snapshots |
+| `NumEncryptionZones` | Current number of encryption zones |
 | `BlocksTotal` | Current number of allocated blocks in the system |
 | `FilesTotal` | Current number of files and directories |
 | `PendingReplicationBlocks` | Current number of blocks pending to be 
replicated |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a29a090..7c771b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -734,6 +734,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7483. Display information per tier on the Namenode UI.
 (Benoy Antony and wheat9 via wheat9)
 
+HDFS-8721. Add a metric for number of encryption zones.
+(Rakesh R via cnauroth)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 3fe748d..7c3c895 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -360,4 +360,11 @@ public class EncryptionZoneManager {
 final boolean hasMore = (numResponses < tailMap.size());
 return new BatchedListEntries<EncryptionZone>(zones, hasMore);
   }
+
+  /**
+   * @return number of encryption zones.
+   */
+  public int getNumEncryptionZones() {
+return encryptionZones.size();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7c6d6a1..fd37fbe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4075,6 +4075,12 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 

[6/9] hadoop git commit: HADOOP-12017. Hadoop archives command should use configurable replication factor when closing (Contributed by Bibin A Chundatt)

2015-07-22 Thread aengineer
HADOOP-12017. Hadoop archives command should use configurable replication 
factor when closing (Contributed by Bibin A Chundatt)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94c6a4aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94c6a4aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94c6a4aa

Branch: refs/heads/HDFS-7240
Commit: 94c6a4aa85e7d98e9b532b330f30783315f4334b
Parents: 31f1171
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Jul 22 10:25:49 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed Jul 22 10:25:49 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../org/apache/hadoop/tools/HadoopArchives.java | 21 ++--
 .../src/site/markdown/HadoopArchives.md.vm  |  2 +-
 .../apache/hadoop/tools/TestHadoopArchives.java | 26 
 4 files changed, 33 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94c6a4aa/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5b51bce..3d101d4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -992,6 +992,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
 over getMessage() in logging/span events. (Varun Saxena via stevel)
 
+HADOOP-12017. Hadoop archives command should use configurable replication
+factor when closing (Bibin A Chundatt via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94c6a4aa/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
--
diff --git 
a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
 
b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
index 330830b..ee14850 100644
--- 
a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
+++ 
b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
@@ -100,15 +100,17 @@ public class HadoopArchives implements Tool {
  static final String SRC_PARENT_LABEL = NAME + ".parent.path";
  /** the size of the blocks that will be created when archiving **/
  static final String HAR_BLOCKSIZE_LABEL = NAME + ".block.size";
-  /**the size of the part files that will be created when archiving **/
+  /** the replication factor for the file in archiving. **/
+  static final String HAR_REPLICATION_LABEL = NAME + ".replication.factor";
+  /** the size of the part files that will be created when archiving **/
   static final String HAR_PARTSIZE_LABEL = NAME + ".partfile.size";
 
   /** size of each part file size **/
   long partSize = 2 * 1024 * 1024 * 1024l;
   /** size of blocks in hadoop archives **/
   long blockSize = 512 * 1024 * 1024l;
-  /** the desired replication degree; default is 10 **/
-  short repl = 10;
+  /** the desired replication degree; default is 3 **/
+  short repl = 3;
 
  private static final String usage = "archive"
  + " -archiveName <NAME>.har -p <parent path> [-r <replication factor>]" +
@@ -475,6 +477,7 @@ public class HadoopArchives implements Tool {
 conf.setLong(HAR_PARTSIZE_LABEL, partSize);
 conf.set(DST_HAR_LABEL, archiveName);
 conf.set(SRC_PARENT_LABEL, parentPath.makeQualified(fs).toString());
+conf.setInt(HAR_REPLICATION_LABEL, repl);
 Path outputPath = new Path(dest, archiveName);
 FileOutputFormat.setOutputPath(conf, outputPath);
 FileSystem outFs = outputPath.getFileSystem(conf);
@@ -549,8 +552,6 @@ public class HadoopArchives implements Tool {
 } finally {
   srcWriter.close();
 }
-//increase the replication of src files
-jobfs.setReplication(srcFiles, repl);
 conf.setInt(SRC_COUNT_LABEL, numFiles);
 conf.setLong(TOTAL_SIZE_LABEL, totalSize);
 int numMaps = (int)(totalSize/partSize);
@@ -587,6 +588,7 @@ public class HadoopArchives implements Tool {
 FileSystem destFs = null;
 byte[] buffer;
 int buf_size = 128 * 1024;
+private int replication = 3;
 long blockSize = 512 * 1024 * 1024l;
 
 // configure the mapper and create 
@@ -595,7 +597,7 @@ public class HadoopArchives implements Tool {
 // tmp files. 
 public void configure(JobConf conf) {
   this.conf = conf;
-
+  replication = conf.getInt(HAR_REPLICATION_LABEL, 3);
   // this is tightly tied to map reduce
   // since it does not expose an api 
   

[5/9] hadoop git commit: HDFS-8495. Consolidate append() related implementation into a single class. Contributed by Rakesh R.

2015-07-22 Thread aengineer
HDFS-8495. Consolidate append() related implementation into a single class. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31f11713
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31f11713
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31f11713

Branch: refs/heads/HDFS-7240
Commit: 31f117138a00794de4951ee8433e304d72b04094
Parents: 393fe71
Author: Haohui Mai whe...@apache.org
Authored: Tue Jul 21 17:25:23 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Tue Jul 21 17:25:23 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/FSDirAppendOp.java | 261 +++
 .../server/namenode/FSDirStatAndListingOp.java  |   2 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java   |  16 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |   6 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 241 ++---
 7 files changed, 304 insertions(+), 229 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f11713/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8122045..50803de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -737,6 +737,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8721. Add a metric for number of encryption zones.
 (Rakesh R via cnauroth)
 
+HDFS-8495. Consolidate append() related implementation into a single class.
+(Rakesh R via wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f11713/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
new file mode 100644
index 000..abb2dc8
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Helper class to perform append operation.
+ */
+final class FSDirAppendOp {
+
+  /**
+   * Private constructor for preventing FSDirAppendOp object creation.
+   * Static-only class.
+   */
+  private FSDirAppendOp() {}
+
+  /**
+   * Append to an existing file.
+   * <p>
+   *
+   * The method returns the last block of the file if this is a partial block,
+   * which can still be used for writing more data. The client uses the
+   * returned block locations to form the data 

[4/9] hadoop git commit: YARN-3878. AsyncDispatcher can hang while stopping if it is configured for draining events on stop. Contributed by Varun Saxena

2015-07-22 Thread aengineer
YARN-3878. AsyncDispatcher can hang while stopping if it is configured for 
draining events on stop. Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/393fe717
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/393fe717
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/393fe717

Branch: refs/heads/HDFS-7240
Commit: 393fe71771e3ac6bc0efe59d9aaf19d3576411b3
Parents: a26cc66
Author: Jian He jia...@apache.org
Authored: Tue Jul 21 15:05:41 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Tue Jul 21 15:05:41 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../hadoop/yarn/event/AsyncDispatcher.java  |  8 +++
 .../hadoop/yarn/event/DrainDispatcher.java  | 11 +++-
 .../hadoop/yarn/event/TestAsyncDispatcher.java  | 62 
 4 files changed, 83 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/393fe717/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 79e9ae2..5100cdf 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -682,6 +682,9 @@ Release 2.7.2 - UNRELEASED
 YARN-3535. Scheduler must re-request container resources when RMContainer 
transitions
 from ALLOCATED to KILLED (rohithsharma and peng.zhang via asuresh)
 
+YARN-3878. AsyncDispatcher can hang while stopping if it is configured for
+draining events on stop. (Varun Saxena via jianhe)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/393fe717/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index c54b9c7..48312a3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -246,6 +246,9 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
 if (!stopped) {
   LOG.warn("AsyncDispatcher thread interrupted", e);
 }
+// Need to reset drained flag to true if event queue is empty,
+// otherwise dispatcher will hang on stop.
+drained = eventQueue.isEmpty();
 throw new YarnRuntimeException(e);
   }
 };
@@ -287,6 +290,11 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
   }
 
   @VisibleForTesting
+  protected boolean isEventThreadWaiting() {
+return eventHandlingThread.getState() == Thread.State.WAITING;
+  }
+
+  @VisibleForTesting
   protected boolean isDrained() {
 return this.drained;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/393fe717/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
index da5ae44..e4a5a82 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
@@ -27,11 +27,20 @@ public class DrainDispatcher extends AsyncDispatcher {
 this(new LinkedBlockingQueue<Event>());
   }
 
-  private DrainDispatcher(BlockingQueue<Event> eventQueue) {
+  public DrainDispatcher(BlockingQueue<Event> eventQueue) {
 super(eventQueue);
   }
 
   /**
+   *  Wait till event thread enters WAITING state (i.e. waiting for new 
events).
+   */
+  public void waitForEventThreadToWait() {
+while (!isEventThreadWaiting()) {
+  Thread.yield();
+}
+  }
+
+  /**
* Busy loop waiting for all queued events to drain.
*/
   public void await() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/393fe717/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
--
diff --git 

[7/9] hadoop git commit: HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)

2015-07-22 Thread aengineer
HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40253262
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40253262
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40253262

Branch: refs/heads/HDFS-7240
Commit: 4025326288c0167ff300d4f7ecc96f84ed141912
Parents: 94c6a4a
Author: yliu y...@apache.org
Authored: Wed Jul 22 15:16:50 2015 +0800
Committer: yliu y...@apache.org
Committed: Wed Jul 22 15:16:50 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
 .../hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java| 5 +++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40253262/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 50803de..66cb89e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -740,6 +740,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8495. Consolidate append() related implementation into a single class.
 (Rakesh R via wheat9)
 
+HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40253262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
index a465f85..c486095 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
@@ -22,9 +22,9 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.GregorianCalendar;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.TreeMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,6 +36,7 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdfs.DFSUtil;
 
 import com.google.common.annotations.VisibleForTesting;
+
 import org.slf4j.Logger;
 
 /**
@@ -47,7 +48,7 @@ import org.slf4j.Logger;
 class InvalidateBlocks {
   /** Mapping: DatanodeInfo -> Collection of Blocks */
   private final Map<DatanodeInfo, LightWeightHashSet<Block>> node2blocks =
-  new TreeMap<DatanodeInfo, LightWeightHashSet<Block>>();
+  new HashMap<DatanodeInfo, LightWeightHashSet<Block>>();
   /** The total number of blocks in the map. */
   private long numBlocks = 0L;
 



[9/9] hadoop git commit: HDFS-8753. Ozone: Unify StorageContainerConfiguration with ozone-default.xml ozone-site.xml. Contributed by kanaka kumar avvaru

2015-07-22 Thread aengineer
HDFS-8753. Ozone: Unify StorageContainerConfiguration with ozone-default.xml  
ozone-site.xml. Contributed by kanaka kumar avvaru


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43bed72d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43bed72d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43bed72d

Branch: refs/heads/HDFS-7240
Commit: 43bed72d1355d44f6e910ba7ab9858dfde6edc4f
Parents: 01094cbf
Author: Anu Engineer anu.engin...@gmail.com
Authored: Wed Jul 22 10:31:43 2015 -0700
Committer: Anu Engineer anu.engin...@gmail.com
Committed: Wed Jul 22 10:31:43 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  2 ++
 .../apache/hadoop/ozone/OzoneConfiguration.java | 36 
 .../ozone/StorageContainerConfiguration.java| 35 ---
 .../web/localstorage/OzoneMetadataManager.java  |  4 +--
 .../StorageContainerConfiguration.java  | 32 -
 .../StorageContainerManager.java|  5 +--
 .../src/main/resources/ozone-default.xml| 27 +++
 .../src/test/resources/ozone-site.xml   | 24 +
 8 files changed, 94 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43bed72d/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index db38851..5ec8a24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -307,6 +307,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;
 <configuration>
   <tasks>
     <copy file="src/main/resources/hdfs-default.xml"
 todir="src/site/resources"/>
+    <copy file="src/main/resources/ozone-default.xml"
 todir="src/site/resources"/>
     <copy file="src/main/xsl/configuration.xsl"
 todir="src/site/resources"/>
   </tasks>
 </configuration>
@@ -401,6 +402,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;
   <includes>
 <include>configuration.xsl</include>
 <include>hdfs-default.xml</include>
+<include>ozone-default.xml</include>
   </includes>
   <followSymlinks>false</followSymlinks>
 </fileset>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43bed72d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfiguration.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfiguration.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfiguration.java
new file mode 100644
index 000..70efa49
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfiguration.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Configuration for ozone.
+ */
+@InterfaceAudience.Private
+public class OzoneConfiguration extends Configuration {
+  static {
+// adds the default resources
+Configuration.addDefaultResource(hdfs-default.xml);
+Configuration.addDefaultResource(hdfs-site.xml);
+Configuration.addDefaultResource(ozone-default.xml);
+Configuration.addDefaultResource(ozone-site.xml);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43bed72d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/StorageContainerConfiguration.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/StorageContainerConfiguration.java
 

[8/9] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2015-07-22 Thread aengineer
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01094cbf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01094cbf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01094cbf

Branch: refs/heads/HDFS-7240
Commit: 01094cbf0a44af89fd0a2e91812c9c0756de6934
Parents: 12bd963 4025326
Author: Anu Engineer anu.engin...@gmail.com
Authored: Wed Jul 22 10:28:28 2015 -0700
Committer: Anu Engineer anu.engin...@gmail.com
Committed: Wed Jul 22 10:28:28 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../hadoop-common/src/site/markdown/Metrics.md  |   6 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  14 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   3 -
 .../BlockInfoUnderConstruction.java |  19 +-
 .../server/blockmanagement/BlockManager.java|  14 +-
 .../blockmanagement/InvalidateBlocks.java   |   5 +-
 .../server/namenode/EncryptionZoneManager.java  |   7 +
 .../hdfs/server/namenode/FSDirAppendOp.java | 261 +++
 .../server/namenode/FSDirStatAndListingOp.java  |   2 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java   |  16 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |   6 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 257 +++---
 .../namenode/metrics/FSNamesystemMBean.java |   5 +
 .../src/main/resources/hdfs-default.xml |   9 -
 .../apache/hadoop/hdfs/TestEncryptionZones.java |   6 +
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   |  78 --
 .../server/namenode/TestFSNamesystemMBean.java  |   5 +
 .../org/apache/hadoop/tools/HadoopArchives.java |  21 +-
 .../src/site/markdown/HadoopArchives.md.vm  |   2 +-
 .../apache/hadoop/tools/TestHadoopArchives.java |  26 +-
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/event/AsyncDispatcher.java  |   8 +
 .../hadoop/yarn/event/DrainDispatcher.java  |  11 +-
 .../hadoop/yarn/event/TestAsyncDispatcher.java  |  62 +
 26 files changed, 470 insertions(+), 383 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01094cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--



[1/9] hadoop git commit: Revert HDFS-8344. NameNode doesn't recover lease for files with missing blocks (raviprak)

2015-07-22 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 12bd96395 - 43bed72d1


Revert HDFS-8344. NameNode doesn't recover lease for files with missing blocks 
(raviprak)

This reverts commit e4f756260f16156179ba4adad974ec92279c2fac.

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5137b388
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5137b388
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5137b388

Branch: refs/heads/HDFS-7240
Commit: 5137b388fc9d4d716f780daf6d04292feeb9df96
Parents: 68d1f4b
Author: Ravi Prakash ravip...@altiscale.com
Authored: Tue Jul 21 11:29:35 2015 -0700
Committer: Ravi Prakash ravip...@altiscale.com
Committed: Tue Jul 21 11:29:35 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  3 -
 .../BlockInfoUnderConstruction.java | 19 +
 .../server/blockmanagement/BlockManager.java| 14 +---
 .../hdfs/server/namenode/FSNamesystem.java  | 10 ---
 .../src/main/resources/hdfs-default.xml |  9 ---
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   | 78 
 7 files changed, 4 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5137b388/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 223baaf..a29a090 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1056,9 +1056,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-8778. TestBlockReportRateLimiting#testLeaseExpiration can deadlock.
 (Arpit Agarwal)
 
-HDFS-8344. NameNode doesn't recover lease for files with missing blocks
-(raviprak)
-
 HDFS-7582. Enforce maximum number of ACL entries separately per access
 and default. (vinayakumarb)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5137b388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 210d1e5..0e569f0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -440,9 +440,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final longDFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT = 10 * 
1000;
  public static final String  DFS_BLOCK_INVALIDATE_LIMIT_KEY = "dfs.block.invalidate.limit";
  public static final int DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT = 1000;
-  public static final String  DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS = "dfs.block.uc.max.recovery.attempts";
-  public static final int DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS_DEFAULT = 5;
-
  public static final String  DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY = "dfs.corruptfilesreturned.max";
  public static final int DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED = 500;
   /* Maximum number of blocks to process for initializing replication queues */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5137b388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index 28f1633..9cd3987 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
@@ -60,11 +61,6 @@ public abstract class BlockInfoUnderConstruction extends 
BlockInfo {
*/
   protected Block truncateBlock;
 
-  /** The number of times all replicas will be used to attempt recovery before
-   * giving up and marking the block under construction missing.
-   */
-  private int recoveryAttemptsBeforeMarkingBlockMissing;
-
   /**
* 

[3/9] hadoop git commit: HDFS-8773. Few FSNamesystem metrics are not documented in the Metrics page. Contributed by Rakesh R.

2015-07-22 Thread aengineer
HDFS-8773. Few FSNamesystem metrics are not documented in the Metrics page. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a26cc66f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a26cc66f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a26cc66f

Branch: refs/heads/HDFS-7240
Commit: a26cc66f38daec2342215a66b599bf59cee1112c
Parents: cb03768
Author: cnauroth cnaur...@apache.org
Authored: Tue Jul 21 14:12:03 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Tue Jul 21 14:12:03 2015 -0700

--
 .../hadoop-common/src/site/markdown/Metrics.md  | 5 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 2 files changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a26cc66f/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 2b23508..646cda5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -231,6 +231,11 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `BlockCapacity` | Current number of block capacity |
 | `StaleDataNodes` | Current number of DataNodes marked stale due to delayed 
heartbeat |
 | `TotalFiles` | Current number of files and directories (same as FilesTotal) |
+| `MissingReplOneBlocks` | Current number of missing blocks with replication 
factor 1 |
+| `NumFilesUnderConstruction` | Current number of files under construction |
+| `NumActiveClients` | Current number of active clients holding lease |
+| `HAState` | (HA-only) Current state of the NameNode: initializing or active 
or standby or stopping state |
+| `FSState` | Current state of the file system: Safemode or Operational |
 
 JournalNode
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a26cc66f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7c771b0..8122045 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1062,6 +1062,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7582. Enforce maximum number of ACL entries separately per access
 and default. (vinayakumarb)
 
+HDFS-8773. Few FSNamesystem metrics are not documented in the Metrics page.
+(Rakesh R via cnauroth)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[03/19] hadoop git commit: HADOOP-12209 Comparable type should be in FileStatus. (Yong Zhang via stevel)

2015-07-21 Thread aengineer
HADOOP-12209 Comparable type should be in FileStatus.   (Yong Zhang via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9141e1aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9141e1aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9141e1aa

Branch: refs/heads/HDFS-7240
Commit: 9141e1aa16561e44f73e00b349735f530c94acc3
Parents: 05130e9
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 12:32:32 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 12:32:44 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/fs/FileStatus.java   | 15 +
 .../org/apache/hadoop/fs/LocatedFileStatus.java | 10 +++--
 .../fs/viewfs/ViewFsLocatedFileStatus.java  |  3 ++-
 .../org/apache/hadoop/fs/TestFileStatus.java| 22 
 5 files changed, 35 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 481d7de..18475b9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -972,6 +972,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12235 hadoop-openstack junit  mockito dependencies should be
 provided. (Ted Yu via stevel)
 
+HADOOP-12209 Comparable type should be in FileStatus.
+(Yong Zhang via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 98757a7..6a79768 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.io.Writable;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class FileStatus implements Writable, Comparable {
+public class FileStatus implements Writable, Comparable<FileStatus> {
 
   private Path path;
   private long length;
@@ -323,19 +323,14 @@ public class FileStatus implements Writable, Comparable {
   }
 
   /**
-   * Compare this object to another object
-   * 
-   * @param   o the object to be compared.
+   * Compare this FileStatus to another FileStatus
+   * @param   o the FileStatus to be compared.
* @return  a negative integer, zero, or a positive integer as this object
*   is less than, equal to, or greater than the specified object.
-   * 
-   * @throws ClassCastException if the specified object's is not of 
-   * type FileStatus
*/
   @Override
-  public int compareTo(Object o) {
-FileStatus other = (FileStatus)o;
-return this.getPath().compareTo(other.getPath());
+  public int compareTo(FileStatus o) {
+return this.getPath().compareTo(o.getPath());
   }
   
   /** Compare if this object is equal to another object

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
index 9e920c5..588fd6a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
@@ -90,17 +90,13 @@ public class LocatedFileStatus extends FileStatus {
   }
   
   /**
-   * Compare this object to another object
-   * 
-   * @param   o the object to be compared.
+   * Compare this FileStatus to another FileStatus
+   * @param   o the FileStatus to be compared.
* @return  a negative integer, zero, or a positive integer as this object
*   is less than, equal to, or greater than the specified object.
-   * 
-   * @throws ClassCastException if the specified object's is not of 
-   * type FileStatus
*/
   @Override
-  public int compareTo(Object o) {
+  public int compareTo(FileStatus o) {
 return super.compareTo(o);
   }
   


[12/19] hadoop git commit: HDFS-7483. Display information per tier on the Namenode UI. Contributed by Benoy Antony and Haohui Mai.

2015-07-21 Thread aengineer
HDFS-7483. Display information per tier on the Namenode UI. Contributed by 
Benoy Antony and Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df1e8ce4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df1e8ce4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df1e8ce4

Branch: refs/heads/HDFS-7240
Commit: df1e8ce44a4716b2cb4ff3d161d7df8081572290
Parents: a628f67
Author: Haohui Mai whe...@apache.org
Authored: Mon Jul 20 20:10:53 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Mon Jul 20 20:13:12 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../src/main/webapps/hdfs/dfshealth.html| 25 
 .../src/main/webapps/hdfs/dfshealth.js  |  8 +++
 .../blockmanagement/TestBlockStatsMXBean.java   |  1 -
 4 files changed, 36 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df1e8ce4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1293388..f38a870 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -729,6 +729,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)
 
+HDFS-7483. Display information per tier on the Namenode UI.
+(Benoy Antony and wheat9 via wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df1e8ce4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 5a3a309..8cdff84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -199,6 +199,31 @@
   {#failed}{#helper_dir_status type=Failed/}{/failed}
   {/nn.NameDirStatuses}
 /table
+<div class="page-header"><h1>DFS Storage Types</h1></div>
+<small>
+<table class="table">
+  <thead>
+    <tr>
+      <th>Storage Type</th>
+      <th>Configured Capacity</th>
+      <th>Capacity Used</th>
+      <th>Capacity Remaining</th>
+      <th>Block Pool Used</th>
+      <th>Nodes In Service</th>
+    </tr>
+  </thead>
+  {#blockstats.StorageTypeStats}
+  <tr>
+    <td>{key}</td>
+    <td>{value.capacityTotal|fmt_bytes}</td>
+    <td>{value.capacityUsed|fmt_bytes} ({value.capacityUsedPercentage|fmt_percentage})</td>
+    <td>{value.capacityRemaining|fmt_bytes} ({value.capacityRemainingPercentage|fmt_percentage})</td>
+    <td>{value.blockPoolUsed|fmt_bytes}</td>
+    <td>{value.nodesInService}</td>
+  </tr>
+  {/blockstats.StorageTypeStats}
+</table>
+</small>
 /script
 
 script type=text/x-dust-template id=tmpl-snapshot

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df1e8ce4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
index a045e42..1c13493 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
@@ -29,6 +29,7 @@
   {"name": "nn",      "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo"},
   {"name": "nnstat",  "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"},
   {"name": "fs",      "url": "/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState"},
+  {"name": "blockstats",  "url": "/jmx?qry=Hadoop:service=NameNode,name=BlockStats"},
   {"name": "mem",     "url": "/jmx?qry=java.lang:type=Memory"}
 ];
 
@@ -88,6 +89,13 @@
     for (var k in d) {
       data[k] = k === 'nn' ? workaround(d[k].beans[0]) : d[k].beans[0];
     }
+
+    var blockstats = data['blockstats'];
+    for (var k in blockstats.StorageTypeStats) {
+      var b = blockstats.StorageTypeStats[k].value;
+      b.capacityUsedPercentage = b.capacityUsed * 100.0 / b.capacityTotal;
+      b.capacityRemainingPercentage = b.capacityRemaining * 100.0 / b.capacityTotal;
+    }
     render();
   }),
   function (url, jqxhr, text, err) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df1e8ce4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
--
diff --git 

[16/19] hadoop git commit: YARN-3915. scmadmin help message correction (Bibin A Chundatt via aw)

2015-07-21 Thread aengineer
YARN-3915. scmadmin help message correction  (Bibin A Chundatt via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da2d1ac4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da2d1ac4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da2d1ac4

Branch: refs/heads/HDFS-7240
Commit: da2d1ac4bc0bf0812b9a2a1ffbb7748113cdaf6d
Parents: c9507fe
Author: Allen Wittenauer a...@apache.org
Authored: Tue Jul 21 09:44:45 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Tue Jul 21 09:44:45 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt| 2 ++
 .../src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java  | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da2d1ac4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e6a3343..d0829c1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -51,6 +51,8 @@ Trunk - Unreleased
 YARN-2355. MAX_APP_ATTEMPTS_ENV may no longer be a useful env var
 for a container (Darrell Taylor via aw)
 
+YARN-3915. scmadmin help message correction (Bibin A Chundatt via aw)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da2d1ac4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java
index 1e45c5a..dc6bf48 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java
@@ -52,7 +52,7 @@ public class SCMAdmin extends Configured implements Tool {
     String summary = "scmadmin is the command to execute shared cache manager " +
         "administrative commands.\n" +
         "The full syntax is: \n\n" +
-        "hadoop scmadmin" +
+        "yarn scmadmin" +
         " [-runCleanerTask]" +
         " [-help [cmd]]\n";
 



[17/19] hadoop git commit: YARN-2003. Support for Application priority : Changes in RM and Capacity Scheduler. (Sunil G via wangda)

2015-07-21 Thread aengineer
YARN-2003. Support for Application priority : Changes in RM and Capacity 
Scheduler. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c39ca541
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c39ca541
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c39ca541

Branch: refs/heads/HDFS-7240
Commit: c39ca541f498712133890961598bbff50d89d68b
Parents: da2d1ac
Author: Wangda Tan wan...@apache.org
Authored: Tue Jul 21 09:56:59 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Tue Jul 21 09:57:23 2015 -0700

--
 .../sls/scheduler/ResourceSchedulerWrapper.java |  10 +
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   5 +
 .../server/resourcemanager/RMAppManager.java|  20 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  15 +-
 .../scheduler/AbstractYarnScheduler.java|  10 +
 .../server/resourcemanager/scheduler/Queue.java |   8 +
 .../scheduler/SchedulerApplication.java |  22 ++
 .../scheduler/SchedulerApplicationAttempt.java  |  15 +-
 .../scheduler/YarnScheduler.java|  20 ++
 .../scheduler/capacity/AbstractCSQueue.java |   7 +
 .../scheduler/capacity/CapacityScheduler.java   |  73 +++-
 .../CapacitySchedulerConfiguration.java |  13 +
 .../scheduler/capacity/LeafQueue.java   |  19 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |   8 +
 .../scheduler/event/AppAddedSchedulerEvent.java |  28 +-
 .../resourcemanager/scheduler/fair/FSQueue.java |   6 +
 .../scheduler/fifo/FifoScheduler.java   |   6 +
 .../scheduler/policy/FifoComparator.java|  11 +-
 .../scheduler/policy/SchedulableEntity.java |   5 +
 .../yarn/server/resourcemanager/MockRM.java |  31 +-
 .../server/resourcemanager/TestAppManager.java  |   1 +
 .../TestWorkPreservingRMRestart.java|   2 +-
 ...pacityPreemptionPolicyForNodePartitions.java |   1 +
 .../capacity/TestApplicationLimits.java |   5 +-
 .../capacity/TestApplicationPriority.java   | 345 +++
 .../capacity/TestCapacityScheduler.java |   5 +
 .../scheduler/policy/MockSchedulableEntity.java |  13 +-
 .../security/TestDelegationTokenRenewer.java|  10 +-
 .../TestRMWebServicesAppsModification.java  |   2 +-
 30 files changed, 664 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
index 08cb1e6..14e2645 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
@@ -949,4 +950,13 @@ final public class ResourceSchedulerWrapper
   ContainerStatus containerStatus, RMContainerEventType event) {
 // do nothing
   }
+
+  @Override
+  public Priority checkAndGetApplicationPriority(Priority priority,
+  String user, String queueName, ApplicationId applicationId)
+  throws YarnException {
+// TODO Dummy implementation.
+return Priority.newInstance(0);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d0829c1..7259cf2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -139,6 +139,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3116. RM notifies NM whether a container is an AM container or normal
 task container. (Giovanni Matteo Fumarola via zjshen)
 
+YARN-2003. Support for Application priority : Changes in RM and Capacity 
+Scheduler. (Sunil G via wangda)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before


[13/19] hadoop git commit: HADOOP-12214. Parse 'HadoopArchive' commandline using cli Options. (Contributed by Vinayakumar B)

2015-07-21 Thread aengineer
HADOOP-12214. Parse 'HadoopArchive' commandline using cli Options. (Contributed 
by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87f29c6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87f29c6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87f29c6b

Branch: refs/heads/HDFS-7240
Commit: 87f29c6b8acc07cc011713a79554d51945e265ac
Parents: df1e8ce
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Jul 21 13:12:46 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Jul 21 13:12:46 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../org/apache/hadoop/tools/HadoopArchives.java | 87 +---
 .../apache/hadoop/tools/TestHadoopArchives.java |  4 +-
 3 files changed, 64 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87f29c6b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index ef8e238..24709e0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -708,6 +708,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12081. Fix UserGroupInformation.java to support 64-bit zLinux.
 (aajisaka)
 
+HADOOP-12214. Parse 'HadoopArchive' commandline using cli Options.
+(vinayakumarb)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87f29c6b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
--
diff --git 
a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
 
b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
index f00bb6d..330830b 100644
--- 
a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
+++ 
b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
@@ -33,6 +33,11 @@ import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
 
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.Parser;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -81,6 +86,10 @@ public class HadoopArchives implements Tool {
   private static final Log LOG = LogFactory.getLog(HadoopArchives.class);
   
   private static final String NAME = "har";
+  private static final String ARCHIVE_NAME = "archiveName";
+  private static final String REPLICATION = "r";
+  private static final String PARENT_PATH = "p";
+  private static final String HELP = "help";
   static final String SRC_LIST_LABEL = NAME + ".src.list";
   static final String DST_DIR_LABEL = NAME + ".dest.path";
   static final String TMP_DIR_LABEL = NAME + ".tmp.dir";
@@ -101,9 +110,9 @@ public class HadoopArchives implements Tool {
   /** the desired replication degree; default is 10 **/
   short repl = 10;
 
-  private static final String usage = Usage: archive
-  +  -archiveName NAME.har -p parent path [-r replication factor] +
-  src* dest +
+  private static final String usage = archive
+  +  -archiveName NAME.har -p parent path [-r replication factor] +
+   src* dest +
   \n;
   
  
@@ -794,7 +803,17 @@ public class HadoopArchives implements Tool {
 }
 
   }
-  
+
+  private void printUsage(Options opts, boolean printDetailed) {
+HelpFormatter helpFormatter = new HelpFormatter();
+if (printDetailed) {
+  helpFormatter.printHelp(usage.length() + 10, usage, null, opts, null,
+  false);
+} else {
+  System.out.println(usage);
+}
+  }
+
   /** the main driver for creating the archives
*  it takes at least three command line parameters. The parent path, 
*  The src and the dest. It does an lsr on the source paths.
@@ -804,43 +823,51 @@ public class HadoopArchives implements Tool {
 
   public int run(String[] args) throws Exception {
 try {
-      Path parentPath = null;
-      List<Path> srcPaths = new ArrayList<Path>();
-      Path destPath = null;
-      String archiveName = null;
-      if (args.length < 5) {
-        System.out.println(usage);
-        throw new IOException("Invalid usage.");
-      }
-      if (!"-archiveName".equals(args[0])) {
-        System.out.println(usage);
+  // Parse CLI options
+  Options options = new Options();
+  

[08/19] hadoop git commit: HDFS-8657. Update docs for mSNN. Contributed by Jesse Yates.

2015-07-21 Thread aengineer
HDFS-8657. Update docs for mSNN. Contributed by Jesse Yates.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed01dc70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed01dc70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed01dc70

Branch: refs/heads/HDFS-7240
Commit: ed01dc70b2f4ff4bdcaf71c19acf244da0868a82
Parents: e4f7562
Author: Aaron T. Myers a...@apache.org
Authored: Mon Jul 20 16:40:06 2015 -0700
Committer: Aaron T. Myers a...@apache.org
Committed: Mon Jul 20 16:40:06 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../markdown/HDFSHighAvailabilityWithNFS.md | 40 +++-
 .../markdown/HDFSHighAvailabilityWithQJM.md | 32 ++--
 3 files changed, 45 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed01dc70/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 13d9969..cd32c0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -341,6 +341,8 @@ Trunk (Unreleased)
 HDFS-8627. NPE thrown if unable to fetch token from Namenode
 (J.Andreina via vinayakumarb)
 
+HDFS-8657. Update docs for mSNN. (Jesse Yates via atm)
+
 Release 2.8.0 - UNRELEASED
 
   NEW FEATURES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed01dc70/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
index 626a473..cc53a38 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
@@ -65,18 +65,18 @@ This impacted the total availability of the HDFS cluster in 
two major ways:
 * Planned maintenance events such as software or hardware upgrades on the
   NameNode machine would result in windows of cluster downtime.
 
-The HDFS High Availability feature addresses the above problems by providing 
the option of running two redundant NameNodes in the same cluster in an 
Active/Passive configuration with a hot standby. This allows a fast failover to 
a new NameNode in the case that a machine crashes, or a graceful 
administrator-initiated failover for the purpose of planned maintenance.
+The HDFS High Availability feature addresses the above problems by providing 
the option of running two (or more, as of Hadoop 3.0.0) redundant NameNodes in 
the same cluster in an Active/Passive configuration with a hot standby(s). This 
allows a fast failover to a new NameNode in the case that a machine crashes, or 
a graceful administrator-initiated failover for the purpose of planned 
maintenance.
 
 Architecture
 
 
-In a typical HA cluster, two separate machines are configured as NameNodes. At 
any point in time, exactly one of the NameNodes is in an *Active* state, and 
the other is in a *Standby* state. The Active NameNode is responsible for all 
client operations in the cluster, while the Standby is simply acting as a 
slave, maintaining enough state to provide a fast failover if necessary.
+In a typical HA cluster, two or more separate machines are configured as 
NameNodes. At any point in time, exactly one of the NameNodes is in an *Active* 
state, and the others are in a *Standby* state. The Active NameNode is 
responsible for all client operations in the cluster, while the Standby is 
simply acting as a slave, maintaining enough state to provide a fast failover 
if necessary.
 
-In order for the Standby node to keep its state synchronized with the Active 
node, the current implementation requires that the two nodes both have access 
to a directory on a shared storage device (eg an NFS mount from a NAS). This 
restriction will likely be relaxed in future versions.
+In order for the Standby nodes to keep their state synchronized with the 
Active node, the current implementation requires that the nodes have access to 
a directory on a shared storage device (eg an NFS mount from a NAS). This 
restriction will likely be relaxed in future versions.
 
-When any namespace modification is performed by the Active node, it durably 
logs a record of the modification to an edit log file stored in the shared 
directory. The Standby node is constantly watching this directory for edits, 
and as it sees the edits, it applies them to its own namespace. In the event of 
a failover, the Standby will 

[1/3] hadoop git commit: MAPREDUCE-5801. Uber mode's log message is missing a vcore reason (Steven Wong via aw)

2015-07-21 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 942e1ac21 - 12bd96395


MAPREDUCE-5801. Uber mode's log message is missing a vcore reason  (Steven Wong 
via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf747720
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf747720
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf747720

Branch: refs/heads/HDFS-7240
Commit: cf74772064de0ce7cefd541e3f407442f4f0e281
Parents: 3b7ffc4
Author: Allen Wittenauer a...@apache.org
Authored: Tue Jul 21 10:58:52 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Tue Jul 21 10:58:52 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java | 2 ++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf747720/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 31f4eaa..398ffc6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -270,6 +270,9 @@ Trunk (Unreleased)
 
 MAPREDUCE-6078. native-task: fix gtest build on macosx (Binglin Chang)
 
+MAPREDUCE-5801. Uber mode's log message is missing a vcore reason
+(Steven Wong via aw)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf747720/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index 731bcba..4c3b3fe 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -1289,6 +1289,8 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
 msg.append( too much CPU;);
   if (!smallMemory)
 msg.append( too much RAM;);
+  if (!smallCpu)
+  msg.append( too much CPU;);
   if (!notChainJob)
 msg.append( chainjob;);
   LOG.info(msg.toString());



[10/19] hadoop git commit: HADOOP-12081. Fix UserGroupInformation.java to support 64-bit zLinux. (aajisaka)

2015-07-21 Thread aengineer
HADOOP-12081. Fix UserGroupInformation.java to support 64-bit zLinux. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/773c6709
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/773c6709
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/773c6709

Branch: refs/heads/HDFS-7240
Commit: 773c670943757681feeafb227a2d0c29d48f38f1
Parents: d6d5860
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue Jul 21 11:21:49 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue Jul 21 11:21:49 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/security/UserGroupInformation.java | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/773c6709/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a23a508..ef8e238 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -705,6 +705,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11893. Mark org.apache.hadoop.security.token.Token as
 @InterfaceAudience.Public. (Brahma Reddy Battula via stevel)
 
+HADOOP-12081. Fix UserGroupInformation.java to support 64-bit zLinux.
+(aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/773c6709/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index be3d60d..80a0898 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -369,7 +369,8 @@ public class UserGroupInformation {
   private static final boolean windows =
       System.getProperty("os.name").startsWith("Windows");
   private static final boolean is64Bit =
-      System.getProperty("os.arch").contains("64");
+      System.getProperty("os.arch").contains("64") ||
+      System.getProperty("os.arch").contains("s390x");
   private static final boolean aix =
       System.getProperty("os.name").equals("AIX");
 
   /* Return the OS login module class name */



[06/19] hadoop git commit: HADOOP-11893. Mark org.apache.hadoop.security.token.Token as @InterfaceAudience.Public. (Brahma Reddy Battula via stevel)

2015-07-21 Thread aengineer
HADOOP-11893. Mark org.apache.hadoop.security.token.Token as 
@InterfaceAudience.Public. (Brahma Reddy Battula via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98c2bc87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98c2bc87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98c2bc87

Branch: refs/heads/HDFS-7240
Commit: 98c2bc87b1445c533268c58d382ea4e4297303fd
Parents: a943142
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 13:22:03 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 13:22:14 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../main/java/org/apache/hadoop/security/token/SecretManager.java | 2 +-
 .../src/main/java/org/apache/hadoop/security/token/Token.java | 2 +-
 .../java/org/apache/hadoop/security/token/TokenIdentifier.java| 2 +-
 .../src/main/java/org/apache/hadoop/security/token/TokenInfo.java | 2 +-
 .../main/java/org/apache/hadoop/security/token/TokenRenewer.java  | 2 +-
 .../main/java/org/apache/hadoop/security/token/TokenSelector.java | 2 +-
 .../main/java/org/apache/hadoop/security/token/package-info.java  | 2 +-
 8 files changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1b643a9..a23a508 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -702,6 +702,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12153. ByteBufferReadable doesn't declare @InterfaceAudience and
 @InterfaceStability. (Brahma Reddy Battula via ozawa)
 
+HADOOP-11893. Mark org.apache.hadoop.security.token.Token as
+@InterfaceAudience.Public. (Brahma Reddy Battula via stevel)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
index 5fe0391..798c8c9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.ipc.StandbyException;
  * The server-side secret manager for each token type.
  * @param T The type of the token identifier
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.Public
 @InterfaceStability.Evolving
 public abstract class SecretManager<T extends TokenIdentifier> {
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
index bd254e6..2420155 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
@@ -36,7 +36,7 @@ import java.util.ServiceLoader;
 /**
  * The client-side form of the token.
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class Token<T extends TokenIdentifier> implements Writable {
   public static final Log LOG = LogFactory.getLog(Token.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java
index ebf9d58..0b111cc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java
+++ 

[05/19] hadoop git commit: HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString() over getMessage() in logging/span events. (Varun Saxena via stevel)

2015-07-21 Thread aengineer
HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()  over 
getMessage() in logging/span events. (Varun Saxena via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9431425
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9431425
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9431425

Branch: refs/heads/HDFS-7240
Commit: a9431425d1aff657fc1ea501c706235f2ebc518f
Parents: 05fa336
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 13:13:09 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 13:13:23 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9431425/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index bfa9aac..1b643a9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -978,6 +978,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12088. KMSClientProvider uses equalsIgnoreCase(application/json).
 (Brahma Reddy Battula via stevel)
 
+HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
+over getMessage() in logging/span events. (Varun Saxena via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9431425/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index e75de15..cc75f5c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -238,7 +238,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 }
 if (Trace.isTracing()) {
   traceScope.getSpan().addTimelineAnnotation(
-  "Call got exception: " + e.getMessage());
+  "Call got exception: " + e.toString());
 }
 throw new ServiceException(e);
   } finally {



[14/19] hadoop git commit: HDFS-7582. Enforce maximum number of ACL entries separately per access and default. (Contributed by Vinayakumar B)

2015-07-21 Thread aengineer
HDFS-7582. Enforce maximum number of ACL entries separately per access and 
default. (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29cf887b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29cf887b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29cf887b

Branch: refs/heads/HDFS-7240
Commit: 29cf887b226f4ab3c336a6e681db5e8e70699d66
Parents: 87f29c6
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Jul 21 15:16:52 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Jul 21 15:16:52 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/namenode/AclTransformation.java | 30 +++
 .../server/namenode/TestAclTransformation.java  | 55 ++--
 3 files changed, 76 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29cf887b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f38a870..6c91c45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1057,6 +1057,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8344. NameNode doesn't recover lease for files with missing blocks
 (raviprak)
 
+HDFS-7582. Enforce maximum number of ACL entries separately per access
+and default. (vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29cf887b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java
index 1474e03..c887e9d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java
@@ -271,10 +271,6 @@ final class AclTransformation {
*/
   private static List<AclEntry> buildAndValidateAcl(
   ArrayList<AclEntry> aclBuilder) throws AclException {
-if (aclBuilder.size() > MAX_ENTRIES) {
-  throw new AclException("Invalid ACL: ACL has " + aclBuilder.size() +
-      " entries, which exceeds maximum of " + MAX_ENTRIES + ".");
-}
 aclBuilder.trimToSize();
 Collections.sort(aclBuilder, ACL_ENTRY_COMPARATOR);
 // Full iteration to check for duplicates and invalid named entries.
@@ -292,9 +288,12 @@ final class AclTransformation {
   }
   prevEntry = entry;
 }
+
+ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder);
+checkMaxEntries(scopedEntries);
+
 // Search for the required base access entries.  If there is a default ACL,
 // then do the same check on the default entries.
-ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder);
 for (AclEntryType type: EnumSet.of(USER, GROUP, OTHER)) {
   AclEntry accessEntryKey = new AclEntry.Builder().setScope(ACCESS)
 .setType(type).build();
@@ -316,6 +315,22 @@ final class AclTransformation {
 return Collections.unmodifiableList(aclBuilder);
   }
 
+  // Check the max entries separately on access and default entries
+  // HDFS-7582
+  private static void checkMaxEntries(ScopedAclEntries scopedEntries)
+  throws AclException {
+List<AclEntry> accessEntries = scopedEntries.getAccessEntries();
+List<AclEntry> defaultEntries = scopedEntries.getDefaultEntries();
+if (accessEntries.size() > MAX_ENTRIES) {
+  throw new AclException("Invalid ACL: ACL has " + accessEntries.size()
+      + " access entries, which exceeds maximum of " + MAX_ENTRIES + ".");
+}
+if (defaultEntries.size() > MAX_ENTRIES) {
+  throw new AclException("Invalid ACL: ACL has " + defaultEntries.size()
+      + " default entries, which exceeds maximum of " + MAX_ENTRIES + ".");
+}
+  }
+
   /**
* Calculates mask entries required for the ACL.  Mask calculation is 
performed
* separately for each scope: access and default.  This method is responsible
@@ -444,11 +459,8 @@ final class AclTransformation {
  * @throws AclException if validation fails
  */
 public ValidatedAclSpec(List<AclEntry> aclSpec) throws AclException {
-  if (aclSpec.size() > MAX_ENTRIES) {
-    throw new AclException("Invalid ACL: ACL spec has " + aclSpec.size() +
-        " entries, which exceeds maximum of " + MAX_ENTRIES 

[15/19] hadoop git commit: HDFS-8800. hdfs --daemon stop namenode corrupts logs (John Smith via aw)

2015-07-21 Thread aengineer
HDFS-8800. hdfs --daemon stop namenode corrupts logs (John Smith via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9507fe6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9507fe6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9507fe6

Branch: refs/heads/HDFS-7240
Commit: c9507fe6c12491f3aef5cd4142b4d466bd6b71c3
Parents: 29cf887b
Author: Allen Wittenauer a...@apache.org
Authored: Tue Jul 21 09:41:28 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Tue Jul 21 09:41:28 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs | 8 
 2 files changed, 2 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9507fe6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6c91c45..223baaf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -343,6 +343,8 @@ Trunk (Unreleased)
 
 HDFS-8657. Update docs for mSNN. (Jesse Yates via atm)
 
+HDFS-8800. hdfs --daemon stop namenode corrupts logs (John Smith via aw)
+
 Release 2.8.0 - UNRELEASED
 
   NEW FEATURES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9507fe6/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index a996a80..23a08be 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -274,14 +274,6 @@ fi
 hadoop_finalize
 
 if [[ -n ${supportdaemonization} ]]; then
-  if [[ "${COMMAND}" == "namenode" ]] &&
-     [[ "${HADOOP_DAEMON_MODE}" == "stop" ]]; then
-hadoop_debug Do checkpoint if necessary before stopping NameNode
-export CLASSPATH
-${JAVA} -Dproc_dfsadmin ${HADOOP_OPTS} 
org.apache.hadoop.hdfs.tools.DFSAdmin -safemode enter
-${JAVA} -Dproc_dfsadmin ${HADOOP_OPTS} 
org.apache.hadoop.hdfs.tools.DFSAdmin -saveNamespace -beforeShutdown
-${JAVA} -Dproc_dfsadmin ${HADOOP_OPTS} 
org.apache.hadoop.hdfs.tools.DFSAdmin -safemode leave
-  fi
   if [[ -n ${secure_service} ]]; then
 hadoop_secure_daemon_handler \
 ${HADOOP_DAEMON_MODE} ${COMMAND} ${CLASS}\



[11/19] hadoop git commit: Move HDFS-6945 to 2.7.2 section in CHANGES.txt.

2015-07-21 Thread aengineer
Move HDFS-6945 to 2.7.2 section in CHANGES.txt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a628f675
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a628f675
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a628f675

Branch: refs/heads/HDFS-7240
Commit: a628f675900d2533ddf86fb3d3e601238ecd68c3
Parents: 773c670
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue Jul 21 11:45:00 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue Jul 21 11:45:00 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a628f675/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 388b553..1293388 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -762,9 +762,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-7997. The first non-existing xattr should also throw IOException.
 (zhouyingchao via yliu)
 
-HDFS-6945. BlockManager should remove a block from excessReplicateMap and
-decrement ExcessBlocks metric when the block is removed. (aajisaka)
-
 HDFS-7922. ShortCircuitCache#close is not releasing
 ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)
 
@@ -1072,8 +1069,11 @@ Release 2.7.2 - UNRELEASED
   HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
 
   BUG FIXES
-
-Release 2.7.1 - 2015-07-06 
+
+HDFS-6945. BlockManager should remove a block from excessReplicateMap and
+decrement ExcessBlocks metric when the block is removed. (aajisaka)
+
+Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES
 



[04/19] hadoop git commit: HADOOP-12088. KMSClientProvider uses equalsIgnoreCase(application/json). (Brahma Reddy Battula via stevel)

2015-07-21 Thread aengineer
HADOOP-12088. KMSClientProvider uses equalsIgnoreCase(application/json).   
(Brahma Reddy Battula via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05fa3368
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05fa3368
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05fa3368

Branch: refs/heads/HDFS-7240
Commit: 05fa3368f12d189a95a2d6cd8eebc6f7e3a719ee
Parents: 9141e1a
Author: Steve Loughran ste...@apache.org
Authored: Mon Jul 20 13:02:51 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Mon Jul 20 13:03:03 2015 +0100

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05fa3368/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 18475b9..bfa9aac 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -975,6 +975,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12209 Comparable type should be in FileStatus.
 (Yong Zhang via stevel)
 
+HADOOP-12088. KMSClientProvider uses equalsIgnoreCase(application/json).
+(Brahma Reddy Battula via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05fa3368/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 223e69a..1ffc44d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -544,7 +544,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   // AuthenticatedURL properly to set authToken post initialization)
 }
 HttpExceptionUtils.validateResponse(conn, expectedResponse);
-if (APPLICATION_JSON_MIME.equalsIgnoreCase(conn.getContentType())
+if (conn.getContentType() != null
+    && conn.getContentType().trim().toLowerCase()
+        .startsWith(APPLICATION_JSON_MIME)
     && klass != null) {
   ObjectMapper mapper = new ObjectMapper();
   InputStream is = null;



[01/19] hadoop git commit: Pulling in YARN-3535 to branch 2.7.x

2015-07-21 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 857686119 - 942e1ac21


Pulling in YARN-3535 to branch 2.7.x


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/176131f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/176131f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/176131f1

Branch: refs/heads/HDFS-7240
Commit: 176131f12bc0d467e9caaa6a94b4ba96e09a4539
Parents: 419c51d
Author: Arun Suresh asur...@apache.org
Authored: Sat Jul 18 10:05:54 2015 -0700
Committer: Arun Suresh asur...@apache.org
Committed: Sat Jul 18 10:05:54 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/176131f1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8f7a365..e6a3343 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -646,9 +646,6 @@ Release 2.8.0 - UNRELEASED
 YARN-3885. ProportionalCapacityPreemptionPolicy doesn't preempt if queue 
is 
 more than 2 level. (Ajith S via wangda)
 
-YARN-3535. Scheduler must re-request container resources when RMContainer 
transitions
-from ALLOCATED to KILLED (rohithsharma and peng.zhang via asuresh)
-
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -675,6 +672,9 @@ Release 2.7.2 - UNRELEASED
 YARN-3905. Application History Server UI NPEs when accessing apps run after
 RM restart (Eric Payne via jeagles)
 
+YARN-3535. Scheduler must re-request container resources when RMContainer 
transitions
+from ALLOCATED to KILLED (rohithsharma and peng.zhang via asuresh)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES



[18/19] hadoop git commit: YARN-3261. rewrite resourcemanager restart doc to remove roadmap bits (Gururaj Shetty via aw)

2015-07-21 Thread aengineer
YARN-3261. rewrite resourcemanager restart doc to remove roadmap bits (Gururaj 
Shetty via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b7ffc4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b7ffc4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b7ffc4f

Branch: refs/heads/HDFS-7240
Commit: 3b7ffc4f3f0ffb0fa6c324da6d88803f5b233832
Parents: c39ca54
Author: Allen Wittenauer a...@apache.org
Authored: Tue Jul 21 10:00:20 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Tue Jul 21 10:00:34 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  2 ++
 .../src/site/markdown/ResourceManagerRestart.md | 32 +---
 2 files changed, 16 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b7ffc4f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7259cf2..79e9ae2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -29,6 +29,8 @@ Trunk - Unreleased
 YARN-2280. Resource manager web service fields are not accessible
 (Krisztian Horvath via aw)
 
+YARN-3261. rewrite resourcemanager restart doc to remove roadmap bits 
(Gururaj Shetty via aw)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b7ffc4f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md
index d23505d..ee222c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md
@@ -31,34 +31,30 @@ ResourceManger Restart
 Overview
 
 
-ResourceManager is the central authority that manages resources and schedules 
applications running atop of YARN. Hence, it is potentially a single point of 
failure in a Apache YARN cluster.
-`
-This document gives an overview of ResourceManager Restart, a feature that 
enhances ResourceManager to keep functioning across restarts and also makes 
ResourceManager down-time invisible to end-users.
+ResourceManager is the central authority that manages resources and schedules 
applications running on YARN. Hence, it is potentially a single point of 
failure in an Apache YARN cluster. This document gives an overview of 
ResourceManager Restart, a feature that enhances ResourceManager to keep 
functioning across restarts and also makes ResourceManager down-time invisible 
to end-users.
 
-ResourceManager Restart feature is divided into two phases: 
+There are two types of restart for ResourceManager:
 
-* **ResourceManager Restart Phase 1 (Non-work-preserving RM restart)**: 
Enhance RM to persist application/attempt state and other credentials 
information in a pluggable state-store. RM will reload this information from 
state-store upon restart and re-kick the previously running applications. Users 
are not required to re-submit the applications.
+* **Non-work-preserving RM restart**: This restart enhances RM to persist 
application/attempt state and other credentials information in a pluggable 
state-store. RM will reload this information from state-store on restart and 
re-kick the previously running applications. Users are not required to 
re-submit the applications.
 
-* **ResourceManager Restart Phase 2 (Work-preserving RM restart)**: Focus on 
re-constructing the running state of ResourceManager by combining the container 
statuses from NodeManagers and container requests from ApplicationMasters upon 
restart. The key difference from phase 1 is that previously running 
applications will not be killed after RM restarts, and so applications won't 
lose its work because of RM outage.
+* **Work-preserving RM restart**: This focuses on re-constructing the running 
state of RM by combining the container status from NodeManagers and container 
requests from ApplicationMasters on restart. The key difference from 
Non-work-preserving RM restart is that previously running applications will not 
be killed after RM restarts, and so applications will not lose its work because 
of RM outage.
 
 Feature
 ---
 
-* **Phase 1: Non-work-preserving RM restart** 
+* **Non-work-preserving RM restart**
 
- As of Hadoop 2.4.0 release, only ResourceManager Restart Phase 1 is 
implemented which is described below.
+ In non-work-preserving RM restart, RM will 

[19/19] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2015-07-21 Thread aengineer
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/942e1ac2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/942e1ac2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/942e1ac2

Branch: refs/heads/HDFS-7240
Commit: 942e1ac213c86956c9f8b6fdf3c38e4dc5a84238
Parents: 8576861 3b7ffc4
Author: Anu Engineer anu.engin...@gmail.com
Authored: Tue Jul 21 11:14:40 2015 -0700
Committer: Anu Engineer anu.engin...@gmail.com
Committed: Tue Jul 21 11:14:40 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  21 ++
 .../crypto/key/kms/KMSClientProvider.java   |   4 +-
 .../java/org/apache/hadoop/fs/FileStatus.java   |  15 +-
 .../org/apache/hadoop/fs/LocatedFileStatus.java |  10 +-
 .../fs/viewfs/ViewFsLocatedFileStatus.java  |   3 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|   2 +-
 .../hadoop/security/UserGroupInformation.java   |   3 +-
 .../hadoop/security/token/SecretManager.java|   2 +-
 .../org/apache/hadoop/security/token/Token.java |   2 +-
 .../hadoop/security/token/TokenIdentifier.java  |   2 +-
 .../apache/hadoop/security/token/TokenInfo.java |   2 +-
 .../hadoop/security/token/TokenRenewer.java |   2 +-
 .../hadoop/security/token/TokenSelector.java|   2 +-
 .../hadoop/security/token/package-info.java |   2 +-
 .../org/apache/hadoop/fs/TestFileStatus.java|  22 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  25 +-
 .../hadoop-hdfs/src/main/bin/hdfs   |   8 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   3 +
 .../BlockInfoUnderConstruction.java |  19 +-
 .../server/blockmanagement/BlockManager.java|  14 +-
 .../blockmanagement/CorruptReplicasMap.java |  19 +-
 .../hdfs/server/namenode/AclTransformation.java |  30 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  10 +
 .../src/main/resources/hdfs-default.xml |   9 +
 .../src/main/webapps/hdfs/dfshealth.html|  25 ++
 .../src/main/webapps/hdfs/dfshealth.js  |   8 +
 .../markdown/HDFSHighAvailabilityWithNFS.md |  40 ++-
 .../markdown/HDFSHighAvailabilityWithQJM.md |  32 +-
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   |  78 +
 .../blockmanagement/TestBlockStatsMXBean.java   |   1 -
 .../blockmanagement/TestCorruptReplicaInfo.java |  12 +-
 .../server/namenode/TestAclTransformation.java  |  55 ++-
 .../org/apache/hadoop/tools/HadoopArchives.java |  87 +++--
 .../apache/hadoop/tools/TestHadoopArchives.java |   4 +-
 hadoop-tools/hadoop-openstack/pom.xml   |   4 +-
 .../sls/scheduler/ResourceSchedulerWrapper.java |  10 +
 hadoop-yarn-project/CHANGES.txt |  13 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |   5 +
 .../org/apache/hadoop/yarn/client/SCMAdmin.java |   2 +-
 .../server/resourcemanager/RMAppManager.java|  20 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  15 +-
 .../scheduler/AbstractYarnScheduler.java|  10 +
 .../server/resourcemanager/scheduler/Queue.java |   8 +
 .../scheduler/SchedulerApplication.java |  22 ++
 .../scheduler/SchedulerApplicationAttempt.java  |  15 +-
 .../scheduler/YarnScheduler.java|  20 ++
 .../scheduler/capacity/AbstractCSQueue.java |   7 +
 .../scheduler/capacity/CapacityScheduler.java   |  73 +++-
 .../CapacitySchedulerConfiguration.java |  13 +
 .../scheduler/capacity/LeafQueue.java   |  19 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |   8 +
 .../scheduler/event/AppAddedSchedulerEvent.java |  28 +-
 .../resourcemanager/scheduler/fair/FSQueue.java |   6 +
 .../scheduler/fifo/FifoScheduler.java   |   6 +
 .../scheduler/policy/FifoComparator.java|  11 +-
 .../scheduler/policy/SchedulableEntity.java |   5 +
 .../yarn/server/resourcemanager/MockRM.java |  31 +-
 .../server/resourcemanager/TestAppManager.java  |   1 +
 .../TestWorkPreservingRMRestart.java|   2 +-
 ...pacityPreemptionPolicyForNodePartitions.java |   1 +
 .../capacity/TestApplicationLimits.java |   5 +-
 .../capacity/TestApplicationPriority.java   | 345 +++
 .../capacity/TestCapacityScheduler.java |   5 +
 .../scheduler/policy/MockSchedulableEntity.java |  13 +-
 .../security/TestDelegationTokenRenewer.java|  10 +-
 .../TestRMWebServicesAppsModification.java  |   2 +-
 .../src/site/markdown/ResourceManagerRestart.md |  32 +-
 67 files changed, 1133 insertions(+), 207 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/942e1ac2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--



[09/19] hadoop git commit: HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)

2015-07-21 Thread aengineer
HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6d58606
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6d58606
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6d58606

Branch: refs/heads/HDFS-7240
Commit: d6d58606b8adf94b208aed5fc2d054b9dd081db1
Parents: ed01dc7
Author: yliu y...@apache.org
Authored: Tue Jul 21 09:20:22 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Jul 21 09:20:22 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  2 ++
 .../blockmanagement/CorruptReplicasMap.java  | 19 ++-
 .../blockmanagement/TestCorruptReplicaInfo.java  | 12 ++--
 3 files changed, 22 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d58606/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cd32c0e..388b553 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -727,6 +727,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write
 files rather than the entire DFSClient. (mingma)
 
+HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d58606/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index fc2e234..f83cbaf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -17,12 +17,19 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.Server;
 
-import java.util.*;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Stores information about all corrupt blocks in the File System.
@@ -46,8 +53,8 @@ public class CorruptReplicasMap{
 CORRUPTION_REPORTED  // client or datanode reported the corruption
   }
 
-  private final SortedMap<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
-new TreeMap<Block, Map<DatanodeDescriptor, Reason>>();
+  private final Map<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
+new HashMap<Block, Map<DatanodeDescriptor, Reason>>();
 
   /**
* Mark the block belonging to datanode as corrupt.
@@ -181,13 +188,15 @@ public class CorruptReplicasMap{
* @return Up to numExpectedBlocks blocks from startingBlockId if it exists
*
*/
-  long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
+  @VisibleForTesting
+  long[] getCorruptReplicaBlockIdsForTesting(int numExpectedBlocks,
Long startingBlockId) {
    if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
   return null;
 }
 
-Iterator<Block> blockIt = corruptReplicasMap.keySet().iterator();
+Iterator<Block> blockIt =
+new TreeMap<>(corruptReplicasMap).keySet().iterator();
 
 // if the starting block id was specified, iterate over keys until
 // we find the matching block. If we find a matching block, break

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d58606/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index 21fb54e..4bdaaac 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java

[2/3] hadoop git commit: HADOOP-11762. Enable swift distcp to secure HDFS (Chen He via aw)

2015-07-21 Thread aengineer
HADOOP-11762. Enable swift distcp to secure HDFS (Chen He via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68d1f4bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68d1f4bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68d1f4bf

Branch: refs/heads/HDFS-7240
Commit: 68d1f4bfe863761d3c83b79b17ec1194edca3172
Parents: cf74772
Author: Allen Wittenauer a...@apache.org
Authored: Tue Jul 21 11:19:29 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Tue Jul 21 11:19:29 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 .../hadoop/fs/swift/snative/SwiftNativeFileSystem.java  | 9 +
 .../apache/hadoop/fs/swift/TestSwiftFileSystemBasicOps.java | 7 +++
 3 files changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68d1f4bf/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 24709e0..5b51bce 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -495,6 +495,8 @@ Trunk (Unreleased)
 HADOOP-12107. long running apps may have a huge number of StatisticsData
 instances under FileSystem (Sangjin Lee via Ming Ma)
 
+HADOOP-11762. Enable swift distcp to secure HDFS (Chen He via aw)
+
   OPTIMIZATIONS
 
 HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68d1f4bf/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
 
b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
index e9faaf2..7f93c38 100644
--- 
a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
+++ 
b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
@@ -222,6 +222,15 @@ public class SwiftNativeFileSystem extends FileSystem {
   }
 
   /**
+   * Override getCononicalServiceName because we don't support token in Swift
+   */
+  @Override
+  public String getCanonicalServiceName() {
+// Does not support Token
+return null;
+  }
+
+  /**
* Return an array containing hostnames, offset and size of
* portions of the given file.  For a nonexistent
* file or regions, null will be returned.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68d1f4bf/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemBasicOps.java
--
diff --git 
a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemBasicOps.java
 
b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemBasicOps.java
index c7e8b57..c84be6b 100644
--- 
a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemBasicOps.java
+++ 
b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemBasicOps.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.fs.swift;
 
+import org.junit.Assert;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
@@ -286,4 +287,10 @@ public class TestSwiftFileSystemBasicOps extends 
SwiftFileSystemBaseTest {
 }
   }
 
+  @Test(timeout = SWIFT_TEST_TIMEOUT)
+  public void testGetCanonicalServiceName() {
+Assert.assertNull(fs.getCanonicalServiceName());
+  }
+
+
 }



[3/3] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2015-07-21 Thread aengineer
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12bd9639
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12bd9639
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12bd9639

Branch: refs/heads/HDFS-7240
Commit: 12bd96395a0b905a3bc721486461b3c112148983
Parents: 942e1ac 68d1f4b
Author: Anu Engineer anu.engin...@gmail.com
Authored: Tue Jul 21 11:29:19 2015 -0700
Committer: Anu Engineer anu.engin...@gmail.com
Committed: Tue Jul 21 11:29:19 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 hadoop-mapreduce-project/CHANGES.txt| 3 +++
 .../apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java| 2 ++
 .../hadoop/fs/swift/snative/SwiftNativeFileSystem.java  | 9 +
 .../apache/hadoop/fs/swift/TestSwiftFileSystemBasicOps.java | 7 +++
 5 files changed, 23 insertions(+)
--




[2/2] hadoop git commit: Merge branch 'trunk' into hdfs-7240

2015-11-04 Thread aengineer
Merge branch 'trunk' into hdfs-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/312d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/312d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/312d

Branch: refs/heads/hdfs-7240
Commit: 312deac6781bd15a5e1a46e2007243bf5186
Parents: b14a70e 5667129
Author: Anu Engineer 
Authored: Wed Nov 4 14:19:34 2015 -0800
Committer: Anu Engineer 
Committed: Wed Nov 4 14:19:34 2015 -0800

--
 .gitignore  |1 +
 LICENSE.txt |   59 +
 dev-support/docker/Dockerfile   |7 +-
 dev-support/test-patch.sh   |   10 +-
 .../main/resources/assemblies/hadoop-dist.xml   |4 +-
 .../assemblies/hadoop-hdfs-nfs-dist.xml |4 +-
 .../resources/assemblies/hadoop-httpfs-dist.xml |4 +-
 .../resources/assemblies/hadoop-kms-dist.xml|4 +-
 .../assemblies/hadoop-mapreduce-dist.xml|4 +-
 .../resources/assemblies/hadoop-nfs-dist.xml|4 +-
 .../main/resources/assemblies/hadoop-sls.xml|4 +-
 .../main/resources/assemblies/hadoop-src.xml|4 +-
 .../main/resources/assemblies/hadoop-tools.xml  |4 +-
 .../resources/assemblies/hadoop-yarn-dist.xml   |4 +-
 hadoop-client/pom.xml   |6 +-
 .../JWTRedirectAuthenticationHandler.java   |7 +-
 .../server/KerberosAuthenticationHandler.java   |4 +-
 .../TestJWTRedirectAuthentictionHandler.java|   42 +-
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   74 -
 hadoop-common-project/hadoop-common/CHANGES.txt |  270 +-
 hadoop-common-project/hadoop-common/pom.xml |5 +
 .../hadoop-common/src/main/bin/hadoop   |   15 +-
 .../hadoop-common/src/main/bin/hadoop-daemon.sh |6 +-
 .../src/main/bin/hadoop-daemons.sh  |6 +-
 .../src/main/bin/hadoop-functions.sh|  109 +-
 .../src/main/bin/hadoop-layout.sh.example   |   16 +-
 .../hadoop-common/src/main/bin/rcc  |4 +-
 .../hadoop-common/src/main/bin/slaves.sh|6 +-
 .../hadoop-common/src/main/bin/start-all.sh |6 +-
 .../hadoop-common/src/main/bin/stop-all.sh  |6 +-
 .../main/conf/hadoop-user-functions.sh.example  |   10 +-
 .../org/apache/hadoop/conf/Configuration.java   |2 +-
 .../fs/CommonConfigurationKeysPublic.java   |   11 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |   26 +-
 .../java/org/apache/hadoop/fs/FileUtil.java |   29 -
 .../org/apache/hadoop/fs/FilterFileSystem.java  |8 +-
 .../java/org/apache/hadoop/fs/GlobFilter.java   |2 +-
 .../java/org/apache/hadoop/fs/GlobPattern.java  |7 +-
 .../main/java/org/apache/hadoop/fs/Globber.java |2 +-
 .../org/apache/hadoop/fs/HarFileSystem.java |6 +
 .../java/org/apache/hadoop/fs/HardLink.java |8 +
 .../org/apache/hadoop/fs/LocalDirAllocator.java |6 +-
 .../apache/hadoop/fs/shell/CopyCommands.java|6 +-
 .../java/org/apache/hadoop/fs/shell/Delete.java |2 +-
 .../apache/hadoop/ha/ActiveStandbyElector.java  |   53 +-
 .../org/apache/hadoop/ha/HAServiceTarget.java   |   50 +-
 .../org/apache/hadoop/ha/HealthMonitor.java |2 +-
 .../org/apache/hadoop/http/HttpServer2.java |2 +
 .../java/org/apache/hadoop/io/SequenceFile.java |   15 +-
 .../org/apache/hadoop/io/WritableUtils.java |8 +-
 .../apache/hadoop/io/erasurecode/CodecUtil.java |2 +
 .../apache/hadoop/io/erasurecode/ECBlock.java   |3 +
 .../hadoop/io/erasurecode/ECBlockGroup.java |3 +
 .../apache/hadoop/io/erasurecode/ECChunk.java   |3 +
 .../apache/hadoop/io/erasurecode/ECSchema.java  |5 +
 .../erasurecode/codec/AbstractErasureCodec.java |2 +
 .../io/erasurecode/codec/ErasureCodec.java  |2 +
 .../io/erasurecode/codec/RSErasureCodec.java|2 +
 .../io/erasurecode/codec/XORErasureCodec.java   |2 +
 .../erasurecode/coder/AbstractErasureCoder.java |2 +
 .../coder/AbstractErasureCodingStep.java|2 +
 .../coder/AbstractErasureDecoder.java   |   25 +-
 .../coder/AbstractErasureEncoder.java   |2 +
 .../io/erasurecode/coder/ErasureCoder.java  |2 +
 .../io/erasurecode/coder/ErasureCodingStep.java |2 +
 .../erasurecode/coder/ErasureDecodingStep.java  |2 +
 .../erasurecode/coder/ErasureEncodingStep.java  |2 +
 .../io/erasurecode/coder/RSErasureDecoder.java  |2 +
 .../io/erasurecode/coder/RSErasureEncoder.java  |2 +
 .../io/erasurecode/coder/XORErasureDecoder.java |2 +
 .../io/erasurecode/coder/XORErasureEncoder.java |2 +
 .../io/erasurecode/grouper/BlockGrouper.java|2 +
 .../rawcoder/AbstractRawErasureCoder.java   |  114 +-
 .../rawcoder/AbstractRawErasureDecoder.java |   10 +-
 

[1/2] hadoop git commit: Merge branch 'trunk' into hdfs-7240

2015-11-04 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/hdfs-7240 [created] 312de


http://git-wip-us.apache.org/repos/asf/hadoop/blob/312d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index da09b0e,29bcd79..c93a362
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@@ -2816,30 -2633,14 +2831,30 @@@ public class DataNode extends Reconfigu
}
  
/**
-* Convenience method, which unwraps RemoteException.
-* @throws IOException not a RemoteException.
-*/
 -   * Update replica with the new generation stamp and length.  
++  * Convenience method, which unwraps RemoteException.
++  * @throws IOException not a RemoteException.
++  */
 +  private static ReplicaRecoveryInfo callInitReplicaRecovery(
 +  InterDatanodeProtocol datanode,
 +  RecoveringBlock rBlock) throws IOException {
 +try {
 +  return datanode.initReplicaRecovery(rBlock);
- } catch(RemoteException re) {
++} catch (RemoteException re) {
 +  throw re.unwrapRemoteException();
 +}
 +  }
 +
 +  /**
-* Update replica with the new generation stamp and length.  
++   * Update replica with the new generation stamp and length.
 */
@Override // InterDatanodeProtocol
public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock,
--  final long recoveryId, final long newBlockId, final long newLength)
++   final long recoveryId, final long 
newBlockId, final long newLength)
throws IOException {
 -final String storageID = data.updateReplicaUnderRecovery(oldBlock,
 -recoveryId, newBlockId, newLength);
 +final FsDatasetSpi dataset =
 +(FsDatasetSpi) getDataset(oldBlock.getBlockPoolId());
 +final String storageID = dataset.updateReplicaUnderRecovery(
 +oldBlock, recoveryId, newBlockId, newLength);
  // Notify the namenode of the updated block info. This is important
  // for HA, since otherwise the standby node may lose track of the
  // block locations until the next block report.
@@@ -2851,234 -2652,6 +2866,244 @@@
  return storageID;
}
  
-   /** A convenient class used in block recovery */
-   static class BlockRecord { 
++  /**
++   * A convenient class used in block recovery
++   */
++  static class BlockRecord {
 +final DatanodeID id;
 +final InterDatanodeProtocol datanode;
 +final ReplicaRecoveryInfo rInfo;
- 
 +private String storageID;
 +
 +BlockRecord(DatanodeID id,
 +InterDatanodeProtocol datanode,
 +ReplicaRecoveryInfo rInfo) {
 +  this.id = id;
 +  this.datanode = datanode;
 +  this.rInfo = rInfo;
 +}
 +
 +void updateReplicaUnderRecovery(String bpid, long recoveryId,
 +long newBlockId, long newLength)
 +throws IOException {
 +  final ExtendedBlock b = new ExtendedBlock(bpid, rInfo);
 +  storageID = datanode.updateReplicaUnderRecovery(b, recoveryId, 
newBlockId,
 +  newLength);
 +}
 +
 +@Override
 +public String toString() {
 +  return "block:" + rInfo + " node:" + id;
 +}
 +  }
 +
-   /** Recover a block */
++
++  /**
++   * Recover a block
++   */
 +  private void recoverBlock(RecoveringBlock rBlock) throws IOException {
 +ExtendedBlock block = rBlock.getBlock();
 +String blookPoolId = block.getBlockPoolId();
 +DatanodeID[] datanodeids = rBlock.getLocations();
 +List<BlockRecord> syncList = new ArrayList<BlockRecord>(datanodeids.length);
 +int errorCount = 0;
 +
 +//check generation stamps
- for(DatanodeID id : datanodeids) {
++for (DatanodeID id : datanodeids) {
 +  try {
 +BPOfferService bpos = blockPoolManager.get(blookPoolId);
 +DatanodeRegistration bpReg = bpos.bpRegistration;
- InterDatanodeProtocol datanode = bpReg.equals(id)?
- this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
- dnConf.socketTimeout, dnConf.connectToDnViaHostname);
++InterDatanodeProtocol datanode = bpReg.equals(id) ?
++this : DataNode.createInterDataNodeProtocolProxy(id, getConf(),
++dnConf.socketTimeout, dnConf.connectToDnViaHostname);
 +ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
 +if (info != null &&
 +info.getGenerationStamp() >= block.getGenerationStamp() &&
 +info.getNumBytes() > 0) {
 +  syncList.add(new BlockRecord(id, datanode, info));
 +}
 +  } catch (RecoveryInProgressException ripE) {
 +InterDatanodeProtocol.LOG.warn(
 +"Recovery 

[20/50] [abbrv] hadoop git commit: Fix CHANGES.txt

2015-11-04 Thread aengineer
Fix CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2529464f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2529464f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2529464f

Branch: refs/heads/HDFS-7240
Commit: 2529464f0841732792343d515cd1be1dccb3c453
Parents: 6e4f8a4
Author: Kihwal Lee 
Authored: Mon Nov 2 09:09:33 2015 -0600
Committer: Kihwal Lee 
Committed: Mon Nov 2 09:09:33 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2529464f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8e6634a..0bbc60d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1529,9 +1529,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9110. Use Files.walkFileTree in NNUpgradeUtil#doPreUpgrade for
 better efficiency. (Charlie Helin via wang)
 
-HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
-values() since it creates a temporary array. (Staffan Friberg via yliu)
-
 HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in
 BlockManager#excessReplicateMap. (yliu)
 
@@ -2220,6 +2217,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-8099. Change "DFSInputStream has been closed already" message to
 debug log level (Charles Lamb via Colin P. McCabe)
 
+HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
+values() since it creates a temporary array. (Staffan Friberg via yliu)
+
   OPTIMIZATIONS
 
 HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)



[37/50] [abbrv] hadoop git commit: HDFS-9351. checkNNStartup() need to be called when fsck calls FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)

2015-11-04 Thread aengineer
HDFS-9351. checkNNStartup() need to be called when fsck calls 
FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/194251c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/194251c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/194251c8

Branch: refs/heads/HDFS-7240
Commit: 194251c85250fcbe80a6ffee88b2cd4689334be3
Parents: dac0463
Author: Yongjun Zhang 
Authored: Tue Nov 3 17:16:17 2015 -0800
Committer: Yongjun Zhang 
Committed: Tue Nov 3 17:16:17 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../hdfs/server/namenode/FSNamesystem.java  | 20 
 .../hdfs/server/namenode/NamenodeFsck.java  |  9 -
 3 files changed, 11 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/194251c8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 13c4094..2def995 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2228,6 +2228,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
 commitBlock. (Chang Li via zhz)
 
+HDFS-9351. checkNNStartup() need to be called when fsck calls
+FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/194251c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 65b40c8..734e3ba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -6364,26 +6364,6 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return list;
   }
 
-  /**
-   * Get the list of snapshottable directories.
-   * @return The list of all the current snapshottable directories
-   * @see #getSnapshottableDirListing()
-   * @throws IOException
-   */
-  List<String> getSnapshottableDirs() throws IOException {
-List<String> snapshottableDirs = new ArrayList<String>();
-final FSPermissionChecker pc = getFSDirectory().getPermissionChecker();
-final String user = pc.isSuperUser() ? null : pc.getUser();
-final SnapshottableDirectoryStatus[] snapDirs =
-snapshotManager.getSnapshottableDirListing(user);
-if (snapDirs != null) {
-  for (SnapshottableDirectoryStatus sds : snapDirs) {
-snapshottableDirs.add(sds.getFullPath().toString());
-  }
-}
-return snapshottableDirs;
-  }
-
   @Override  //NameNodeMXBean
   public int getDistinctVersionCount() {
 return blockManager.getDatanodeManager().getDatanodesSoftwareVersions()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/194251c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 0b2a53b..9d4edb5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
@@ -345,7 +346,13 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   namenode.getNamesystem().logFsckEvent(path, remoteAddress);
 
   if (snapshottableDirs != null) {
-   

[36/50] [abbrv] hadoop git commit: HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in commitBlock. Contributed by Chang Li.

2015-11-04 Thread aengineer
HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in 
commitBlock. Contributed by Chang Li.

Change-Id: If5ce1b2d212bb0726bce52ad12a3de401bcec02d


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dac0463a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dac0463a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dac0463a

Branch: refs/heads/HDFS-7240
Commit: dac0463a4e20dfb3a802355919fc22b8e017a4e1
Parents: 7e28296
Author: Zhe Zhang 
Authored: Tue Nov 3 13:34:05 2015 -0800
Committer: Zhe Zhang 
Committed: Tue Nov 3 13:34:24 2015 -0800

--
 .../org/apache/hadoop/hdfs/DataStreamer.java|   2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/blockmanagement/BlockInfo.java  |   2 +-
 .../server/blockmanagement/BlockManager.java|   4 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  27 +++--
 .../TestCommitBlockWithInvalidGenStamp.java | 100 +++
 .../namenode/TestQuotaWithStripedBlocks.java|   4 +-
 7 files changed, 128 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dac0463a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 03c2c52..7cb89c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -351,7 +351,7 @@ class DataStreamer extends Daemon {
   }
 
   private volatile boolean streamerClosed = false;
-  protected ExtendedBlock block; // its length is number of bytes acked
+  protected volatile ExtendedBlock block; // its length is number of bytes 
acked
  protected Token<BlockTokenIdentifier> accessToken;
   private DataOutputStream blockStream;
   private DataInputStream blockReplyStream;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dac0463a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fbf211f..13c4094 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2225,6 +2225,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line
 endings, fails on Windows. (cnauroth)
 
+HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
+commitBlock. (Chang Li via zhz)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dac0463a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index e15b5ee..e9fa123 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -411,7 +411,7 @@ public abstract class BlockInfo extends Block
 }
 Preconditions.checkState(!isComplete());
 uc.commit();
-this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp());
+this.setNumBytes(block.getNumBytes());
 // Sort out invalid replicas.
 setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dac0463a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index dbe0726..3c6c4d3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -653,6 +653,10 @@ public class BlockManager 

[46/50] [abbrv] hadoop git commit: HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for filesystem entirely allocated for DFS use. (Tony Wu via lei)

2015-11-04 Thread aengineer
HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for 
filesystem entirely allocated for DFS use. (Tony Wu via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2a5441b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2a5441b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2a5441b

Branch: refs/heads/HDFS-7240
Commit: e2a5441b062fd0758138079d24a2740fc5e5e350
Parents: ec41460
Author: Lei Xu 
Authored: Wed Nov 4 10:27:35 2015 -0800
Committer: Lei Xu 
Committed: Wed Nov 4 10:27:35 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a5441b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fd560d1..5f3ff11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1620,6 +1620,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via 
lei)
 
+HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account 
for
+filesystem entirely allocated for DFS use. (Tony Wu via lei)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a5441b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 5c865e1..2219aa6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -136,7 +136,7 @@ public class TestNameNodeMXBean {
   assertTrue(liveNodes.size() == 2);
  for (Map<String, Object> liveNode : liveNodes.values()) {
 assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
-assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0);
+assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) >= 0);
 assertTrue(liveNode.containsKey("capacity"));
 assertTrue(((Long)liveNode.get("capacity")) > 0);
 assertTrue(liveNode.containsKey("numBlocks"));



[25/50] [abbrv] hadoop git commit: HADOOP-12508. delete fails with exception when lease is held on blob. Contributed by Gaurav Kanade.

2015-11-04 Thread aengineer
HADOOP-12508. delete fails with exception when lease is held on blob. 
Contributed by Gaurav Kanade.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e7dcab1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e7dcab1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e7dcab1

Branch: refs/heads/HDFS-7240
Commit: 9e7dcab185abf2fdabb28f2799b9952b5664a4b0
Parents: 3ce0a65
Author: cnauroth 
Authored: Mon Nov 2 10:21:39 2015 -0800
Committer: cnauroth 
Committed: Mon Nov 2 10:21:39 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../fs/azure/AzureNativeFileSystemStore.java| 32 +++-
 .../hadoop/fs/azure/SelfRenewingLease.java  |  5 +-
 .../fs/azure/TestNativeAzureFileSystemLive.java | 86 
 4 files changed, 124 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e7dcab1/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c8d60b0..1a9c93c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1307,6 +1307,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12533. Introduce FileNotFoundException in WASB for read and seek 
API.
 (Dushyanth via cnauroth)
 
+HADOOP-12508. delete fails with exception when lease is held on blob.
+(Gaurav Kanade via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e7dcab1/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 6412714..69ece4a 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -2370,7 +2370,37 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 
   @Override
   public void delete(String key) throws IOException {
-delete(key, null);
+try {
+  delete(key, null);
+} catch (IOException e) {
+  Throwable t = e.getCause();
+  if(t != null && t instanceof StorageException) {
+StorageException se = (StorageException) t;
+if(se.getErrorCode().equals(("LeaseIdMissing"))){
+  SelfRenewingLease lease = null;
+  try {
+lease = acquireLease(key);
+delete(key, lease);
+  } catch (AzureException e3) {
+LOG.warn("Got unexpected exception trying to acquire lease on "
++ key + "." + e3.getMessage());
+throw e3;
+  } finally {
+try {
+  if(lease != null){
+lease.free();
+  }
+} catch (Exception e4){
+  LOG.error("Unable to free lease on " + key, e4);
+}
+  }
+} else {
+  throw e;
+}
+  } else {
+throw e;
+  }
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e7dcab1/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
index 06f32ce..900d730 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
@@ -22,6 +22,8 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import com.microsoft.azure.storage.AccessCondition;
 import com.microsoft.azure.storage.StorageException;
 import com.microsoft.azure.storage.blob.CloudBlob;
@@ -61,7 +63,8 @@ public class SelfRenewingLease {
 
 
   // Time to wait to retry getting the lease in milliseconds
-  private static final int LEASE_ACQUIRE_RETRY_INTERVAL = 2000;
+  @VisibleForTesting
+  static final int LEASE_ACQUIRE_RETRY_INTERVAL = 2000;

[44/50] [abbrv] hadoop git commit: Revert "HDFS-8855. Webhdfs client leaks active NameNode connections. Contributed by Xiaobing Zhou."

2015-11-04 Thread aengineer
Revert "HDFS-8855. Webhdfs client leaks active NameNode connections. 
Contributed by Xiaobing Zhou."

This reverts commit 84cbd72afda6344e220526fac5c560f00f84e374.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88beb46c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88beb46c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88beb46c

Branch: refs/heads/HDFS-7240
Commit: 88beb46cf6e6fd3e51f73a411a2750de7595e326
Parents: 3fb1ece
Author: Haohui Mai 
Authored: Wed Nov 4 10:21:13 2015 -0800
Committer: Haohui Mai 
Committed: Wed Nov 4 10:21:13 2015 -0800

--
 .../org/apache/hadoop/security/token/Token.java |  11 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   4 -
 .../web/webhdfs/DataNodeUGIProvider.java| 106 ++---
 .../datanode/web/webhdfs/WebHdfsHandler.java|   2 +-
 .../src/main/resources/hdfs-default.xml |   8 -
 .../web/webhdfs/TestDataNodeUGIProvider.java| 231 ---
 7 files changed, 19 insertions(+), 346 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
index f189a96..2420155 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.security.token;
 
 import com.google.common.collect.Maps;
-import com.google.common.primitives.Bytes;
-
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -31,11 +29,9 @@ import org.apache.hadoop.io.*;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import java.io.*;
-import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Map;
 import java.util.ServiceLoader;
-import java.util.UUID;
 
 /**
  * The client-side form of the token.
@@ -341,12 +337,7 @@ public class Token implements 
Writable {
 identifierToString(buffer);
 return buffer.toString();
   }
-
-  public String buildCacheKey() {
-return UUID.nameUUIDFromBytes(
-Bytes.concat(kind.getBytes(), identifier, password)).toString();
-  }
-
+  
   private static ServiceLoader renewers =
   ServiceLoader.load(TokenRenewer.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 500dc92..f2d8296 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2152,9 +2152,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9160. [OIV-Doc] : Missing details of 'delimited' for processor options
 (nijel via vinayakumarb)
 
-HDFS-8855. Webhdfs client leaks active NameNode connections.
-(Xiaobing Zhou via jitendra) 
-
 HDFS-9235. hdfs-native-client build getting errors when built with cmake
 2.6. (Eric Payne via wheat9)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 424f963..c14ce20 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -70,10 +70,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_WEBHDFS_NETTY_HIGH_WATERMARK =
   "dfs.webhdfs.netty.high.watermark";
   public static final int  DFS_WEBHDFS_NETTY_HIGH_WATERMARK_DEFAULT = 65535;
-  public static final String  DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_KEY =
-  "dfs.webhdfs.ugi.expire.after.access";
-  public static final int DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_DEFAULT =
-  10*60*1000; //10 minutes
 
   // HA related configuration
   public static final String  DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY = 

[48/50] [abbrv] hadoop git commit: HDFS-9363. Add fetchReplica to FsDatasetTestUtils to return FsDataset-agnostic replica. (Tony Wu via lei)

2015-11-04 Thread aengineer
HDFS-9363. Add fetchReplica to FsDatasetTestUtils to return FsDataset-agnostic 
replica. (Tony Wu via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56671292
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56671292
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56671292

Branch: refs/heads/HDFS-7240
Commit: 5667129276c3123ecb0a96b78d5897431c47a9d5
Parents: 0fb1867
Author: Lei Xu 
Authored: Wed Nov 4 10:46:19 2015 -0800
Committer: Lei Xu 
Committed: Wed Nov 4 10:49:28 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/test/java/org/apache/hadoop/hdfs/TestPipelines.java   | 6 ++
 .../hadoop/hdfs/server/datanode/FsDatasetTestUtils.java   | 7 +++
 .../datanode/fsdataset/impl/FsDatasetImplTestUtils.java   | 5 +
 .../datanode/fsdataset/impl/TestInterDatanodeProtocol.java| 5 +++--
 5 files changed, 20 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56671292/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5f3ff11..ef1152e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1623,6 +1623,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account 
for
 filesystem entirely allocated for DFS use. (Tony Wu via lei)
 
+HDFS-9363. Add fetchReplica() to FsDatasetTestUtils to return 
FsDataset-agnostic
+replica. (Tony Wu via lei)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56671292/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
index e4fea60..c9831b0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -102,10 +101,9 @@ public class TestPipelines {
 List lb = cluster.getNameNodeRpc().getBlockLocations(
   filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
 
-String bpid = cluster.getNamesystem().getBlockPoolId();
 for (DataNode dn : cluster.getDataNodes()) {
-  Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0)
-  .getBlock().getBlockId());
+  Replica r =
+  cluster.getFsDatasetTestUtils(dn).fetchReplica(lb.get(0).getBlock());
 
   assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
   assertEquals("Should be RBW replica on " + dn

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56671292/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index 40c4438..02af467 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -206,4 +206,11 @@ public interface FsDatasetTestUtils {
* @throws IOException on I/O error.
*/
   void injectCorruptReplica(ExtendedBlock block) throws IOException;
+
+  /**
+   * Get the replica of a block. Returns null if it does not exist.
+   * @param block the block whose replica will be returned.
+   * @return Replica for the block.
+   */
+  Replica fetchReplica(ExtendedBlock block);
 }


[47/50] [abbrv] hadoop git commit: HADOOP-12540. TestAzureFileSystemInstrumentation#testClientErrorMetrics fails intermittently due to assumption that a lease error will be thrown. Contributed by Gaur

2015-11-04 Thread aengineer
HADOOP-12540. TestAzureFileSystemInstrumentation#testClientErrorMetrics fails 
intermittently due to assumption that a lease error will be thrown. Contributed 
by Gaurav Kanade.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fb1867f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fb1867f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fb1867f

Branch: refs/heads/HDFS-7240
Commit: 0fb1867fd62b5df664ad66386d6067db8fbf2317
Parents: e2a5441
Author: cnauroth 
Authored: Wed Nov 4 10:19:04 2015 -0800
Committer: cnauroth 
Committed: Wed Nov 4 10:28:44 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  4 
 .../TestAzureFileSystemInstrumentation.java | 25 +---
 2 files changed, 21 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb1867f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index efb73f4..dd70947 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1320,6 +1320,10 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12542. TestDNS fails on Windows after HADOOP-12437. (cnauroth)
 
+HADOOP-12540. TestAzureFileSystemInstrumentation#testClientErrorMetrics
+fails intermittently due to assumption that a lease error will be thrown.
+(Gaurav Kanade via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb1867f/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
index 896ec1b..0c9126c 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
 import org.apache.hadoop.fs.azure.AzureException;
 import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
 import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsTag;
 import org.hamcrest.BaseMatcher;
@@ -405,22 +406,30 @@ public class TestAzureFileSystemInstrumentation {
 
   @Test
   public void testClientErrorMetrics() throws Exception {
-String directoryName = "metricsTestDirectory_ClientError";
-Path directoryPath = new Path("/" + directoryName);
-assertTrue(fs.mkdirs(directoryPath));
-String leaseID = testAccount.acquireShortLease(directoryName);
+String fileName = "metricsTestFile_ClientError";
+Path filePath = new Path("/"+fileName);
+final int FILE_SIZE = 100;
+OutputStream outputStream = null;
+String leaseID = null;
 try {
+  // Create a file
+  outputStream = fs.create(filePath);
+  leaseID = testAccount.acquireShortLease(fileName);
   try {
-fs.delete(directoryPath, true);
-assertTrue("Should've thrown.", false);
+outputStream.write(new byte[FILE_SIZE]);
+outputStream.close();
+assertTrue("Should've thrown", false);
   } catch (AzureException ex) {
 assertTrue("Unexpected exception: " + ex,
-ex.getMessage().contains("lease"));
+  ex.getMessage().contains("lease"));
   }
   assertEquals(1, 
AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), 
WASB_CLIENT_ERRORS));
   assertEquals(0, 
AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), 
WASB_SERVER_ERRORS));
 } finally {
-  testAccount.releaseLease(leaseID, directoryName);
+  if(leaseID != null){
+testAccount.releaseLease(leaseID, fileName);
+  }
+  IOUtils.closeStream(outputStream);
 }
   }
 



[33/50] [abbrv] hadoop git commit: YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer binds to default port 8188. (Meng Ding via wangda)

2015-11-04 Thread aengineer
YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer 
binds to default port 8188. (Meng Ding via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0783184f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0783184f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0783184f

Branch: refs/heads/HDFS-7240
Commit: 0783184f4b3f669f7211e42b395b62d63144100d
Parents: 957f031
Author: Wangda Tan 
Authored: Tue Nov 3 11:18:34 2015 -0800
Committer: Wangda Tan 
Committed: Tue Nov 3 11:18:34 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../applications/distributedshell/TestDistributedShell.java | 9 +++--
 2 files changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0783184f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1040f45..d6ad672 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1039,6 +1039,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4127. RM fail with noAuth error if switched from failover to 
non-failover.
 (Varun Saxena via jianhe)
 
+YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no 
longer 
+binds to default port 8188. (Meng Ding via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0783184f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index dcb6e72..3197875 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -92,9 +92,14 @@ public class TestDistributedShell {
   yarnCluster.init(conf);
   
   yarnCluster.start();
-  
+
+  conf.set(
+  YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+  MiniYARNCluster.getHostname() + ":"
+  + yarnCluster.getApplicationHistoryServer().getPort());
+
   waitForNMsToRegister();
-  
+
   URL url = 
Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
   if (url == null) {
 throw new RuntimeException("Could not find 'yarn-site.xml' dummy file 
in classpath");



[15/50] [abbrv] hadoop git commit: Updated the 2.6.2 final release date.

2015-11-04 Thread aengineer
Updated the 2.6.2 final release date.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4a6b5b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4a6b5b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4a6b5b4

Branch: refs/heads/HDFS-7240
Commit: a4a6b5b4b470b1e7a3c5e2d38433429c455bc709
Parents: b24fe06
Author: Sangjin Lee 
Authored: Fri Oct 30 18:47:16 2015 -0700
Committer: Sangjin Lee 
Committed: Fri Oct 30 18:47:16 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
 hadoop-mapreduce-project/CHANGES.txt| 2 +-
 hadoop-yarn-project/CHANGES.txt | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4a6b5b4/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a1409f8..2560fe5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -2170,7 +2170,7 @@ Release 2.6.3 - UNRELEASED
 
   BUG FIXES
 
-Release 2.6.2 - 2015-10-21
+Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4a6b5b4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 17df171..5a61eed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -3310,7 +3310,7 @@ Release 2.6.3 - UNRELEASED
 
   BUG FIXES
 
-Release 2.6.2 - 2015-10-21
+Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4a6b5b4/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index e999659..4d6dcb8 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -913,7 +913,7 @@ Release 2.6.3 - UNRELEASED
 MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary()
 (Junping Du via jlowe)
 
-Release 2.6.2 - 2015-10-21
+Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4a6b5b4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cc8f5f3..1040f45 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1887,7 +1887,7 @@ Release 2.6.3 - UNRELEASED
 
   BUG FIXES
 
-Release 2.6.2 - 2015-10-21
+Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES
 



[39/50] [abbrv] hadoop git commit: HADOOP-10787. Rename/remove non-HADOOP_*, etc from the shell scripts. Contributed by Allen Wittenauer.

2015-11-04 Thread aengineer
HADOOP-10787. Rename/remove non-HADOOP_*, etc from the shell scripts. 
Contributed by Allen Wittenauer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73b9c7b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73b9c7b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73b9c7b8

Branch: refs/heads/HDFS-7240
Commit: 73b9c7b82b0f607a5328ad7dc4170da3ac0c1af3
Parents: 3e1745d
Author: Varun Vasudev 
Authored: Wed Nov 4 15:56:17 2015 +0530
Committer: Varun Vasudev 
Committed: Wed Nov 4 15:56:17 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../hadoop-common/src/main/bin/hadoop   | 15 ++--
 .../hadoop-common/src/main/bin/hadoop-daemon.sh |  6 +-
 .../src/main/bin/hadoop-daemons.sh  |  6 +-
 .../src/main/bin/hadoop-functions.sh| 66 +
 .../src/main/bin/hadoop-layout.sh.example   | 16 ++---
 .../hadoop-common/src/main/bin/rcc  |  4 +-
 .../hadoop-common/src/main/bin/slaves.sh|  6 +-
 .../hadoop-common/src/main/bin/start-all.sh |  6 +-
 .../hadoop-common/src/main/bin/stop-all.sh  |  6 +-
 .../main/conf/hadoop-user-functions.sh.example  | 10 +--
 .../scripts/hadoop_add_common_to_classpath.bats |  4 +-
 .../hadoop_add_to_classpath_toolspath.bats  | 74 
 .../src/test/scripts/hadoop_basic_init.bats |  2 +-
 .../hadoop-kms/src/main/sbin/kms.sh |  6 +-
 .../hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh  |  6 +-
 .../src/main/bin/distribute-exclude.sh  |  4 +-
 .../hadoop-hdfs/src/main/bin/hdfs   |  9 ++-
 .../src/main/bin/refresh-namenodes.sh   |  6 +-
 .../hadoop-hdfs/src/main/bin/start-balancer.sh  |  6 +-
 .../hadoop-hdfs/src/main/bin/start-dfs.sh   |  6 +-
 .../src/main/bin/start-secure-dns.sh|  6 +-
 .../hadoop-hdfs/src/main/bin/stop-balancer.sh   |  6 +-
 .../hadoop-hdfs/src/main/bin/stop-dfs.sh|  6 +-
 .../hadoop-hdfs/src/main/bin/stop-secure-dns.sh |  6 +-
 hadoop-mapreduce-project/bin/mapred | 15 ++--
 .../bin/mr-jobhistory-daemon.sh |  6 +-
 .../hadoop-sls/src/main/bin/rumen2sls.sh|  9 ++-
 hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh  | 12 ++--
 .../hadoop-yarn/bin/start-yarn.sh   |  6 +-
 .../hadoop-yarn/bin/stop-yarn.sh|  6 +-
 hadoop-yarn-project/hadoop-yarn/bin/yarn|  6 +-
 .../hadoop-yarn/bin/yarn-daemon.sh  |  6 +-
 .../hadoop-yarn/bin/yarn-daemons.sh |  6 +-
 34 files changed, 235 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73b9c7b8/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 453efe6..dbf9700 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -32,6 +32,9 @@ Trunk (Unreleased)
 HADOOP-11356. Removed deprecated 
o.a.h.fs.permission.AccessControlException.
 (Li Lu via wheat9)
 
+HADOOP-10787 Rename/remove non-HADOOP_*, etc from the shell scripts.
+(aw via vvasudev)
+
   NEW FEATURES
 
 HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73b9c7b8/hadoop-common-project/hadoop-common/src/main/bin/hadoop
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index ef67cc5..513b0f1 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -47,13 +47,13 @@ function hadoop_usage
 
 # let's locate libexec...
 if [[ -n "${HADOOP_PREFIX}" ]]; then
-  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
 else
   bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
-  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
 fi
 
-HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
 # shellcheck disable=SC2034
 HADOOP_NEW_CONFIG=true
 if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
@@ -113,8 +113,7 @@ case ${COMMAND} in
   ;;
   archive)
 CLASS=org.apache.hadoop.tools.HadoopArchives
-hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
-hadoop_add_classpath "${TOOL_PATH}"
+hadoop_add_to_classpath_toolspath
   ;;
   checknative)
 

[19/50] [abbrv] hadoop git commit: HADOOP-12047. Indicate preference not to affect input buffers during coding in erasure coder. (Contributed by Kai Zheng)

2015-11-04 Thread aengineer
HADOOP-12047. Indicate preference not to affect input buffers during coding in 
erasure coder. (Contributed by Kai Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e4f8a46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e4f8a46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e4f8a46

Branch: refs/heads/HDFS-7240
Commit: 6e4f8a46c5ce983493cb0ac2234fceafdb3a5613
Parents: 3cde693
Author: Walter Su 
Authored: Mon Nov 2 10:40:14 2015 +0800
Committer: Walter Su 
Committed: Mon Nov 2 10:40:14 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../rawcoder/AbstractRawErasureCoder.java   | 60 ++--
 .../io/erasurecode/rawcoder/CoderOption.java| 43 ++
 .../io/erasurecode/rawcoder/RSRawEncoder.java   | 48 +---
 .../erasurecode/rawcoder/RawErasureCoder.java   | 23 +---
 .../erasurecode/rawcoder/RawErasureDecoder.java | 32 ++-
 .../erasurecode/rawcoder/RawErasureEncoder.java | 35 ++--
 .../hadoop/io/erasurecode/TestCoderBase.java| 34 +--
 .../erasurecode/rawcoder/TestRawCoderBase.java  | 37 ++--
 .../erasurecode/rawcoder/TestXORRawCoder.java   |  1 +
 .../hadoop/hdfs/DFSStripedInputStream.java  |  1 -
 11 files changed, 253 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e4f8a46/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2560fe5..5c8daad 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -608,6 +608,9 @@ Trunk (Unreleased)
   HADOOP-12327. Initialize output buffers with ZERO bytes in erasure coder.
   (Kai Zheng via waltersu4549)
 
+  HADOOP-12047. Indicate preference not to affect input buffers during
+  coding in erasure coder. (Kai Zheng via waltersu4549)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e4f8a46/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
index d8a57eb..b195216 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
@@ -22,6 +22,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configured;
 
 import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
 
 /**
  * A common class of basic facilities to be shared by encoder and decoder
@@ -36,11 +38,39 @@ public abstract class AbstractRawErasureCoder
   private final int numDataUnits;
   private final int numParityUnits;
   private final int numAllUnits;
+  private final Map coderOptions;
 
   public AbstractRawErasureCoder(int numDataUnits, int numParityUnits) {
 this.numDataUnits = numDataUnits;
 this.numParityUnits = numParityUnits;
 this.numAllUnits = numDataUnits + numParityUnits;
+this.coderOptions = new HashMap<>(3);
+
+coderOptions.put(CoderOption.PREFER_DIRECT_BUFFER, preferDirectBuffer());
+coderOptions.put(CoderOption.ALLOW_CHANGE_INPUTS, false);
+coderOptions.put(CoderOption.ALLOW_VERBOSE_DUMP, false);
+  }
+
+  @Override
+  public Object getCoderOption(CoderOption option) {
+if (option == null) {
+  throw new HadoopIllegalArgumentException("Invalid option");
+}
+return coderOptions.get(option);
+  }
+
+  @Override
+  public void setCoderOption(CoderOption option, Object value) {
+if (option == null || value == null) {
+  throw new HadoopIllegalArgumentException(
+  "Invalid option or option value");
+}
+if (option.isReadOnly()) {
+  throw new HadoopIllegalArgumentException(
+  "The option is read-only: " + option.name());
+}
+
+coderOptions.put(option, value);
   }
 
   /**
@@ -75,13 +105,35 @@ public abstract class AbstractRawErasureCoder
   }
 
   @Override
-  public boolean preferDirectBuffer() {
+  public void release() {
+// Nothing to do by default
+  }
+
+  /**
+   * Tell if direct buffer is 

[17/50] [abbrv] hadoop git commit: Revert "HDFS-4937. ReplicationMonitor can infinite-loop in BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee."

2015-11-04 Thread aengineer
Revert "HDFS-4937. ReplicationMonitor can infinite-loop in 
BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee."

This reverts commit 43539b5ff4ac0874a8a454dc93a2a782b0e0ea8f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fd64167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fd64167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fd64167

Branch: refs/heads/HDFS-7240
Commit: 7fd6416759cbb202ed21b47d28c1587e04a5cdc6
Parents: 2ea4413
Author: yliu 
Authored: Sat Oct 31 16:20:48 2015 +0800
Committer: yliu 
Committed: Sat Oct 31 16:20:48 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ---
 .../blockmanagement/BlockPlacementPolicyDefault.java| 12 
 2 files changed, 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd64167/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 211e7fc..30cdfee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,9 +2201,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
-HDFS-4937. ReplicationMonitor can infinite-loop in
-BlockPlacementPolicyDefault#chooseRandom() (kihwal)
-
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd64167/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index f610574..d9b8d60 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -659,7 +659,6 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
-int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
   builder = debugLoggingBuilder.get();
@@ -709,17 +708,6 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 // If no candidate storage was found on this DN then set badTarget.
 badTarget = (storage == null);
   }
-  // Refresh the node count. If the live node count became smaller,
-  // but it is not reflected in this loop, it may loop forever in case
-  // the replicas/rack cannot be satisfied.
-  if (--refreshCounter == 0) {
-refreshCounter = clusterMap.countNumOfAvailableNodes(scope,
-excludedNodes);
-// It has already gone through enough number of nodes.
-if (refreshCounter <= excludedNodes.size()) {
-  break;
-}
-  }
 }
   
 if (numOfReplicas>0) {



[13/50] [abbrv] hadoop git commit: MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when strategy is dynamic. Contributed by Kuhu Shukla.

2015-11-04 Thread aengineer
MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when 
strategy is dynamic. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2868ca03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2868ca03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2868ca03

Branch: refs/heads/HDFS-7240
Commit: 2868ca0328d908056745223fb38d9a90fd2811ba
Parents: 18727c6
Author: Kihwal Lee 
Authored: Fri Oct 30 14:56:41 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 14:56:41 2015 -0500

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../tools/mapred/lib/DynamicInputChunk.java | 137 +++
 .../tools/mapred/lib/DynamicInputFormat.java|  31 +++--
 .../tools/mapred/lib/DynamicRecordReader.java   |  13 +-
 .../org/apache/hadoop/tools/StubContext.java|   4 +
 .../mapred/lib/TestDynamicInputFormat.java  |  33 -
 6 files changed, 83 insertions(+), 138 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2868ca03/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 32be987..e999659 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -674,6 +674,9 @@ Release 2.7.2 - UNRELEASED
 MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary()
 (Junping Du via jlowe)
 
+MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when
+strategy is dynamic (Kuhu Shukla via kihwal)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2868ca03/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
index 8482e7d..9bf8e47 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
@@ -20,14 +20,10 @@ package org.apache.hadoop.tools.mapred.lib;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.tools.CopyListingFileStatus;
-import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
@@ -47,72 +43,28 @@ import java.io.IOException;
  */
 class DynamicInputChunk {
   private static Log LOG = LogFactory.getLog(DynamicInputChunk.class);
-
-  private static Configuration configuration;
-  private static Path chunkRootPath;
-  private static String chunkFilePrefix;
-  private static int numChunksLeft = -1; // Un-initialized before 1st dir-scan.
-  private static FileSystem fs;
-
   private Path chunkFilePath;
   private SequenceFileRecordReader reader;
   private SequenceFile.Writer writer;
+  private DynamicInputChunkContext chunkContext;
 
-  private static void initializeChunkInvariants(Configuration config)
-  throws IOException {
-configuration = config;
-Path listingFilePath = new Path(getListingFilePath(configuration));
-chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
-fs = chunkRootPath.getFileSystem(configuration);
-chunkFilePrefix = listingFilePath.getName() + ".chunk.";
-  }
-
-  private static String getListingFilePath(Configuration configuration) {
-final String listingFileString = configuration.get(
-DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
-assert !listingFileString.equals("") : "Listing file not found.";
-return listingFileString;
-  }
-
-  private static boolean areInvariantsInitialized() {
-return chunkRootPath != null;
-  }
-
-  private DynamicInputChunk(String chunkId, Configuration configuration)
+  DynamicInputChunk(String chunkId, DynamicInputChunkContext chunkContext)
   throws IOException {
-if 

[42/50] [abbrv] hadoop git commit: HDFS-9357. NN UI renders icons of decommissioned DN incorrectly. Contributed by Surendra Singh Lilhore.

2015-11-04 Thread aengineer
HDFS-9357. NN UI renders icons of decommissioned DN incorrectly. Contributed by 
Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0eed886a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0eed886a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0eed886a

Branch: refs/heads/HDFS-7240
Commit: 0eed886a165f5a0850ddbfb1d5f98c7b5e379fb3
Parents: b9d25c3
Author: Haohui Mai 
Authored: Wed Nov 4 09:16:43 2015 -0800
Committer: Haohui Mai 
Committed: Wed Nov 4 09:16:43 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html | 4 ++--
 .../hadoop-hdfs/src/main/webapps/static/hadoop.css   | 4 ++--
 3 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eed886a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 530ed2d..bdcc1fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2231,6 +2231,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
+HDFS-9357. NN UI renders icons of decommissioned DN incorrectly.
+(Surendra Singh Lilhore via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eed886a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index e46ce7f..08199fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -285,8 +285,8 @@
   
 In service
 Down
-Decommisioned
-Decommissioned  dead
+Decommissioned
+Decommissioned  dead
   
 
 In operation

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eed886a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
index 58c3cb5..2ed5f29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
@@ -235,7 +235,7 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
 content: "\e013";
 }
 
-.dfshealth-node-decommisioned:before {
+.dfshealth-node-decommissioned:before {
 color: #eea236;
 content: "\e136";
 }
@@ -245,7 +245,7 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
 content: "\e101";
 }
 
-.dfshealth-node-down-decommisioned:before {
+.dfshealth-node-down-decommissioned:before {
 color: #2e6da6;
 content: "\e017";
 }



[18/50] [abbrv] hadoop git commit: HDFS-9343. Empty caller context considered invalid. (Contributed by Mingliang Liu)

2015-11-04 Thread aengineer
HDFS-9343. Empty caller context considered invalid. (Contributed by Mingliang 
Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cde6931
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cde6931
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cde6931

Branch: refs/heads/HDFS-7240
Commit: 3cde6931cb5055a9d92503f4ecefa35571e7b07f
Parents: 7fd6416
Author: Arpit Agarwal 
Authored: Sun Nov 1 15:35:02 2015 -0800
Committer: Arpit Agarwal 
Committed: Sun Nov 1 15:35:02 2015 -0800

--
 .../java/org/apache/hadoop/ipc/CallerContext.java| 13 -
 .../main/java/org/apache/hadoop/util/ProtoUtil.java  |  2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../hadoop/hdfs/server/namenode/FSNamesystem.java|  8 
 .../hadoop/hdfs/server/namenode/TestAuditLogger.java | 15 ---
 5 files changed, 28 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cde6931/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
index 8be7e35..b197575 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
@@ -44,6 +44,7 @@ public class CallerContext {
* {@link 
org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_CALLER_CONTEXT_MAX_SIZE_DEFAULT}
*/
   private final String context;
+
   /** The caller's signature for validation.
*
* The signature is optional. The null or empty signature will be abandoned.
@@ -58,10 +59,6 @@ public class CallerContext {
 this.signature = builder.signature;
   }
 
-  public boolean isValid() {
-return context != null;
-  }
-
   public String getContext() {
 return context;
   }
@@ -71,6 +68,11 @@ public class CallerContext {
 null : Arrays.copyOf(signature, signature.length);
   }
 
+  @InterfaceAudience.Private
+  public boolean isContextValid() {
+return context != null && !context.isEmpty();
+  }
+
   @Override
   public int hashCode() {
 return new HashCodeBuilder().append(context).toHashCode();
@@ -92,9 +94,10 @@ public class CallerContext {
   .isEquals();
 }
   }
+
   @Override
   public String toString() {
-if (!isValid()) {
+if (!isContextValid()) {
   return "";
 }
 String str = context;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cde6931/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
index 4bfcd66..1a5acba 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
@@ -180,7 +180,7 @@ public abstract class ProtoUtil {
 
 // Add caller context if it is not null
 CallerContext callerContext = CallerContext.getCurrent();
-if (callerContext != null && callerContext.isValid()) {
+if (callerContext != null && callerContext.isContextValid()) {
   RPCCallerContextProto.Builder contextBuilder = RPCCallerContextProto
   .newBuilder().setContext(callerContext.getContext());
   if (callerContext.getSignature() != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cde6931/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 30cdfee..8e6634a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,6 +2201,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
+HDFS-9343. Empty caller context considered invalid. (Mingliang Liu via
+Arpit Agarwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cde6931/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff 

[04/50] [abbrv] hadoop git commit: Add an entry of YARN-4312 to CHANGES.txt

2015-11-04 Thread aengineer
Add an entry of YARN-4312 to CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d21214ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d21214ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d21214ce

Branch: refs/heads/HDFS-7240
Commit: d21214ce33cb176926aa3ae5a9f4efe00f66480b
Parents: f072eb5
Author: Tsuyoshi Ozawa 
Authored: Fri Oct 30 17:56:59 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Fri Oct 30 17:56:59 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d21214ce/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 78c18d5..2151136 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1147,6 +1147,9 @@ Release 2.7.2 - UNRELEASED
 YARN-3580. [JDK8] TestClientRMService.testGetLabelsToNodes fails. (Robert 
Kanter
 via junping_du)
 
+YARN-4312. TestSubmitApplicationWithRMHA fails on branch-2.7 and branch-2.6
+as some of the test cases time out. (Varun Saxena via ozawa)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



[03/50] [abbrv] hadoop git commit: HDFS-9323. Randomize the DFSStripedOutputStreamWithFailure tests.

2015-11-04 Thread aengineer
HDFS-9323. Randomize the DFSStripedOutputStreamWithFailure tests.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f072eb5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f072eb5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f072eb5a

Branch: refs/heads/HDFS-7240
Commit: f072eb5a206d34d8af39d65c3ef1f39faaebfdd0
Parents: d2e01f4
Author: Tsz-Wo Nicholas Sze 
Authored: Fri Oct 30 15:38:38 2015 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Fri Oct 30 15:41:03 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../TestDFSStripedOutputStreamWithFailure.java  | 33 +---
 ...estDFSStripedOutputStreamWithFailure020.java | 22 +
 ...estDFSStripedOutputStreamWithFailure030.java | 22 +
 ...estDFSStripedOutputStreamWithFailure040.java | 22 +
 ...estDFSStripedOutputStreamWithFailure050.java | 22 +
 ...estDFSStripedOutputStreamWithFailure060.java | 22 +
 ...estDFSStripedOutputStreamWithFailure070.java | 22 +
 ...estDFSStripedOutputStreamWithFailure080.java | 22 +
 ...estDFSStripedOutputStreamWithFailure090.java | 22 +
 ...estDFSStripedOutputStreamWithFailure100.java | 22 +
 ...estDFSStripedOutputStreamWithFailure110.java | 22 +
 ...estDFSStripedOutputStreamWithFailure120.java | 22 +
 ...estDFSStripedOutputStreamWithFailure130.java | 22 +
 ...estDFSStripedOutputStreamWithFailure140.java | 22 +
 ...estDFSStripedOutputStreamWithFailure150.java | 22 +
 ...estDFSStripedOutputStreamWithFailure160.java | 22 +
 ...estDFSStripedOutputStreamWithFailure170.java | 22 +
 ...estDFSStripedOutputStreamWithFailure180.java | 22 +
 ...estDFSStripedOutputStreamWithFailure190.java | 22 +
 ...estDFSStripedOutputStreamWithFailure200.java | 22 +
 ...estDFSStripedOutputStreamWithFailure210.java | 23 ++
 22 files changed, 458 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f072eb5a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fd1d6de..38b9e55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -187,6 +187,8 @@ Trunk (Unreleased)
 HDFS-9261. Erasure Coding: Skip encoding the data cells if all the parity 
data 
 streamers are failed for the current block group. (Rakesh R via umamahesh)
 
+HDFS-9323. Randomize the DFSStripedOutputStreamWithFailure tests. 
(szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f072eb5a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index 7bd976f..b60d0f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -131,15 +131,16 @@ public class TestDFSStripedOutputStreamWithFailure {
 
   private static final List LENGTHS = newLengths();
 
-  static int getLength(int i) {
-return LENGTHS.get(i);
+  static Integer getLength(int i) {
+return i >= 0 && i < LENGTHS.size()? LENGTHS.get(i): null;
   }
 
+  private static final Random RANDOM = new Random();
+
   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
   private final Path dir = new Path("/"
   + TestDFSStripedOutputStreamWithFailure.class.getSimpleName());
-  private final Random random = new Random();
 
   private void setup(Configuration conf) throws IOException {
 final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
@@ -167,19 +168,6 @@ public class TestDFSStripedOutputStreamWithFailure {
   }
 
   @Test(timeout=24)
-  public void testDatanodeFailure56() throws Exception {
-runTest(getLength(56));
-  }
-
-  @Test(timeout=24)
-  public void testDatanodeFailureRandomLength() throws Exception {
-int lenIndex = random.nextInt(LENGTHS.size());
-LOG.info("run testMultipleDatanodeFailureRandomLength with length index: "
-+ lenIndex);
-runTest(getLength(lenIndex));

[29/50] [abbrv] hadoop git commit: HDFS-9308. Add truncateMeta() and deleteMeta() to MiniDFSCluster. (Tony Wu via lei)

2015-11-04 Thread aengineer
HDFS-9308. Add truncateMeta() and deleteMeta() to MiniDFSCluster. (Tony Wu via 
lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e05dbf2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e05dbf2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e05dbf2

Branch: refs/heads/HDFS-7240
Commit: 8e05dbf2bddce95d5f5a5bae5df61acabf0ba7c5
Parents: 5ba2b98
Author: Lei Xu 
Authored: Mon Nov 2 18:05:43 2015 -0800
Committer: Lei Xu 
Committed: Mon Nov 2 18:05:43 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  22 
 .../apache/hadoop/hdfs/TestCrcCorruption.java   | 120 +--
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   |  35 --
 4 files changed, 76 insertions(+), 103 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e05dbf2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3c60549..19ea5c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1661,6 +1661,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)
 
+HDFS-9308. Add truncateMeta() and deleteMeta() to MiniDFSCluster. (Tony Wu 
via lei)
+
   BUG FIXES
 
 HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e05dbf2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 7ebf333..c81f154 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -2117,6 +2117,28 @@ public class MiniDFSCluster {
 getMaterializedReplica(i, blk).corruptMeta();
   }
 
+  /**
+   * Corrupt the metadata of a block by deleting it.
+   * @param i index of the datanode
+   * @param blk name of the block.
+   */
+  public void deleteMeta(int i, ExtendedBlock blk)
+  throws IOException {
+getMaterializedReplica(i, blk).deleteMeta();
+  }
+
+  /**
+   * Corrupt the metadata of a block by truncating it to a new size.
+   * @param i index of the datanode.
+   * @param blk name of the block.
+   * @param newSize the new size of the metadata file.
+   * @throws IOException if any I/O errors.
+   */
+  public void truncateMeta(int i, ExtendedBlock blk, int newSize)
+  throws IOException {
+getMaterializedReplica(i, blk).truncateMeta(newSize);
+  }
+
   public boolean changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
   long newGenStamp) throws IOException {
 File blockFile = getBlockFile(dnIndex, blk);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e05dbf2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
index 3850ff2..398bcc2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
@@ -22,11 +22,8 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
@@ -35,12 +32,15 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.slf4j.Logger;

[07/50] [abbrv] hadoop git commit: fix CHANGES.txt

2015-11-04 Thread aengineer
fix CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c0204a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c0204a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c0204a5

Branch: refs/heads/HDFS-7240
Commit: 3c0204a5866520e74917b26b6ac2061650a5bb6d
Parents: 43539b5
Author: Kihwal Lee 
Authored: Fri Oct 30 09:40:41 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 09:40:41 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c0204a5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f6a22a1..c5846b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,9 +2201,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
-HDFS-4937. ReplicationMonitor can infinite-loop in
-BlockPlacementPolicyDefault#chooseRandom() (kihwal)
-
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -2276,6 +2273,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-9317. Document fsck -blockId and -storagepolicy options in branch-2.7.
 (aajisaka)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in
+BlockPlacementPolicyDefault#chooseRandom() (kihwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



[14/50] [abbrv] hadoop git commit: Addendum to MAPREDUCE-6451

2015-11-04 Thread aengineer
Addendum to MAPREDUCE-6451


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b24fe064
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b24fe064
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b24fe064

Branch: refs/heads/HDFS-7240
Commit: b24fe0648348d325d14931f80cee8a170fb3358a
Parents: 2868ca0
Author: Kihwal Lee 
Authored: Fri Oct 30 16:05:23 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 16:05:23 2015 -0500

--
 .../mapred/lib/DynamicInputChunkContext.java| 113 +++
 1 file changed, 113 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b24fe064/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
new file mode 100644
index 000..043ff1c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools.mapred.lib;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.tools.DistCpConstants;
+
+import java.io.IOException;
+
+/**
+ * Class to initialize the DynamicInputChunk invariants.
+ */
+class DynamicInputChunkContext {
+
+  private static Log LOG = LogFactory.getLog(DynamicInputChunkContext.class);
+  private Configuration configuration;
+  private Path chunkRootPath = null;
+  private String chunkFilePrefix;
+  private FileSystem fs;
+  private int numChunksLeft = -1; // Un-initialized before 1st dir-scan.
+
+  public DynamicInputChunkContext(Configuration config)
+  throws IOException {
+this.configuration = config;
+Path listingFilePath = new Path(getListingFilePath(configuration));
+chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
+fs = chunkRootPath.getFileSystem(configuration);
+chunkFilePrefix = listingFilePath.getName() + ".chunk.";
+  }
+
+  public Configuration getConfiguration() {
+return configuration;
+  }
+
+  public Path getChunkRootPath() {
+return chunkRootPath;
+  }
+
+  public String getChunkFilePrefix() {
+return chunkFilePrefix;
+  }
+
+  public FileSystem getFs() {
+return fs;
+  }
+
+  private static String getListingFilePath(Configuration configuration) {
+final String listingFileString = configuration.get(
+DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
+assert !listingFileString.equals("") : "Listing file not found.";
+return listingFileString;
+  }
+
+  public int getNumChunksLeft() {
+return numChunksLeft;
+  }
+
+  public DynamicInputChunk acquire(TaskAttemptContext taskAttemptContext)
+  throws IOException, InterruptedException {
+
+String taskId
+= taskAttemptContext.getTaskAttemptID().getTaskID().toString();
+Path acquiredFilePath = new Path(getChunkRootPath(), taskId);
+
+if (fs.exists(acquiredFilePath)) {
+  LOG.info("Acquiring pre-assigned chunk: " + acquiredFilePath);
+  return new DynamicInputChunk(acquiredFilePath, taskAttemptContext, this);
+}
+
+for (FileStatus chunkFile : getListOfChunkFiles()) {
+  if (fs.rename(chunkFile.getPath(), acquiredFilePath)) {
+LOG.info(taskId + " acquired " + chunkFile.getPath());
+return new DynamicInputChunk(acquiredFilePath, taskAttemptContext,
+this);
+  }
+}
+return null;
+  }
+
+  public 

[09/50] [abbrv] hadoop git commit: Update CHANGES.txt to reflect commit of MR-6273 to branch-2.6

2015-11-04 Thread aengineer
Update CHANGES.txt to reflect commit of MR-6273 to branch-2.6


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ae9efaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ae9efaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ae9efaf

Branch: refs/heads/HDFS-7240
Commit: 6ae9efaf5949bd5a5f4fd99b5777ce8f6d7f3a2c
Parents: eadf7b3
Author: Jason Lowe 
Authored: Fri Oct 30 15:18:53 2015 +
Committer: Jason Lowe 
Committed: Fri Oct 30 15:18:53 2015 +

--
 hadoop-mapreduce-project/CHANGES.txt | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae9efaf/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 8594e1e..22f9e89 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -900,6 +900,10 @@ Release 2.6.3 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6273. HistoryFileManager should check whether summaryFile exists 
to 
+avoid FileNotFoundException causing HistoryFileInfo into MOVE_FAILED state.
+(zhihai xu via devaraj)
+
 Release 2.6.2 - 2015-10-21
 
   INCOMPATIBLE CHANGES



[02/50] [abbrv] hadoop git commit: Move YARN-3580 in CHANGES.txt from 2.8 to 2.7.2.

2015-11-04 Thread aengineer
Move YARN-3580 in CHANGES.txt from 2.8 to 2.7.2.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2e01f4e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2e01f4e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2e01f4e

Branch: refs/heads/HDFS-7240
Commit: d2e01f4ed87c3c41156ec9a68855f923f8c0adf9
Parents: 7412ff4
Author: Tsuyoshi Ozawa 
Authored: Fri Oct 30 15:49:06 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Fri Oct 30 15:49:06 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2e01f4e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d0fa27d..78c18d5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -689,9 +689,6 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3582. NPE in WebAppProxyServlet. (jian he via xgong)
 
-YARN-3580. [JDK8] TestClientRMService.testGetLabelsToNodes fails. (Robert 
Kanter
-via junping_du)
-
 YARN-3577. Misspelling of threshold in log4j.properties for tests.
 (Brahma Reddy Battula via aajisaka)
 
@@ -1147,6 +1144,9 @@ Release 2.7.2 - UNRELEASED
 YARN-4313. Race condition in MiniMRYarnCluster when getting history server
 address. (Jian He via xgong)
 
+YARN-3580. [JDK8] TestClientRMService.testGetLabelsToNodes fails. (Robert 
Kanter
+via junping_du)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



[30/50] [abbrv] hadoop git commit: HDFS-9313. Possible NullPointerException in BlockManager if no excess replica can be chosen. (mingma)

2015-11-04 Thread aengineer
HDFS-9313. Possible NullPointerException in BlockManager if no excess replica 
can be chosen. (mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d565480d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d565480d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d565480d

Branch: refs/heads/HDFS-7240
Commit: d565480da2f646b40c3180e1ccb2935c9863dfef
Parents: 8e05dbf
Author: Ming Ma 
Authored: Mon Nov 2 19:36:37 2015 -0800
Committer: Ming Ma 
Committed: Mon Nov 2 19:36:37 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../blockmanagement/BlockPlacementPolicy.java   |  8 +++--
 .../BlockPlacementPolicyDefault.java|  6 
 .../blockmanagement/TestReplicationPolicy.java  | 31 
 4 files changed, 45 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d565480d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 19ea5c1..879c015 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2216,6 +2216,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9329. TestBootstrapStandby#testRateThrottling is flaky because fsimage
 size is smaller than IO buffer size. (zhz)
 
+HDFS-9313. Possible NullPointerException in BlockManager if no excess
+replica can be chosen. (mingma)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d565480d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index be169c3..526a5d7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -23,8 +23,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
@@ -33,13 +31,17 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /** 
  * This interface is used for choosing the desired number of targets
  * for placing block replicas.
  */
 @InterfaceAudience.Private
 public abstract class BlockPlacementPolicy {
-  static final Log LOG = LogFactory.getLog(BlockPlacementPolicy.class);
+  static final Logger LOG = LoggerFactory.getLogger(
+  BlockPlacementPolicy.class);
 
   @InterfaceAudience.Private
   public static class NotEnoughReplicasException extends Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d565480d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d9b8d60..2723ed9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -981,6 +981,12 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 excessTypes);
   }
   firstOne = false;
+  if (cur == null) {
+LOG.warn("No excess replica can be found. excessTypes: {}." +
+" moreThanOne: {}. exactlyOne: {}.", excessTypes, moreThanOne,
+exactlyOne);
+break;
+  }
 
   // adjust rackmap, moreThanOne, and exactlyOne
   adjustSetsWithChosenReplica(rackMap, moreThanOne, exactlyOne, cur);


[28/50] [abbrv] hadoop git commit: HDFS-9275. Wait previous ErasureCodingWork to finish before schedule another one. (Walter Su via yliu)

2015-11-04 Thread aengineer
HDFS-9275. Wait previous ErasureCodingWork to finish before schedule another 
one. (Walter Su via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ba2b98d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ba2b98d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ba2b98d

Branch: refs/heads/HDFS-7240
Commit: 5ba2b98d0fe29603e136fc43a14f853e820cf7e2
Parents: 7632409
Author: yliu 
Authored: Tue Nov 3 09:14:32 2015 +0800
Committer: yliu 
Committed: Tue Nov 3 09:14:32 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../server/blockmanagement/BlockManager.java|   5 +
 .../apache/hadoop/hdfs/StripedFileTestUtil.java |   8 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   2 +
 .../TestReadStripedFileWithMissingBlocks.java   |   6 +-
 .../hadoop/hdfs/TestRecoverStripedFile.java | 143 ++-
 .../hdfs/TestSafeModeWithStripedFile.java   |   5 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java   |   8 +-
 .../hdfs/TestWriteStripedFileWithFailure.java   |   6 +-
 .../TestBlockTokenWithDFSStriped.java   |   4 +-
 .../namenode/TestRecoverStripedBlocks.java  |  70 +
 11 files changed, 145 insertions(+), 115 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ba2b98d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c13a725..3c60549 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -849,6 +849,9 @@ Trunk (Unreleased)
   HDFS-8438. Erasure Coding: Allow concat striped files if they have the 
same
   ErasureCodingPolicy. (Walter Su via jing9)
 
+  HDFS-9275. Wait previous ErasureCodingWork to finish before schedule
+  another one. (Walter Su via yliu)
+
 Release 2.8.0 - UNRELEASED
 
   NEW FEATURES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ba2b98d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 897df1e..dbe0726 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1586,6 +1586,10 @@ public class BlockManager implements BlockStatsMXBean {
 }
 
 if (block.isStriped()) {
+  if (pendingNum > 0) {
+// Wait the previous recovery to finish.
+return null;
+  }
   short[] indices = new short[liveBlockIndices.size()];
   for (int i = 0 ; i < liveBlockIndices.size(); i++) {
 indices[i] = liveBlockIndices.get(i);
@@ -1641,6 +1645,7 @@ public class BlockManager implements BlockStatsMXBean {
 if (block.isStriped()) {
   assert rw instanceof ErasureCodingWork;
   assert rw.getTargets().length > 0;
+  assert pendingNum == 0: "Should wait the previous recovery to finish";
   String src = getBlockCollection(block).getName();
   ErasureCodingPolicy ecPolicy = null;
   try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ba2b98d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index cc6e7d3..9942a2d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -61,10 +61,10 @@ public class StripedFileTestUtil {
   public static final int BLOCK_STRIPED_CELL_SIZE = 64 * 1024;
   public static final int BLOCK_STRIPE_SIZE = BLOCK_STRIPED_CELL_SIZE * 
NUM_DATA_BLOCKS;
 
-  static final int stripesPerBlock = 4;
-  static final int blockSize = BLOCK_STRIPED_CELL_SIZE * stripesPerBlock;
-  static final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2;
-  static final int BLOCK_GROUP_SIZE = blockSize * NUM_DATA_BLOCKS;
+  public static final int stripesPerBlock = 4;
+  public static final int blockSize = BLOCK_STRIPED_CELL_SIZE * 

[32/50] [abbrv] hadoop git commit: HADOOP-12542. TestDNS fails on Windows after HADOOP-12437. Contributed by Chris Nauroth.

2015-11-04 Thread aengineer
HADOOP-12542. TestDNS fails on Windows after HADOOP-12437. Contributed by Chris 
Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/957f0311
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/957f0311
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/957f0311

Branch: refs/heads/HDFS-7240
Commit: 957f0311a160afb40dbb0619f455445b4f5d1e32
Parents: 6e0d353
Author: cnauroth 
Authored: Mon Nov 2 22:25:05 2015 -0800
Committer: cnauroth 
Committed: Mon Nov 2 22:25:05 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../src/test/java/org/apache/hadoop/net/TestDNS.java| 12 +---
 2 files changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/957f0311/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b055069..0d1bce2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1312,6 +1312,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12508. delete fails with exception when lease is held on blob.
 (Gaurav Kanade via cnauroth)
 
+HADOOP-12542. TestDNS fails on Windows after HADOOP-12437. (cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/957f0311/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
index b26c7ca..a0bfe73 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
@@ -30,6 +30,7 @@ import javax.naming.NameNotFoundException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Time;
 
 import org.junit.Test;
@@ -37,6 +38,7 @@ import org.junit.Test;
 import static org.hamcrest.CoreMatchers.not;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;
+import static org.junit.Assume.assumeTrue;
 
 /**
  * Test host name and IP resolution and caching.
@@ -185,13 +187,17 @@ public class TestDNS {
*
* This test may fail on some misconfigured test machines that don't have
* an entry for "localhost" in their hosts file. This entry is correctly
-   * configured out of the box on common Linux distributions, OS X and
-   * Windows.
+   * configured out of the box on common Linux distributions and OS X.
+   *
+   * Windows refuses to resolve 127.0.0.1 to "localhost" despite the presence 
of
+   * this entry in the hosts file.  We skip the test on Windows to avoid
+   * reporting a spurious failure.
*
* @throws Exception
*/
  @Test (timeout=60000)
   public void testLookupWithHostsFallback() throws Exception {
+assumeTrue(!Shell.WINDOWS);
 final String oldHostname = changeDnsCachedHostname(DUMMY_HOSTNAME);
 
 try {
@@ -231,7 +237,7 @@ public class TestDNS {
 
   private String getLoopbackInterface() throws SocketException {
 return NetworkInterface.getByInetAddress(
-InetAddress.getLoopbackAddress()).getDisplayName();
+InetAddress.getLoopbackAddress()).getName();
   }
 
   /**



[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into hdfs-7240

2015-11-04 Thread aengineer
Merge branch 'trunk' into hdfs-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/312d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/312d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/312d

Branch: refs/heads/HDFS-7240
Commit: 312deac6781bd15a5e1a46e2007243bf5186
Parents: b14a70e 5667129
Author: Anu Engineer 
Authored: Wed Nov 4 14:19:34 2015 -0800
Committer: Anu Engineer 
Committed: Wed Nov 4 14:19:34 2015 -0800

--
 .gitignore  |1 +
 LICENSE.txt |   59 +
 dev-support/docker/Dockerfile   |7 +-
 dev-support/test-patch.sh   |   10 +-
 .../main/resources/assemblies/hadoop-dist.xml   |4 +-
 .../assemblies/hadoop-hdfs-nfs-dist.xml |4 +-
 .../resources/assemblies/hadoop-httpfs-dist.xml |4 +-
 .../resources/assemblies/hadoop-kms-dist.xml|4 +-
 .../assemblies/hadoop-mapreduce-dist.xml|4 +-
 .../resources/assemblies/hadoop-nfs-dist.xml|4 +-
 .../main/resources/assemblies/hadoop-sls.xml|4 +-
 .../main/resources/assemblies/hadoop-src.xml|4 +-
 .../main/resources/assemblies/hadoop-tools.xml  |4 +-
 .../resources/assemblies/hadoop-yarn-dist.xml   |4 +-
 hadoop-client/pom.xml   |6 +-
 .../JWTRedirectAuthenticationHandler.java   |7 +-
 .../server/KerberosAuthenticationHandler.java   |4 +-
 .../TestJWTRedirectAuthentictionHandler.java|   42 +-
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   74 -
 hadoop-common-project/hadoop-common/CHANGES.txt |  270 +-
 hadoop-common-project/hadoop-common/pom.xml |5 +
 .../hadoop-common/src/main/bin/hadoop   |   15 +-
 .../hadoop-common/src/main/bin/hadoop-daemon.sh |6 +-
 .../src/main/bin/hadoop-daemons.sh  |6 +-
 .../src/main/bin/hadoop-functions.sh|  109 +-
 .../src/main/bin/hadoop-layout.sh.example   |   16 +-
 .../hadoop-common/src/main/bin/rcc  |4 +-
 .../hadoop-common/src/main/bin/slaves.sh|6 +-
 .../hadoop-common/src/main/bin/start-all.sh |6 +-
 .../hadoop-common/src/main/bin/stop-all.sh  |6 +-
 .../main/conf/hadoop-user-functions.sh.example  |   10 +-
 .../org/apache/hadoop/conf/Configuration.java   |2 +-
 .../fs/CommonConfigurationKeysPublic.java   |   11 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |   26 +-
 .../java/org/apache/hadoop/fs/FileUtil.java |   29 -
 .../org/apache/hadoop/fs/FilterFileSystem.java  |8 +-
 .../java/org/apache/hadoop/fs/GlobFilter.java   |2 +-
 .../java/org/apache/hadoop/fs/GlobPattern.java  |7 +-
 .../main/java/org/apache/hadoop/fs/Globber.java |2 +-
 .../org/apache/hadoop/fs/HarFileSystem.java |6 +
 .../java/org/apache/hadoop/fs/HardLink.java |8 +
 .../org/apache/hadoop/fs/LocalDirAllocator.java |6 +-
 .../apache/hadoop/fs/shell/CopyCommands.java|6 +-
 .../java/org/apache/hadoop/fs/shell/Delete.java |2 +-
 .../apache/hadoop/ha/ActiveStandbyElector.java  |   53 +-
 .../org/apache/hadoop/ha/HAServiceTarget.java   |   50 +-
 .../org/apache/hadoop/ha/HealthMonitor.java |2 +-
 .../org/apache/hadoop/http/HttpServer2.java |2 +
 .../java/org/apache/hadoop/io/SequenceFile.java |   15 +-
 .../org/apache/hadoop/io/WritableUtils.java |8 +-
 .../apache/hadoop/io/erasurecode/CodecUtil.java |2 +
 .../apache/hadoop/io/erasurecode/ECBlock.java   |3 +
 .../hadoop/io/erasurecode/ECBlockGroup.java |3 +
 .../apache/hadoop/io/erasurecode/ECChunk.java   |3 +
 .../apache/hadoop/io/erasurecode/ECSchema.java  |5 +
 .../erasurecode/codec/AbstractErasureCodec.java |2 +
 .../io/erasurecode/codec/ErasureCodec.java  |2 +
 .../io/erasurecode/codec/RSErasureCodec.java|2 +
 .../io/erasurecode/codec/XORErasureCodec.java   |2 +
 .../erasurecode/coder/AbstractErasureCoder.java |2 +
 .../coder/AbstractErasureCodingStep.java|2 +
 .../coder/AbstractErasureDecoder.java   |   25 +-
 .../coder/AbstractErasureEncoder.java   |2 +
 .../io/erasurecode/coder/ErasureCoder.java  |2 +
 .../io/erasurecode/coder/ErasureCodingStep.java |2 +
 .../erasurecode/coder/ErasureDecodingStep.java  |2 +
 .../erasurecode/coder/ErasureEncodingStep.java  |2 +
 .../io/erasurecode/coder/RSErasureDecoder.java  |2 +
 .../io/erasurecode/coder/RSErasureEncoder.java  |2 +
 .../io/erasurecode/coder/XORErasureDecoder.java |2 +
 .../io/erasurecode/coder/XORErasureEncoder.java |2 +
 .../io/erasurecode/grouper/BlockGrouper.java|2 +
 .../rawcoder/AbstractRawErasureCoder.java   |  114 +-
 .../rawcoder/AbstractRawErasureDecoder.java |   10 +-
 

[27/50] [abbrv] hadoop git commit: HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)

2015-11-04 Thread aengineer
HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76324094
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76324094
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76324094

Branch: refs/heads/HDFS-7240
Commit: 7632409482aaf06ecc6fe370a9f519afb969ad30
Parents: 78d6890
Author: Lei Xu 
Authored: Mon Nov 2 17:09:39 2015 -0800
Committer: Lei Xu 
Committed: Mon Nov 2 17:09:39 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/TestReplication.java | 115 +--
 .../server/datanode/FsDatasetTestUtils.java |   7 ++
 .../fsdataset/impl/FsDatasetImplTestUtils.java  |  25 
 4 files changed, 64 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76324094/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fea4106..c13a725 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1656,6 +1656,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9168. Move client side unit test to hadoop-hdfs-client. (wheat9)
 
+HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)
+
   BUG FIXES
 
 HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76324094/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index 6424bc3..d9c96ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -20,22 +20,14 @@ package org.apache.hadoop.hdfs;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import com.google.common.base.Supplier;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
-import java.nio.file.FileVisitResult;
-import java.nio.file.Files;
-import java.nio.file.SimpleFileVisitor;
-import java.nio.file.attribute.BasicFileAttributes;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
@@ -50,7 +42,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -62,6 +53,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import 
org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -367,7 +359,7 @@ public class TestReplication {
 for (int i=0; i();
+  for (int dnIndex=0; dnIndex<3; dnIndex++) {
+replicas.add(cluster.getMaterializedReplica(dnIndex, block));
   }
-  
+  assertEquals(3, replicas.size());
+
+

[35/50] [abbrv] hadoop git commit: HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line endings, fails on Windows. Contributed by Chris Nauroth.

2015-11-04 Thread aengineer
HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line 
endings, fails on Windows. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e282966
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e282966
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e282966

Branch: refs/heads/HDFS-7240
Commit: 7e2829662b4c4bf33ebaf2fa09312d0bed3d6f92
Parents: 095ac83
Author: cnauroth 
Authored: Tue Nov 3 11:54:57 2015 -0800
Committer: cnauroth 
Committed: Tue Nov 3 11:54:57 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../hdfs/server/namenode/TestAuditLogger.java   | 24 
 2 files changed, 18 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e282966/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1729b73..fbf211f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -,6 +,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9354. Fix TestBalancer#testBalancerWithZeroThreadsForMove on Windows.
 (Xiaoyu Yao via cnauroth)
 
+HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line
+endings, fails on Windows. (cnauroth)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e282966/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index 252f7af..d637abc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -243,7 +243,8 @@ public class TestAuditLogger {
   CallerContext.setCurrent(context);
   LOG.info("Set current caller context as {}", CallerContext.getCurrent());
   fs.setTimes(p, time, time);
-  assertTrue(auditlog.getOutput().endsWith("callerContext=setTimes\n"));
+  assertTrue(auditlog.getOutput().endsWith(
+  String.format("callerContext=setTimes%n")));
   auditlog.clearOutput();
 
   // context with signature
@@ -254,7 +255,7 @@ public class TestAuditLogger {
   LOG.info("Set current caller context as {}", CallerContext.getCurrent());
   fs.setTimes(p, time, time);
   assertTrue(auditlog.getOutput().endsWith(
-  "callerContext=setTimes:L\n"));
+  String.format("callerContext=setTimes:L%n")));
   auditlog.clearOutput();
 
   // long context is truncated
@@ -266,7 +267,7 @@ public class TestAuditLogger {
   LOG.info("Set current caller context as {}", CallerContext.getCurrent());
   fs.setTimes(p, time, time);
   assertTrue(auditlog.getOutput().endsWith(
-  "callerContext=" + longContext.substring(0, 128) + ":L\n"));
+  String.format("callerContext=%s:L%n", longContext.substring(0, 
128;
   auditlog.clearOutput();
 
   // empty context is ignored
@@ -302,7 +303,8 @@ public class TestAuditLogger {
   } catch (InterruptedException ignored) {
 // Ignore
   }
-  assertTrue(auditlog.getOutput().endsWith("callerContext=setTimes:L\n"));
+  assertTrue(auditlog.getOutput().endsWith(
+  String.format("callerContext=setTimes:L%n")));
   auditlog.clearOutput();
 
   // caller context is overridden in child thread
@@ -330,7 +332,7 @@ public class TestAuditLogger {
 // Ignore
   }
   assertTrue(auditlog.getOutput().endsWith(
-  "callerContext=setPermission:L\n"));
+  String.format("callerContext=setPermission:L%n")));
   auditlog.clearOutput();
 
   // reuse the current context's signature
@@ -339,7 +341,8 @@ public class TestAuditLogger {
   CallerContext.setCurrent(context);
   LOG.info("Set current caller context as {}", CallerContext.getCurrent());
   fs.mkdirs(new Path("/reuse-context-signature"));
-  assertTrue(auditlog.getOutput().endsWith("callerContext=mkdirs:L\n"));
+  assertTrue(auditlog.getOutput().endsWith(
+  String.format("callerContext=mkdirs:L%n")));
   auditlog.clearOutput();
 
   // too long signature is ignored
@@ -349,7 +352,8 @@ public class 

[34/50] [abbrv] hadoop git commit: HDFS-9354. Fix TestBalancer#testBalancerWithZeroThreadsForMove on Windows. Contributed by Xiaoyu Yao.

2015-11-04 Thread aengineer
HDFS-9354. Fix TestBalancer#testBalancerWithZeroThreadsForMove on Windows. 
Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/095ac834
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/095ac834
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/095ac834

Branch: refs/heads/HDFS-7240
Commit: 095ac834022df6136b42961c507ec745c6cf8f97
Parents: 0783184
Author: cnauroth 
Authored: Tue Nov 3 10:51:21 2015 -0800
Committer: cnauroth 
Committed: Tue Nov 3 11:21:08 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/balancer/TestBalancer.java  | 572 +--
 2 files changed, 277 insertions(+), 298 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ac834/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 879c015..1729b73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2219,6 +2219,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9313. Possible NullPointerException in BlockManager if no excess
 replica can be chosen. (mingma)
 
+HDFS-9354. Fix TestBalancer#testBalancerWithZeroThreadsForMove on Windows.
+(Xiaoyu Yao via cnauroth)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ac834/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 332ae15..dd54345 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.log4j.Level;
+import org.junit.After;
 import org.junit.Test;
 
 /**
@@ -106,6 +107,14 @@ public class TestBalancer {
   final static Path filePath = new Path(fileName);
   private MiniDFSCluster cluster;
 
+  @After
+  public void shutdown() throws Exception {
+if (cluster != null) {
+  cluster.shutdown();
+  cluster = null;
+}
+  }
+
   ClientProtocol client;
 
  static final long TIMEOUT = 40000L; //msec
@@ -367,44 +376,38 @@ public class TestBalancer {
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length)
 .hosts(hosts).racks(racks).simulatedCapacities(capacities).build();
 
-try {
-  cluster.waitActive();
-  client = NameNodeProxies.createProxy(conf,
-  cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
-  
-  // fill up the cluster to be 80% full
-  long totalCapacity = sum(capacities);
-  long totalUsedSpace = totalCapacity * 8 / 10;
-  InetSocketAddress[] favoredNodes = new InetSocketAddress[numOfDatanodes];
-  for (int i = 0; i < favoredNodes.length; i++) {
-// DFSClient will attempt reverse lookup. In case it resolves
-// "127.0.0.1" to "localhost", we manually specify the hostname.
-int port = cluster.getDataNodes().get(i).getXferAddress().getPort();
-favoredNodes[i] = new InetSocketAddress(hosts[i], port);
-  }
+cluster.waitActive();
+client = NameNodeProxies.createProxy(conf,
+cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
+
+// fill up the cluster to be 80% full
+long totalCapacity = sum(capacities);
+long totalUsedSpace = totalCapacity * 8 / 10;
+InetSocketAddress[] favoredNodes = new InetSocketAddress[numOfDatanodes];
+for (int i = 0; i < favoredNodes.length; i++) {
+  // DFSClient will attempt reverse lookup. In case it resolves
+  // "127.0.0.1" to "localhost", we manually specify the hostname.
+  int port = cluster.getDataNodes().get(i).getXferAddress().getPort();
+  favoredNodes[i] = new InetSocketAddress(hosts[i], port);
+}
 
-  DFSTestUtil.createFile(cluster.getFileSystem(0), filePath, false, 1024,
-  totalUsedSpace / numOfDatanodes, DEFAULT_BLOCK_SIZE,
-  (short) numOfDatanodes, 0, false, favoredNodes);
-  
-  // start up an empty node with the same capacity
-  cluster.startDataNodes(conf, 1, true, null, new 

[45/50] [abbrv] hadoop git commit: HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via lei)

2015-11-04 Thread aengineer
HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec414600
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec414600
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec414600

Branch: refs/heads/HDFS-7240
Commit: ec414600ede8e305c584818565b50e055ea5d2b5
Parents: 88beb46
Author: Lei Xu 
Authored: Tue Nov 3 14:17:11 2015 -0800
Committer: Lei Xu 
Committed: Wed Nov 4 10:22:17 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  65 ++-
 .../blockmanagement/BlockPlacementPolicy.java   |  53 --
 .../BlockPlacementPolicyDefault.java|  57 ---
 .../BlockPlacementPolicyWithNodeGroup.java  |  35 ++--
 .../BlockPlacementPolicyWithUpgradeDomain.java  |  84 +++--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   9 +-
 .../hdfs/server/balancer/TestBalancer.java  | 103 ++-
 .../blockmanagement/TestBlockManager.java   |  13 +-
 .../blockmanagement/TestReplicationPolicy.java  |  93 +++---
 .../TestReplicationPolicyWithNodeGroup.java |   6 +-
 .../TestReplicationPolicyWithUpgradeDomain.java | 171 +++
 .../hdfs/server/namenode/ha/TestDNFencing.java  |  10 +-
 13 files changed, 503 insertions(+), 198 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec414600/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f2d8296..fd560d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1618,6 +1618,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9339. Extend full test of KMS ACLs. (Daniel Templeton via zhz)
 
+HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via 
lei)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec414600/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 5b3eb36..9f9cdc0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import 
org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicies;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import 
org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import 
org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;
@@ -124,6 +125,7 @@ public class Dispatcher {
   private final int ioFileBufferSize;
 
   private final boolean connectToDnViaHostname;
+  private BlockPlacementPolicies placementPolicies;
 
   static class Allocator {
 private final int max;
@@ -949,6 +951,7 @@ public class Dispatcher {
 this.connectToDnViaHostname = conf.getBoolean(
 HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME,
 HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
+placementPolicies = new BlockPlacementPolicies(conf, null, cluster, null);
   }
 
   public DistributedFileSystem getDistributedFileSystem() {
@@ -1166,66 +1169,24 @@ public class Dispatcher {
   }
 }
 
-if (cluster.isNodeGroupAware()
-&& isOnSameNodeGroupWithReplicas(source, target, block)) {
-  return false;
-}
-if (reduceNumOfRacks(source, target, block)) {
+if (!isGoodBlockCandidateForPlacementPolicy(source, target, block)) {
   return false;
 }
 return true;
   }
 
-  /**
-   * Determine whether moving the given block replica from source to target
-   * would reduce the number of racks of the block replicas.
-   */
-  private boolean reduceNumOfRacks(StorageGroup source, StorageGroup target,
-  DBlock block) {
-final DatanodeInfo sourceDn = source.getDatanodeInfo();
-if 

[40/50] [abbrv] hadoop git commit: Add 2.7.3 release to CHANGES.txt

2015-11-04 Thread aengineer
Add 2.7.3 release to CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0383a397
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0383a397
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0383a397

Branch: refs/heads/HDFS-7240
Commit: 0383a3973b3b734fb23c331a2256dc92cff05365
Parents: 73b9c7b
Author: Jason Lowe 
Authored: Wed Nov 4 16:26:14 2015 +
Committer: Jason Lowe 
Committed: Wed Nov 4 16:26:14 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0383a397/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index dbf9700..4114bbd 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1404,6 +1404,18 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12484. Single File Rename Throws Incorrectly In Potential Race
 Condition Scenarios. (Gaurav Kanade via cnauroth)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0383a397/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2def995..530ed2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2231,6 +2231,18 @@ Release 2.8.0 - UNRELEASED
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0383a397/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 23bef37..f30f0ef 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -622,6 +622,18 @@ Release 2.8.0 - UNRELEASED
MAPREDUCE-6515. Update Application priority in AM side from AM-RM heartbeat
(Sunil G via jlowe)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0383a397/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d6ad672..1784d6e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1042,6 +1042,18 @@ Release 2.8.0 - UNRELEASED
 YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no 
longer 
 binds to default port 8188. (Meng Ding via wangda)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[31/50] [abbrv] hadoop git commit: HADOOP-12541. make re2j dependency consistent (Matthew Paduano via aw)

2015-11-04 Thread aengineer
HADOOP-12541. make re2j dependency consistent (Matthew Paduano via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e0d3532
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e0d3532
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e0d3532

Branch: refs/heads/HDFS-7240
Commit: 6e0d35323505cc68dbd963b8628b89ee04af2f2b
Parents: d565480
Author: Allen Wittenauer 
Authored: Mon Nov 2 20:39:46 2015 -0800
Committer: Allen Wittenauer 
Committed: Mon Nov 2 20:39:46 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 hadoop-common-project/hadoop-common/pom.xml | 1 -
 hadoop-project/pom.xml  | 5 +
 3 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e0d3532/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1a9c93c..b055069 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -251,6 +251,8 @@ Trunk (Unreleased)
 
 HADOOP-12133. Add schemas to Maven Assembly XMLs (Gábor Lipták via aw)
 
+HADOOP-12541. make re2j dependency consistent (Matthew Paduano via aw)
+
   BUG FIXES
 
 HADOOP-11473. test-patch says "-1 overall" even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e0d3532/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 4e47a3f..4735c6b 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -194,7 +194,6 @@
 
   com.google.re2j
   re2j
-  ${re2j.version}
   compile
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e0d3532/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index c974a61..efc3a7d 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -800,6 +800,11 @@
 1.8.1
   
   
+com.google.re2j
+re2j
+${re2j.version}
+  
+  
 com.google.protobuf
 protobuf-java
 ${protobuf.version}



[23/50] [abbrv] hadoop git commit: HDFS-9329. TestBootstrapStandby#testRateThrottling is flaky because fsimage size is smaller than IO buffer size. Contributed by Zhe Zhang.

2015-11-04 Thread aengineer
HDFS-9329. TestBootstrapStandby#testRateThrottling is flaky because fsimage 
size is smaller than IO buffer size. Contributed by Zhe Zhang.

Change-Id: I09896c46e9ee0718b67c64fac5acfb3f7decf0b9


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/259bea3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/259bea3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/259bea3b

Branch: refs/heads/HDFS-7240
Commit: 259bea3b48de7469a500831efb3306e8464a2dc9
Parents: 04d97f8
Author: Zhe Zhang 
Authored: Mon Nov 2 10:03:39 2015 -0800
Committer: Zhe Zhang 
Committed: Mon Nov 2 10:03:39 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../namenode/ha/TestBootstrapStandby.java   | 79 +++-
 2 files changed, 62 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/259bea3b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3b2d997..a2e4824 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2204,6 +2204,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9343. Empty caller context considered invalid. (Mingliang Liu via
 Arpit Agarwal)
 
+HDFS-9329. TestBootstrapStandby#testRateThrottling is flaky because fsimage
+size is smaller than IO buffer size. (zhz)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/259bea3b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
index fd45816..9f0d95b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
@@ -25,13 +25,16 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
@@ -109,12 +112,16 @@ public class TestBootstrapStandby {
 "storage directory does not exist or is not accessible", ioe);
   }
 
+  int expectedCheckpointTxId = (int)NameNodeAdapter.getNamesystem(nn0)
+  .getFSImage().getMostRecentCheckpointTxId();
+
   int rc = BootstrapStandby.run(new String[] { "-nonInteractive" },
   cluster.getConfiguration(index));
   assertEquals(0, rc);
 
   // Should have copied over the namespace from the active
-  FSImageTestUtil.assertNNHasCheckpoints(cluster, index, 
ImmutableList.of(0));
+  FSImageTestUtil.assertNNHasCheckpoints(cluster, index,
+  ImmutableList.of(expectedCheckpointTxId));
 }
 
 // We should now be able to start the standbys successfully.
@@ -221,7 +228,7 @@ public class TestBootstrapStandby {
* {@link DFSConfigKeys#DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_KEY}
* created by HDFS-8808.
*/
-  @Test
+  @Test(timeout=30000)
   public void testRateThrottling() throws Exception {
 cluster.getConfiguration(0).setLong(
 DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY, 1);
@@ -229,23 +236,46 @@ public class TestBootstrapStandby {
 cluster.waitActive();
 nn0 = cluster.getNameNode(0);
 cluster.transitionToActive(0);
-// Each edit has at least 1 byte. So the lowRate definitely should cause
-// a timeout, if enforced. If lowRate is not enforced, any reasonable test
-// machine should at least download an image with 5 edits in 5 seconds.
-for (int i = 0; i < 5; i++) {
+// Any reasonable test machine should be able to transfer 1 byte per MS
+// (which is ~1K/s)
+final int minXferRatePerMS = 1;
+int imageXferBufferSize = DFSUtilClient.getIoFileBufferSize(
+

[41/50] [abbrv] hadoop git commit: HADOOP-12296. when setnetgrent returns 0 in linux, exception should be thrown. Contributed by Chang Li

2015-11-04 Thread aengineer
HADOOP-12296. when setnetgrent returns 0 in linux, exception should be thrown. 
Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9d25c3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9d25c3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9d25c3e

Branch: refs/heads/HDFS-7240
Commit: b9d25c3ee2d20166d6a786c5a16cc001e249f61c
Parents: 0383a39
Author: Jason Lowe 
Authored: Wed Nov 4 16:34:01 2015 +
Committer: Jason Lowe 
Committed: Wed Nov 4 16:34:01 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../security/JniBasedUnixGroupsNetgroupMapping.c  | 18 +-
 2 files changed, 16 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9d25c3e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4114bbd..efb73f4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1416,6 +1416,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12296. when setnetgrent returns 0 in linux, exception should be
+thrown (Chang Li via jlowe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9d25c3e/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
index de73a8a..4ae1051 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
@@ -57,6 +57,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   int setnetgrentCalledFlag = 0;
 
   // if not NULL then THROW exception
+  char *errorType = NULL;
   char *errorMessage = NULL;
 
   cgroup = (*env)->GetStringUTFChars(env, jgroup, NULL);
@@ -94,7 +95,14 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   }
 }
   }
-
+#if defined(__linux__)
+  else {
+errorType = "java/io/IOException";
+errorMessage =
+"no netgroup of this name is known or some other error occurred";
+goto END;
+  }
+#endif
   //--
   // build return data (java array)
 
@@ -103,7 +111,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
 (*env)->FindClass(env, "java/lang/String"),
 NULL);
   if (jusers == NULL) {
-errorMessage = "java/lang/OutOfMemoryError";
+errorType = "java/lang/OutOfMemoryError";
 goto END;
   }
 
@@ -114,7 +122,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   for(current = userListHead; current != NULL; current = current->next) {
 jstring juser = (*env)->NewStringUTF(env, current->string);
 if (juser == NULL) {
-  errorMessage = "java/lang/OutOfMemoryError";
+  errorType = "java/lang/OutOfMemoryError";
   goto END;
 }
 (*env)->SetObjectArrayElement(env, jusers, i++, juser);
@@ -134,8 +142,8 @@ END:
   }
 
   // return results or THROW
-  if(errorMessage) {
-THROW(env, errorMessage, NULL);
+  if(errorType) {
+THROW(env, errorType, errorMessage);
 return NULL;
   } else {
 return jusers;



[12/50] [abbrv] hadoop git commit: HADOOP-12133 Add schemas to Maven Assembly XMLs

2015-11-04 Thread aengineer
HADOOP-12133 Add schemas to Maven Assembly XMLs

Signed-off-by: Allen Wittenauer 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18727c63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18727c63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18727c63

Branch: refs/heads/HDFS-7240
Commit: 18727c63da721da9d29932378818d8742f705808
Parents: 45d3967
Author: Gábor Lipták 
Authored: Sat Jun 27 11:11:20 2015 -0400
Committer: Allen Wittenauer 
Committed: Fri Oct 30 11:36:52 2015 -0700

--
 hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml  | 4 +++-
 .../src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml   | 4 +++-
 .../src/main/resources/assemblies/hadoop-httpfs-dist.xml | 4 +++-
 .../src/main/resources/assemblies/hadoop-kms-dist.xml| 4 +++-
 .../src/main/resources/assemblies/hadoop-mapreduce-dist.xml  | 4 ++--
 .../src/main/resources/assemblies/hadoop-nfs-dist.xml| 4 +++-
 hadoop-assemblies/src/main/resources/assemblies/hadoop-sls.xml   | 4 +++-
 hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml   | 4 ++--
 hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml | 4 ++--
 .../src/main/resources/assemblies/hadoop-yarn-dist.xml   | 4 ++--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 2 ++
 hadoop-tools/hadoop-sls/src/main/assemblies/sls.xml  | 4 +++-
 12 files changed, 31 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18727c63/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
--
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
index 1a5d7d0..85899e5 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
@@ -14,7 +14,9 @@
See the License for the specific language governing permissions and
limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3
+                      http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   hadoop-distro
   
 dir

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18727c63/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml
index 89e8771..0edfdeb 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml
@@ -12,7 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3
+                      http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   hadoop-hdfs-nfs-dist
   
 dir

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18727c63/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
index 6468a8a..4d508ee 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
@@ -12,7 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3
+                      http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   hadoop-httpfs-dist
   
 dir

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18727c63/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml

[06/50] [abbrv] hadoop git commit: HDFS-4937. ReplicationMonitor can infinite-loop in BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee.

2015-11-04 Thread aengineer
HDFS-4937. ReplicationMonitor can infinite-loop in 
BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43539b5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43539b5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43539b5f

Branch: refs/heads/HDFS-7240
Commit: 43539b5ff4ac0874a8a454dc93a2a782b0e0ea8f
Parents: ce31b22
Author: Kihwal Lee 
Authored: Fri Oct 30 09:27:21 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 09:29:13 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../blockmanagement/BlockPlacementPolicyDefault.java| 12 
 2 files changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43539b5f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 38b9e55..f6a22a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,6 +2201,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in
+BlockPlacementPolicyDefault#chooseRandom() (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43539b5f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d9b8d60..f610574 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -659,6 +659,7 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
+int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
   builder = debugLoggingBuilder.get();
@@ -708,6 +709,17 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 // If no candidate storage was found on this DN then set badTarget.
 badTarget = (storage == null);
   }
+  // Refresh the node count. If the live node count became smaller,
+  // but it is not reflected in this loop, it may loop forever in case
+  // the replicas/rack cannot be satisfied.
+  if (--refreshCounter == 0) {
+refreshCounter = clusterMap.countNumOfAvailableNodes(scope,
+excludedNodes);
+// It has already gone through enough number of nodes.
+if (refreshCounter <= excludedNodes.size()) {
+  break;
+}
+  }
 }
   
 if (numOfReplicas>0) {



[38/50] [abbrv] hadoop git commit: HADOOP-12544. Erasure Coding: create dummy raw coder to isolate performance issues in testing. Contributed by Rui Li.

2015-11-04 Thread aengineer
HADOOP-12544. Erasure Coding: create dummy raw coder to isolate performance 
issues in testing. Contributed by Rui Li.

Change-Id: I9856456b59ed881c5ba2acce51e4d9bd01dc6f48


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e1745d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e1745d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e1745d8

Branch: refs/heads/HDFS-7240
Commit: 3e1745d8e8e5a44f7c8eab9a8234edaf389828c7
Parents: 194251c
Author: Zhe Zhang 
Authored: Tue Nov 3 22:26:27 2015 -0800
Committer: Zhe Zhang 
Committed: Tue Nov 3 22:26:27 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../erasurecode/rawcoder/DummyRawDecoder.java   | 47 +++
 .../erasurecode/rawcoder/DummyRawEncoder.java   | 46 +++
 .../rawcoder/DummyRawErasureCoderFactory.java   | 36 +
 .../hadoop/io/erasurecode/TestCoderBase.java|  4 +
 .../erasurecode/rawcoder/TestDummyRawCoder.java | 83 
 .../erasurecode/rawcoder/TestRawCoderBase.java  | 10 +--
 7 files changed, 224 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e1745d8/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0d1bce2..453efe6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -613,6 +613,9 @@ Trunk (Unreleased)
   HADOOP-12047. Indicate preference not to affect input buffers during
   coding in erasure coder. (Kai Zheng via waltersu4549)
 
+  HADOOP-12544. Erasure Coding: create dummy raw coder to isolate 
performance
+  issues in testing. (Rui Li via zhz)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e1745d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawDecoder.java
new file mode 100644
index 000..25dfa57
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawDecoder.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A dummy raw decoder that does no real computation.
+ * Instead, it just returns zero bytes.
+ * This decoder can be used to isolate the performance issue to HDFS side logic
+ * instead of codec, and is intended for test only.
+ */
+@InterfaceAudience.Private
+public class DummyRawDecoder extends AbstractRawErasureDecoder {
+  public DummyRawDecoder(int numDataUnits, int numParityUnits) {
+super(numDataUnits, numParityUnits);
+  }
+
+  @Override
+  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
+  ByteBuffer[] outputs) {
+// Nothing to do. Output buffers have already been reset
+  }
+
+  @Override
+  protected void doDecode(byte[][] inputs, int[] inputOffsets, int dataLen,
+  int[] erasedIndexes, byte[][] outputs, int[] outputOffsets) {
+// Nothing to do. Output buffers have already been reset
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e1745d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawEncoder.java
--
diff --git 

[08/50] [abbrv] hadoop git commit: Creating 2.6.3 entries in CHANGES.txt files.

2015-11-04 Thread aengineer
Creating 2.6.3 entries in CHANGES.txt files.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eadf7b30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eadf7b30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eadf7b30

Branch: refs/heads/HDFS-7240
Commit: eadf7b3096cb010eb7f0afd9afd4ae0d67b2645f
Parents: 3c0204a
Author: Jason Lowe 
Authored: Fri Oct 30 14:50:50 2015 +
Committer: Jason Lowe 
Committed: Fri Oct 30 14:50:50 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eadf7b30/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6b33a2c..ddd0796 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -2156,6 +2156,18 @@ Release 2.7.0 - 2015-04-20
 HADOOP-11837. AuthenticationFilter should destroy SignerSecretProvider in
 Tomcat deployments. (Bowen Zhang via wheat9)
 
+Release 2.6.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.2 - 2015-10-21
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eadf7b30/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c5846b3..17df171 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -3298,6 +3298,18 @@ Release 2.7.0 - 2015-04-20
   HDFS-7700. Document quota support for storage types. (Xiaoyu Yao via
   Arpit Agarwal)
 
+Release 2.6.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.2 - 2015-10-21
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eadf7b30/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index af5c3f6..8594e1e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -888,6 +888,18 @@ Release 2.7.0 - 2015-04-20
 MAPREDUCE-6285. ClientServiceDelegate should not retry upon
 AuthenticationException. (Jonathan Eagles via ozawa)
 
+Release 2.6.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.2 - 2015-10-21
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eadf7b30/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 874397d..cc8f5f3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1875,6 +1875,18 @@ Release 2.7.0 - 2015-04-20
 YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers 
 and node-label column (Jason Lowe via wangda)
 
+Release 2.6.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.2 - 2015-10-21
 
   INCOMPATIBLE CHANGES



[16/50] [abbrv] hadoop git commit: Revert "fix CHANGES.txt"

2015-11-04 Thread aengineer
Revert "fix CHANGES.txt"

This reverts commit 3c0204a5866520e74917b26b6ac2061650a5bb6d.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ea4413b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ea4413b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ea4413b

Branch: refs/heads/HDFS-7240
Commit: 2ea4413b15f82a032d6dbd2532861d82a299461a
Parents: a4a6b5b
Author: yliu 
Authored: Sat Oct 31 16:20:37 2015 +0800
Committer: yliu 
Committed: Sat Oct 31 16:20:37 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ea4413b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5a61eed..211e7fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,6 +2201,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in
+BlockPlacementPolicyDefault#chooseRandom() (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -2273,9 +2276,6 @@ Release 2.7.2 - UNRELEASED
 HDFS-9317. Document fsck -blockId and -storagepolicy options in branch-2.7.
 (aajisaka)
 
-HDFS-4937. ReplicationMonitor can infinite-loop in
-BlockPlacementPolicyDefault#chooseRandom() (kihwal)
-
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



[24/50] [abbrv] hadoop git commit: HADOOP-12533. Introduce FileNotFoundException in WASB for read and seek API. Contributed by Dushyanth.

2015-11-04 Thread aengineer
HADOOP-12533. Introduce FileNotFoundException in WASB for read and seek API. 
Contributed by Dushyanth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ce0a650
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ce0a650
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ce0a650

Branch: refs/heads/HDFS-7240
Commit: 3ce0a6502e78240f551c29bb27a2324ce359cd70
Parents: 259bea3
Author: cnauroth 
Authored: Mon Nov 2 09:38:37 2015 -0800
Committer: cnauroth 
Committed: Mon Nov 2 10:17:41 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 125 ++---
 ...estFileSystemOperationExceptionHandling.java | 131 +
 ...perationsExceptionHandlingMultiThreaded.java | 185 +++
 4 files changed, 422 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ce0a650/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5c8daad..c8d60b0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1304,6 +1304,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12519. hadoop-azure tests should avoid creating a metrics
 configuration file in the module root directory. (cnauroth)
 
+HADOOP-12533. Introduce FileNotFoundException in WASB for read and seek 
API.
+(Dushyanth via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ce0a650/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 7c5a504..73bc6b3 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.azure;
 
 import java.io.DataInputStream;
+import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -49,6 +50,7 @@ import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -62,7 +64,6 @@ import 
org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.codehaus.jackson.JsonNode;
@@ -74,9 +75,11 @@ import org.codehaus.jackson.map.ObjectMapper;
 import com.google.common.annotations.VisibleForTesting;
 import com.microsoft.azure.storage.AccessCondition;
 import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.StorageErrorCode;
 import com.microsoft.azure.storage.StorageException;
 import com.microsoft.azure.storage.blob.CloudBlob;
-import com.microsoft.azure.storage.core.*;
+import com.microsoft.azure.storage.StorageErrorCodeStrings;
+import org.apache.hadoop.io.IOUtils;
 
 /**
  * A {@link FileSystem} for reading and writing files stored on http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ce0a650/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
new file mode 100644
index 000..35a1f50
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for 

[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into hdfs-7240

2015-11-04 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/312d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index da09b0e,29bcd79..c93a362
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@@ -2816,30 -2633,14 +2831,30 @@@ public class DataNode extends Reconfigu
}
  
/**
-* Convenience method, which unwraps RemoteException.
-* @throws IOException not a RemoteException.
-*/
 -   * Update replica with the new generation stamp and length.  
++  * Convenience method, which unwraps RemoteException.
++  * @throws IOException not a RemoteException.
++  */
 +  private static ReplicaRecoveryInfo callInitReplicaRecovery(
 +  InterDatanodeProtocol datanode,
 +  RecoveringBlock rBlock) throws IOException {
 +try {
 +  return datanode.initReplicaRecovery(rBlock);
- } catch(RemoteException re) {
++} catch (RemoteException re) {
 +  throw re.unwrapRemoteException();
 +}
 +  }
 +
 +  /**
-* Update replica with the new generation stamp and length.  
++   * Update replica with the new generation stamp and length.
 */
@Override // InterDatanodeProtocol
public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock,
--  final long recoveryId, final long newBlockId, final long newLength)
++   final long recoveryId, final long 
newBlockId, final long newLength)
throws IOException {
 -final String storageID = data.updateReplicaUnderRecovery(oldBlock,
 -recoveryId, newBlockId, newLength);
 +final FsDatasetSpi dataset =
 +(FsDatasetSpi) getDataset(oldBlock.getBlockPoolId());
 +final String storageID = dataset.updateReplicaUnderRecovery(
 +oldBlock, recoveryId, newBlockId, newLength);
  // Notify the namenode of the updated block info. This is important
  // for HA, since otherwise the standby node may lose track of the
  // block locations until the next block report.
@@@ -2851,234 -2652,6 +2866,244 @@@
  return storageID;
}
  
-   /** A convenient class used in block recovery */
-   static class BlockRecord { 
++  /**
++   * A convenient class used in block recovery
++   */
++  static class BlockRecord {
 +final DatanodeID id;
 +final InterDatanodeProtocol datanode;
 +final ReplicaRecoveryInfo rInfo;
- 
 +private String storageID;
 +
 +BlockRecord(DatanodeID id,
 +InterDatanodeProtocol datanode,
 +ReplicaRecoveryInfo rInfo) {
 +  this.id = id;
 +  this.datanode = datanode;
 +  this.rInfo = rInfo;
 +}
 +
 +void updateReplicaUnderRecovery(String bpid, long recoveryId,
 +long newBlockId, long newLength)
 +throws IOException {
 +  final ExtendedBlock b = new ExtendedBlock(bpid, rInfo);
 +  storageID = datanode.updateReplicaUnderRecovery(b, recoveryId, 
newBlockId,
 +  newLength);
 +}
 +
 +@Override
 +public String toString() {
 +  return "block:" + rInfo + " node:" + id;
 +}
 +  }
 +
-   /** Recover a block */
++
++  /**
++   * Recover a block
++   */
 +  private void recoverBlock(RecoveringBlock rBlock) throws IOException {
 +ExtendedBlock block = rBlock.getBlock();
 +String blookPoolId = block.getBlockPoolId();
 +DatanodeID[] datanodeids = rBlock.getLocations();
 +List syncList = new 
ArrayList(datanodeids.length);
 +int errorCount = 0;
 +
 +//check generation stamps
- for(DatanodeID id : datanodeids) {
++for (DatanodeID id : datanodeids) {
 +  try {
 +BPOfferService bpos = blockPoolManager.get(blookPoolId);
 +DatanodeRegistration bpReg = bpos.bpRegistration;
- InterDatanodeProtocol datanode = bpReg.equals(id)?
- this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
- dnConf.socketTimeout, dnConf.connectToDnViaHostname);
++InterDatanodeProtocol datanode = bpReg.equals(id) ?
++this : DataNode.createInterDataNodeProtocolProxy(id, getConf(),
++dnConf.socketTimeout, dnConf.connectToDnViaHostname);
 +ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
 +if (info != null &&
 +info.getGenerationStamp() >= block.getGenerationStamp() &&
 +info.getNumBytes() > 0) {
 +  syncList.add(new BlockRecord(id, datanode, info));
 +}
 +  } catch (RecoveryInProgressException ripE) {
 +InterDatanodeProtocol.LOG.warn(
 +"Recovery for replica " + block + " on data-node " + id
- + " is already in 

[05/50] [abbrv] hadoop git commit: YARN-4320. TestJobHistoryEventHandler fails as AHS in MiniYarnCluster no longer binds to default port 8188. Contributed by Varun Saxena.

2015-11-04 Thread aengineer
YARN-4320. TestJobHistoryEventHandler fails as AHS in MiniYarnCluster no longer 
binds to default port 8188. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce31b227
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce31b227
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce31b227

Branch: refs/heads/HDFS-7240
Commit: ce31b22739512804da38cf87e0ce1059e3128da3
Parents: d21214c
Author: Tsuyoshi Ozawa 
Authored: Fri Oct 30 17:51:39 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Fri Oct 30 18:00:20 2015 +0900

--
 .../mapreduce/jobhistory/TestJobHistoryEventHandler.java  | 10 +++---
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../ApplicationHistoryServer.java |  2 +-
 3 files changed, 11 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce31b227/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index 2b07efb..f213b32 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -489,9 +489,6 @@ public class TestJobHistoryEventHandler {
 TestParams t = new TestParams(false);
 Configuration conf = new YarnConfiguration();
 conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
-conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
-JHEvenHandlerForTest jheh = new JHEvenHandlerForTest(t.mockAppContext, 0);
-jheh.init(conf);
 MiniYARNCluster yarnCluster = null;
 long currentTime = System.currentTimeMillis();
 try {
@@ -499,6 +496,13 @@ public class TestJobHistoryEventHandler {
 TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1);
   yarnCluster.init(conf);
   yarnCluster.start();
+  Configuration confJHEH = new YarnConfiguration(conf);
+  confJHEH.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
+  confJHEH.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+  MiniYARNCluster.getHostname() + ":" +
+  yarnCluster.getApplicationHistoryServer().getPort());
+  JHEvenHandlerForTest jheh = new JHEvenHandlerForTest(t.mockAppContext, 
0);
+  jheh.init(confJHEH);
   jheh.start();
   TimelineStore ts = yarnCluster.getApplicationHistoryServer()
   .getTimelineStore();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce31b227/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2151136..874397d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1150,6 +1150,9 @@ Release 2.7.2 - UNRELEASED
 YARN-4312. TestSubmitApplicationWithRMHA fails on branch-2.7 and branch-2.6
 as some of the test cases time out. (Varun Saxena via ozawa)
 
+YARN-4320. TestJobHistoryEventHandler fails as AHS in MiniYarnCluster no 
longer
+binds to default port 8188. (Varun Saxena via ozawa)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce31b227/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index 111a85f..21e1c1a 100644
--- 

[43/50] [abbrv] hadoop git commit: fix up CHANGES.txt

2015-11-04 Thread aengineer
fix up CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fb1ece4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fb1ece4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fb1ece4

Branch: refs/heads/HDFS-7240
Commit: 3fb1ece4e9b290ad4a0b6357a519b20f59561911
Parents: 0eed886
Author: Kihwal Lee 
Authored: Wed Nov 4 12:14:45 2015 -0600
Committer: Kihwal Lee 
Committed: Wed Nov 4 12:15:10 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fb1ece4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bdcc1fc..500dc92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2225,9 +2225,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line
 endings, fails on Windows. (cnauroth)
 
-HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
-commitBlock. (Chang Li via zhz)
-
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
@@ -2246,6 +2243,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
+commitBlock. (Chang Li via zhz)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[01/50] [abbrv] hadoop git commit: YARN-4313. Race condition in MiniMRYarnCluster when getting history server address. Contributed by Jian He

2015-11-04 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 b14a70e79 -> 312de


YARN-4313. Race condition in MiniMRYarnCluster when getting history
server address. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7412ff48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7412ff48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7412ff48

Branch: refs/heads/HDFS-7240
Commit: 7412ff48eeb967c972c19c1370c77a41c5b3b81f
Parents: e5b1733
Author: Xuan 
Authored: Thu Oct 29 17:36:36 2015 -0700
Committer: Xuan 
Committed: Thu Oct 29 17:36:36 2015 -0700

--
 .../java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java  | 5 -
 hadoop-yarn-project/CHANGES.txt | 3 +++
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7412ff48/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
index 1dd6fca..3521834 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
@@ -190,6 +190,7 @@ public class MiniMRYarnCluster extends MiniYARNCluster {
 public JobHistoryServerWrapper() {
   super(JobHistoryServerWrapper.class.getName());
 }
+private volatile boolean jhsStarted = false;
 
 @Override
 public synchronized void serviceStart() throws Exception {
@@ -211,9 +212,11 @@ public class MiniMRYarnCluster extends MiniYARNCluster {
 new Thread() {
   public void run() {
 historyServer.start();
+jhsStarted = true;
   };
 }.start();
-while (historyServer.getServiceState() == STATE.INITED) {
+
+while (!jhsStarted) {
   LOG.info("Waiting for HistoryServer to start...");
   Thread.sleep(1500);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7412ff48/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0da15bd..d0fa27d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1144,6 +1144,9 @@ Release 2.7.2 - UNRELEASED
 YARN-4183. Enabling generic application history forces every job to get a
 timeline service delegation token (Mit Desai via jeagles)
 
+YARN-4313. Race condition in MiniMRYarnCluster when getting history server
+address. (Jian He via xgong)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



[22/50] [abbrv] hadoop git commit: MAPREDUCE-6525. Fix test failure of TestMiniMRClientCluster.testRestart. Contributed by Masatake Iwasaki.

2015-11-04 Thread aengineer
MAPREDUCE-6525. Fix test failure of TestMiniMRClientCluster.testRestart. 
Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04d97f8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04d97f8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04d97f8a

Branch: refs/heads/HDFS-7240
Commit: 04d97f8abb7fcc7b635b9499a48ddaa1fe0ac7e3
Parents: 90e1405
Author: Akira Ajisaka 
Authored: Tue Nov 3 01:48:45 2015 +0900
Committer: Akira Ajisaka 
Committed: Tue Nov 3 01:50:07 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../hadoop/mapreduce/v2/MiniMRYarnCluster.java  | 36 
 2 files changed, 24 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04d97f8a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 4d6dcb8..23bef37 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -287,6 +287,9 @@ Trunk (Unreleased)
 MAPREDUCE-5801. Uber mode's log message is missing a vcore reason
 (Steven Wong via aw)
 
+MAPREDUCE-6525. Fix test failure of TestMiniMRClientCluster.testRestart.
+(Masatake Iwasaki via aajisaka)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04d97f8a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
index 3521834..cad6f3a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
@@ -186,6 +186,27 @@ public class MiniMRYarnCluster extends MiniYARNCluster {
 super.serviceInit(conf);
   }
 
+  @Override
+  protected void serviceStart() throws Exception {
+super.serviceStart();
+
+//need to do this because historyServer.init creates a new Configuration
+getConfig().set(JHAdminConfig.MR_HISTORY_ADDRESS,
+
historyServer.getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
+MRWebAppUtil.setJHSWebappURLWithoutScheme(getConfig(),
+MRWebAppUtil.getJHSWebappURLWithoutScheme(historyServer.getConfig()));
+
+LOG.info("MiniMRYARN ResourceManager address: " +
+getConfig().get(YarnConfiguration.RM_ADDRESS));
+LOG.info("MiniMRYARN ResourceManager web address: " +
+WebAppUtils.getRMWebAppURLWithoutScheme(getConfig()));
+LOG.info("MiniMRYARN HistoryServer address: " +
+getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
+LOG.info("MiniMRYARN HistoryServer web address: " +
+getResolvedMRHistoryWebAppURLWithoutScheme(getConfig(),
+MRWebAppUtil.getJHSHttpPolicy() == HttpConfig.Policy.HTTPS_ONLY));
+  }
+
   private class JobHistoryServerWrapper extends AbstractService {
 public JobHistoryServerWrapper() {
   super(JobHistoryServerWrapper.class.getName());
@@ -228,21 +249,6 @@ public class MiniMRYarnCluster extends MiniYARNCluster {
   } catch (Throwable t) {
 throw new YarnRuntimeException(t);
   }
-  //need to do this because historyServer.init creates a new Configuration
-  getConfig().set(JHAdminConfig.MR_HISTORY_ADDRESS,
-  
historyServer.getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
-  MRWebAppUtil.setJHSWebappURLWithoutScheme(getConfig(),
-  
MRWebAppUtil.getJHSWebappURLWithoutScheme(historyServer.getConfig()));
-
-  LOG.info("MiniMRYARN ResourceManager address: " +
-   getConfig().get(YarnConfiguration.RM_ADDRESS));
-  LOG.info("MiniMRYARN ResourceManager web address: " +
-   WebAppUtils.getRMWebAppURLWithoutScheme(getConfig()));
-  LOG.info("MiniMRYARN HistoryServer address: " +
-   getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
-  LOG.info("MiniMRYARN HistoryServer web address: "
-  + getResolvedMRHistoryWebAppURLWithoutScheme(getConfig(),
-  MRWebAppUtil.getJHSHttpPolicy() == 

[26/50] [abbrv] hadoop git commit: HDFS-9339. Extend full test of KMS ACLs. Contributed by Daniel Templeton.

2015-11-04 Thread aengineer
HDFS-9339. Extend full test of KMS ACLs. Contributed by Daniel Templeton.

Change-Id: I618fa5e85250eabc1eef3d8c11f32700d6fb


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78d68908
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78d68908
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78d68908

Branch: refs/heads/HDFS-7240
Commit: 78d6890865424db850faecfc5c76f14c64925063
Parents: 9e7dcab
Author: Zhe Zhang 
Authored: Mon Nov 2 13:51:45 2015 -0800
Committer: Zhe Zhang 
Committed: Mon Nov 2 13:51:45 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |2 +
 .../apache/hadoop/hdfs/TestAclsEndToEnd.java| 1042 +-
 2 files changed, 1041 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d68908/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a2e4824..fea4106 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1613,6 +1613,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-9229. Expose size of NameNode directory as a metric.
 (Surendra Singh Lilhore via zhz)
 
+HDFS-9339. Extend full test of KMS ACLs. (Daniel Templeton via zhz)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d68908/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
index de0646a..2b515d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
@@ -233,15 +233,15 @@ public class TestAclsEndToEnd {
 keyadminUgi.getUserName());
 conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.ROLLOVER",
 keyadminUgi.getUserName());
-conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET", "");
+conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET", " ");
 conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_KEYS",
 keyadminUgi.getUserName());
 conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
 hdfsUgi.getUserName());
-conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.SET_KEY_MATERIAL", "");
+conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.SET_KEY_MATERIAL", " ");
 conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
 hdfsUgi.getUserName());
-conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK", "");
+conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK", "*");
 
 return conf;
   }
@@ -478,6 +478,1042 @@ public class TestAclsEndToEnd {
   }
 
   /**
+   * Test that key creation is correctly governed by ACLs.
+   * @throws Exception thrown if setup fails
+   */
+  @Test
+  public void testCreateKey() throws Exception {
+Configuration conf = new Configuration();
+
+// Correct config with whitelist ACL
+conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+realUgi.getUserName());
+conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+realUgi.getUserName());
+
+try {
+  setup(conf);
+
+  assertTrue("Exception during key creation with correct config"
+  + " using whitelist key ACLs", createKey(realUgi, KEY1, conf));
+} finally {
+  teardown();
+}
+
+conf = new Configuration();
+
+// Correct config with default ACL
+conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+realUgi.getUserName());
+conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT",
+realUgi.getUserName());
+
+try {
+  setup(conf);
+
+  assertTrue("Exception during key creation with correct config"
+  + " using default key ACLs", createKey(realUgi, KEY2, conf));
+} finally {
+  teardown();
+}
+
+conf = new Configuration();
+
+// Denied because of blacklist
+conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+realUgi.getUserName());
+conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.CREATE",
+realUgi.getUserName());
+conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+realUgi.getUserName());
+
+try {
+  setup(conf);
+
+  assertFalse("Allowed key creation with blacklist 

[11/50] [abbrv] hadoop git commit: HADOOP-11919. Empty commit to test github integration.

2015-11-04 Thread aengineer
HADOOP-11919. Empty commit to test github integration.

closes apache/hadoop#40


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45d39679
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45d39679
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45d39679

Branch: refs/heads/HDFS-7240
Commit: 45d39679dfc47e66fe2d69da09689fa62017637f
Parents: 6344b6a
Author: Owen O'Malley 
Authored: Fri Oct 30 10:04:30 2015 -0700
Committer: Owen O'Malley 
Committed: Fri Oct 30 10:21:03 2015 -0700

--

--




[21/50] [abbrv] hadoop git commit: HDFS-8777. Erasure Coding: add tests for taking snapshots on EC files. Contributed by Rakesh R.

2015-11-04 Thread aengineer
HDFS-8777. Erasure Coding: add tests for taking snapshots on EC files. 
Contributed by Rakesh R.

Change-Id: Ia3ae582405e741ca8e90d9255ab9b95d085e5fa8


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90e14055
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90e14055
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90e14055

Branch: refs/heads/HDFS-7240
Commit: 90e14055168afdb93fa8089158c03a6a694e066c
Parents: 2529464
Author: Zhe Zhang 
Authored: Mon Nov 2 07:48:30 2015 -0800
Committer: Zhe Zhang 
Committed: Mon Nov 2 07:48:30 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../TestErasureCodingPolicyWithSnapshot.java| 199 +++
 2 files changed, 202 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90e14055/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0bbc60d..3b2d997 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -189,6 +189,9 @@ Trunk (Unreleased)
 
 HDFS-9323. Randomize the DFSStripedOutputStreamWithFailure tests. 
(szetszwo)
 
+HDFS-8777. Erasure Coding: add tests for taking snapshots on EC files. 
+(Rakesh R via zhz)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90e14055/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
new file mode 100644
index 000..515763c
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestErasureCodingPolicyWithSnapshot {
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+  private Configuration conf;
+
+  private final static short GROUP_SIZE = StripedFileTestUtil.NUM_DATA_BLOCKS
+  + StripedFileTestUtil.NUM_PARITY_BLOCKS;
+  private final static int SUCCESS = 0;
+  private final ErasureCodingPolicy sysDefaultPolicy = 
ErasureCodingPolicyManager
+  .getSystemDefaultPolicy();
+
+  @Before
+  public void setupCluster() throws IOException {
+conf = new HdfsConfiguration();
+cluster = new 
MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
+cluster.waitActive();
+fs = cluster.getFileSystem();
+  }
+
+  @After
+  public void shutdownCluster() throws IOException {
+if (cluster != null) {
+  cluster.shutdown();
+}
+  }
+
+  /**
+   * Test correctness of successive snapshot creation and deletion with erasure
+   * coding policies. Create snapshot of ecDir's parent directory.
+   */
+  @Test(timeout = 12)
+  public void testSnapshotsOnErasureCodingDirsParentDir() throws Exception {
+final int len = 1024;
+final Path ecDirParent = new Path("/parent");
+final Path 

[10/50] [abbrv] hadoop git commit: MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary(). Contributed by Junping Du

2015-11-04 Thread aengineer
MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary(). Contributed 
by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6344b6a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6344b6a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6344b6a7

Branch: refs/heads/HDFS-7240
Commit: 6344b6a7694c70f296392b6462dba452ff762109
Parents: 6ae9efa
Author: Jason Lowe 
Authored: Fri Oct 30 15:31:38 2015 +
Committer: Jason Lowe 
Committed: Fri Oct 30 15:31:38 2015 +

--
 hadoop-mapreduce-project/CHANGES.txt   |  6 ++
 .../hadoop/mapreduce/v2/hs/HistoryFileManager.java | 13 ++---
 2 files changed, 16 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6344b6a7/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 22f9e89..32be987 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -671,6 +671,9 @@ Release 2.7.2 - UNRELEASED
 avoid FileNotFoundException causing HistoryFileInfo into MOVE_FAILED state.
 (zhihai xu via devaraj)
 
+MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary()
+(Junping Du via jlowe)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES
@@ -904,6 +907,9 @@ Release 2.6.3 - UNRELEASED
 avoid FileNotFoundException causing HistoryFileInfo into MOVE_FAILED state.
 (zhihai xu via devaraj)
 
+MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary()
+(Junping Du via jlowe)
+
 Release 2.6.2 - 2015-10-21
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6344b6a7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index f0786da..b221961 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -966,9 +966,16 @@ public class HistoryFileManager extends AbstractService {
 
   private String getJobSummary(FileContext fc, Path path) throws IOException {
 Path qPath = fc.makeQualified(path);
-FSDataInputStream in = fc.open(qPath);
-String jobSummaryString = in.readUTF();
-in.close();
+FSDataInputStream in = null;
+String jobSummaryString = null;
+try {
+  in = fc.open(qPath);
+  jobSummaryString = in.readUTF();
+} finally {
+  if (in != null) {
+in.close();
+  }
+}
 return jobSummaryString;
   }
 



[18/50] [abbrv] hadoop git commit: HADOOP-12061. Incorrect command in single cluster setup document. Contributed by Kengo Seki.

2015-08-25 Thread aengineer
HADOOP-12061. Incorrect command in single cluster setup document. Contributed 
by Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36b1a1e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36b1a1e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36b1a1e7

Branch: refs/heads/HDFS-7240
Commit: 36b1a1e784789170350bcd78f394129ce50ba4e4
Parents: 1e06299
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu Aug 20 11:09:45 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu Aug 20 11:09:45 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/site/markdown/SingleCluster.md.vm   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36b1a1e7/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 943dbac..c033f05 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1096,6 +1096,9 @@ Release 2.7.2 - UNRELEASED
 HADOOP-11932. MetricsSinkAdapter may hang  when being stopped.
 (Brahma Reddy Battula via jianhe)
 
+HADOOP-12061. Incorrect command in single cluster setup document.
+(Kengo Seki via aajisaka)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36b1a1e7/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm 
b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
index ca5b48c..2de8b2b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
@@ -140,7 +140,7 @@ If you cannot ssh to localhost without a passphrase, 
execute the following comma
 
   $ ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
  $ cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
-  $ chmod 0700 ~/.ssh/authorized_keys
+  $ chmod 0600 ~/.ssh/authorized_keys
 
 $H3 Execution
 



[02/50] [abbrv] hadoop git commit: HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)

2015-08-25 Thread aengineer
HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71566e23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71566e23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71566e23

Branch: refs/heads/HDFS-7240
Commit: 71566e23820d33e0110ca55eded3299735e970b9
Parents: 51a0096
Author: yliu y...@apache.org
Authored: Tue Aug 18 09:23:06 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Aug 18 09:23:06 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 2 ++
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java   | 3 +--
 2 files changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71566e23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d1b04dc..132adc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -820,6 +820,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8845. DiskChecker should not traverse the entire tree (Chang Li via
 Colin P. McCabe)
 
+HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)
+
   BUG FIXES
 
 HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71566e23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index cde6588..aad7fec 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -33,7 +33,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
-import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicLong;
@@ -204,7 +203,7 @@ public class BlockManager implements BlockStatsMXBean {
* DataNode. We'll eventually remove these extras.
*/
   public final Map<String, LightWeightLinkedSet<Block>> excessReplicateMap =
-new TreeMap<String, LightWeightLinkedSet<Block>>();
+new HashMap<>();
 
   /**
* Store set of Blocks that need to be replicated 1 or more times.



[15/50] [abbrv] hadoop git commit: HDFS-8803. Move DfsClientConf to hdfs-client. Contributed by Mingliang Liu.

2015-08-25 Thread aengineer
HDFS-8803. Move DfsClientConf to hdfs-client. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3aac4758
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3aac4758
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3aac4758

Branch: refs/heads/HDFS-7240
Commit: 3aac4758b007a56e3d66998d457b2156effca528
Parents: f61120d
Author: Haohui Mai whe...@apache.org
Authored: Wed Aug 19 11:28:05 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Aug 19 11:28:05 2015 -0700

--
 .../hdfs/client/HdfsClientConfigKeys.java   |  73 +-
 .../hadoop/hdfs/client/impl/DfsClientConf.java  | 741 +++
 .../hadoop/hdfs/client/impl/package-info.java   |  18 +
 .../hadoop/hdfs/protocol/HdfsConstants.java |   7 +
 .../hadoop/hdfs/util/ByteArrayManager.java  | 422 +++
 .../apache/hadoop/hdfs/util/package-info.java   |  18 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |   8 +-
 .../org/apache/hadoop/hdfs/ClientContext.java   |   5 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  14 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 173 -
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |   3 +-
 .../apache/hadoop/hdfs/HdfsConfiguration.java   |   8 +-
 .../hadoop/hdfs/client/impl/DfsClientConf.java  | 714 --
 .../hadoop/hdfs/server/balancer/Dispatcher.java |   4 +-
 .../hdfs/server/common/HdfsServerConstants.java |   6 -
 .../hadoop/hdfs/server/datanode/DNConf.java |  17 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  11 +-
 .../hdfs/server/datanode/DataXceiver.java   |   5 +-
 .../server/datanode/SecureDataNodeStarter.java  |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   8 +-
 .../hdfs/server/namenode/NamenodeFsck.java  |   8 +-
 .../hdfs/shortcircuit/DomainSocketFactory.java  |   4 +-
 .../hadoop/hdfs/util/ByteArrayManager.java  | 418 ---
 .../org/apache/hadoop/hdfs/TestFiPipelines.java |   9 +-
 .../datanode/TestFiDataTransferProtocol.java|   3 +-
 .../datanode/TestFiDataTransferProtocol2.java   |   5 +-
 .../hadoop/fs/TestEnhancedByteBufferAccess.java |  10 +-
 .../java/org/apache/hadoop/fs/TestUnbuffer.java |   7 +-
 .../fs/viewfs/TestViewFsDefaultValue.java   |   8 +-
 .../apache/hadoop/hdfs/BlockReaderTestUtil.java |   6 +-
 .../org/apache/hadoop/hdfs/FileAppendTest4.java |   5 +-
 .../hadoop/hdfs/TestBlockReaderFactory.java |   4 +-
 .../hadoop/hdfs/TestBlockReaderLocal.java   |   4 +-
 .../hadoop/hdfs/TestBlockReaderLocalLegacy.java |   6 +-
 .../TestClientProtocolForPipelineRecovery.java  |   4 +-
 .../org/apache/hadoop/hdfs/TestConnCache.java   |   5 +-
 .../hadoop/hdfs/TestDFSClientRetries.java   |   8 +-
 .../apache/hadoop/hdfs/TestDFSInputStream.java  |   2 +-
 .../hadoop/hdfs/TestDataTransferKeepalive.java  |   8 +-
 .../hadoop/hdfs/TestDataTransferProtocol.java   |   6 +-
 .../apache/hadoop/hdfs/TestDatanodeDeath.java   |   5 +-
 .../hadoop/hdfs/TestDisableConnCache.java   |   3 +-
 .../hadoop/hdfs/TestDistributedFileSystem.java  |   7 +-
 .../org/apache/hadoop/hdfs/TestFileAppend2.java |   6 +-
 .../org/apache/hadoop/hdfs/TestFileAppend4.java |   5 +-
 .../apache/hadoop/hdfs/TestFileCreation.java|  11 +-
 .../java/org/apache/hadoop/hdfs/TestHFlush.java |   3 +-
 .../apache/hadoop/hdfs/TestParallelRead.java|   2 +-
 .../TestParallelShortCircuitLegacyRead.java |   4 +-
 .../TestParallelShortCircuitReadUnCached.java   |   6 +-
 .../hadoop/hdfs/TestParallelUnixDomainRead.java |   2 +-
 .../org/apache/hadoop/hdfs/TestPipelines.java   |   9 +-
 .../java/org/apache/hadoop/hdfs/TestPread.java  |   2 +-
 .../java/org/apache/hadoop/hdfs/TestRead.java   |   5 +-
 .../hadoop/hdfs/TestRemoteBlockReader.java  |   4 +-
 .../blockmanagement/TestBlockTokenWithDFS.java  |   6 +-
 .../server/datanode/TestBlockReplacement.java   |   7 +-
 .../server/datanode/TestCachingStrategy.java|   7 +-
 .../datanode/TestDataNodeVolumeFailure.java |   6 +-
 .../fsdataset/impl/LazyPersistTestCase.java |   5 +-
 .../fsdataset/impl/TestDatanodeRestart.java |   7 +-
 .../shortcircuit/TestShortCircuitCache.java |  13 +-
 .../shortcircuit/TestShortCircuitLocalRead.java |   6 +-
 64 files changed, 1591 insertions(+), 1331 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 

[23/50] [abbrv] hadoop git commit: HDFS-8922. Link the native_mini_dfs test library with libdl, since IBM Java requires it (Ayappan via Colin P. McCabe)

2015-08-25 Thread aengineer
HDFS-8922. Link the native_mini_dfs test library with libdl, since IBM Java 
requires it (Ayappan via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7642f64c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7642f64c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7642f64c

Branch: refs/heads/HDFS-7240
Commit: 7642f64c24961d2b4772591a0957e2699162a083
Parents: fc07464
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Aug 20 13:57:32 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Thu Aug 20 13:57:32 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7642f64c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fad2a867..dcc5d58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1191,6 +1191,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8908. TestAppendSnapshotTruncate may fail with IOException: Failed to
 replace a bad datanode. (Tsz Wo Nicholas Sze via yliu)
 
+HDFS-8922. Link the native_mini_dfs test library with libdl, since IBM Java
+requires it (Ayappan via Colin P. McCabe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7642f64c/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
index b4a3b40..2f8620b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -144,6 +144,7 @@ add_library(native_mini_dfs
 )
 target_link_libraries(native_mini_dfs
 ${JAVA_JVM_LIBRARY}
+${LIB_DL}
 ${OS_LINK_LIBRARIES}
 )
 



[24/50] [abbrv] hadoop git commit: HDFS-8809. HDFS fsck reports under construction blocks as CORRUPT. Contributed by Jing Zhao.

2015-08-25 Thread aengineer
HDFS-8809. HDFS fsck reports under construction blocks as CORRUPT. Contributed 
by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8bca627
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8bca627
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8bca627

Branch: refs/heads/HDFS-7240
Commit: c8bca62718203a1dad9b70d164bdf10cc71b40cd
Parents: 7642f64
Author: Jing Zhao ji...@apache.org
Authored: Thu Aug 20 16:31:24 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Thu Aug 20 16:31:24 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 ++
 .../apache/hadoop/hdfs/server/namenode/NamenodeFsck.java  | 10 ++
 .../org/apache/hadoop/hdfs/server/namenode/TestFsck.java  |  2 ++
 3 files changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8bca627/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dcc5d58..d9d176b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1194,6 +1194,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8922. Link the native_mini_dfs test library with libdl, since IBM Java
 requires it (Ayappan via Colin P. McCabe)
 
+HDFS-8809. HDFS fsck reports under construction blocks as CORRUPT. 
(jing9)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8bca627/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 2f989d1..c7892b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -528,6 +528,9 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   LocatedBlocks blocks) throws IOException {
 String path = file.getFullName(parent);
 boolean isOpen = blocks.isUnderConstruction();
+if (isOpen && !showOpenFiles) {
+  return;
+}
 int missing = 0;
 int corrupt = 0;
 long missize = 0;
@@ -536,8 +539,15 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 int misReplicatedPerFile = 0;
 StringBuilder report = new StringBuilder();
 int blockNumber = 0;
+final LocatedBlock lastBlock = blocks.getLastLocatedBlock();
 for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
   ExtendedBlock block = lBlk.getBlock();
+  if (!blocks.isLastBlockComplete() && lastBlock != null &&
+  lastBlock.getBlock().equals(block)) {
+// this is the last block and this is not complete. ignore it since
+// it is under construction
+continue;
+  }
   BlockManager bm = namenode.getNamesystem().getBlockManager();
 
   final BlockInfo storedBlock = bm.getStoredBlock(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8bca627/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 8818f17..2226947 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -612,6 +613,7 @@ public class TestFsck {
 out.write(randomString.getBytes());
 writeCount++;  
   }
+  ((DFSOutputStream) out.getWrappedStream()).hflush();
   // We expect the filesystem to be HEALTHY and show one open file
   outStr = runFsck(conf, 0, true, topDir);
   System.out.println(outStr);



[09/50] [abbrv] hadoop git commit: YARN-4028. AppBlock page key update and diagnostics value null on recovery. Contributed by Bibin A Chundatt

2015-08-25 Thread aengineer
YARN-4028. AppBlock page key update and diagnostics value null on
recovery. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22dc5fc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22dc5fc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22dc5fc2

Branch: refs/heads/HDFS-7240
Commit: 22dc5fc20942a8d98333ed5470b5fb8c1d257f1a
Parents: 7ecbfd4
Author: Xuan xg...@apache.org
Authored: Tue Aug 18 22:53:03 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Tue Aug 18 22:53:03 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt| 3 +++
 .../java/org/apache/hadoop/yarn/server/webapp/AppBlock.java| 6 +++---
 .../hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java| 3 ++-
 3 files changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22dc5fc2/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 707300a..b22777c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -780,6 +780,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3987. Send AM container completed msg to NM once AM finishes.
 (sandflee via jianhe)
 
+YARN-4028. AppBlock page key update and diagnostics value null on recovery
+(Bibin A Chundatt via xgong)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22dc5fc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 98a8f81..31a2c8a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -195,13 +195,13 @@ public class AppBlock extends HtmlBlock {
  && webUiType.equals(YarnWebParams.RM_WEB_UI)) {
   LogAggregationStatus status = getLogAggregationStatus();
   if (status == null) {
-overviewTable._("Log Aggregation Status", "N/A");
+overviewTable._("Log Aggregation Status:", "N/A");
    } else if (status == LogAggregationStatus.DISABLED
    || status == LogAggregationStatus.NOT_START
    || status == LogAggregationStatus.SUCCEEDED) {
-overviewTable._("Log Aggregation Status", status.name());
+overviewTable._("Log Aggregation Status:", status.name());
    } else {
-overviewTable._("Log Aggregation Status",
+overviewTable._("Log Aggregation Status:",
 root_url(logaggregationstatus, app.getAppId()), status.name());
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22dc5fc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 42ff1de..2eb74f7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -782,7 +782,8 @@ public class RMAppImpl implements RMApp, Recoverable {
LOG.info("Recovering app: " + getApplicationId() + " with "
+ appState.getAttemptCount() + " attempts and final state = "
+ this.recoveredFinalState);
-this.diagnostics.append(appState.getDiagnostics());
+this.diagnostics.append(null == appState.getDiagnostics() ? "" : appState
+.getDiagnostics());
 this.storedFinishTime = appState.getFinishTime();
 this.startTime = appState.getStartTime();
 



[03/50] [abbrv] hadoop git commit: HDFS-8852. HDFS architecture documentation of version 2.x is outdated about append write support. Contributed by Ajith S.

2015-08-25 Thread aengineer
HDFS-8852. HDFS architecture documentation of version 2.x is outdated about 
append write support. Contributed by Ajith S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc509f66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc509f66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc509f66

Branch: refs/heads/HDFS-7240
Commit: fc509f66d814e7a5ed81d5d73b23c400625d573b
Parents: 71566e2
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue Aug 18 23:31:52 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue Aug 18 23:31:52 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc509f66/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 132adc1..3a7b816 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1191,6 +1191,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-8806. Inconsistent metrics: number of missing blocks with replication
 factor 1 not properly cleared. (Zhe Zhang via aajisaka)
 
+HDFS-8852. HDFS architecture documentation of version 2.x is outdated
+about append write support. (Ajith S via aajisaka)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc509f66/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
index a30877a..aa94a2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
@@ -73,7 +73,7 @@ Applications that run on HDFS have large data sets. A typical 
file in HDFS is gi
 
 ### Simple Coherency Model
 
-HDFS applications need a write-once-read-many access model for files. A file 
once created, written, and closed need not be changed. This assumption 
simplifies data coherency issues and enables high throughput data access. A 
Map/Reduce application or a web crawler application fits perfectly with this 
model. There is a plan to support appending-writes to files in the future.
+HDFS applications need a write-once-read-many access model for files. A file 
once created, written, and closed need not be changed except for appends and 
truncates. Appending the content to the end of the files is supported but 
cannot be updated at arbitrary point. This assumption simplifies data coherency 
issues and enables high throughput data access. A MapReduce application or a 
web crawler application fits perfectly with this model.
 
 ### Moving Computation is Cheaper than Moving Data
 



[06/50] [abbrv] hadoop git commit: HADOOP-12050. Enable MaxInactiveInterval for hadoop http auth token. Contributed by Huizhi Lu.

2015-08-25 Thread aengineer
HADOOP-12050. Enable MaxInactiveInterval for hadoop http auth token. 
Contributed by Huizhi Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71aedfab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71aedfab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71aedfab

Branch: refs/heads/HDFS-7240
Commit: 71aedfabf39e03104c8d22456e95ef6349aae6c0
Parents: 14215c8
Author: Benoy Antony be...@apache.org
Authored: Tue Aug 18 13:43:34 2015 -0700
Committer: Benoy Antony be...@apache.org
Committed: Tue Aug 18 13:43:34 2015 -0700

--
 .../server/AuthenticationFilter.java|  63 +--
 .../server/AuthenticationToken.java |  12 ++
 .../security/authentication/util/AuthToken.java |  34 +++-
 .../server/TestAuthenticationFilter.java| 163 ++-
 .../src/site/markdown/HttpAuthentication.md |   8 +-
 5 files changed, 258 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71aedfab/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index bf44f48..e0da38b 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -146,6 +146,13 @@ public class AuthenticationFilter implements Filter {
   public static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + 
.file;
 
   /**
+   * Constant for the configuration property
+   * that indicates the max inactive interval of the generated token.
+   */
  public static final String
  AUTH_TOKEN_MAX_INACTIVE_INTERVAL = "token.MaxInactiveInterval";
+
+  /**
* Constant for the configuration property that indicates the validity of 
the generated token.
*/
    public static final String AUTH_TOKEN_VALIDITY = "token.validity";
@@ -190,6 +197,7 @@ public class AuthenticationFilter implements Filter {
   private Signer signer;
   private SignerSecretProvider secretProvider;
   private AuthenticationHandler authHandler;
+  private long maxInactiveInterval;
   private long validity;
   private String cookieDomain;
   private String cookiePath;
@@ -227,6 +235,8 @@ public class AuthenticationFilter implements Filter {
   authHandlerClassName = authHandlerName;
 }
 
+maxInactiveInterval = Long.parseLong(config.getProperty(
+AUTH_TOKEN_MAX_INACTIVE_INTERVAL, "1800")) * 1000; // 30 minutes
 validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY, "36000"))
 * 1000; //10 hours
 initializeSecretProvider(filterConfig);
@@ -355,6 +365,15 @@ public class AuthenticationFilter implements Filter {
   }
 
   /**
+   * Returns the max inactive interval time of the generated tokens.
+   *
+   * @return the max inactive interval time of the generated tokens in seconds.
+   */
+  protected long getMaxInactiveInterval() {
+return maxInactiveInterval / 1000;
+  }
+
+  /**
* Returns the validity time of the generated tokens.
*
* @return the validity time of the generated tokens, in seconds.
@@ -510,8 +529,10 @@ public class AuthenticationFilter implements Filter {
* @throws ServletException thrown if a processing error occurred.
*/
   @Override
-  public void doFilter(ServletRequest request, ServletResponse response, 
FilterChain filterChain)
-  throws IOException, ServletException {
+  public void doFilter(ServletRequest request,
+   ServletResponse response,
+   FilterChain filterChain)
+   throws IOException, ServletException {
 boolean unauthorizedResponse = true;
 int errCode = HttpServletResponse.SC_UNAUTHORIZED;
 AuthenticationException authenticationEx = null;
@@ -533,19 +554,27 @@ public class AuthenticationFilter implements Filter {
   if (authHandler.managementOperation(token, httpRequest, httpResponse)) {
 if (token == null) {
   if (LOG.isDebugEnabled()) {
-LOG.debug("Request [{}] triggering authentication",
getRequestURL(httpRequest));
+LOG.debug("Request [{}] triggering authentication",
+getRequestURL(httpRequest));
   }
   token = authHandler.authenticate(httpRequest, httpResponse);
-  if (token != null && token.getExpires() != 0 &&
-  token != 

[16/50] [abbrv] hadoop git commit: HDFS-8917. Cleanup BlockInfoUnderConstruction from comments and tests. Contributed by Zhe Zhang.

2015-08-25 Thread aengineer
HDFS-8917. Cleanup BlockInfoUnderConstruction from comments and tests. 
Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e14f798
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e14f798
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e14f798

Branch: refs/heads/HDFS-7240
Commit: 4e14f7982a6e57bf08deb3b266806c2b779a157d
Parents: 3aac475
Author: Jing Zhao ji...@apache.org
Authored: Wed Aug 19 15:11:37 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Aug 19 15:11:37 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/blockmanagement/BlockInfo.java  |  1 -
 .../blockmanagement/BlockInfoContiguous.java|  3 +-
 .../server/blockmanagement/BlockManager.java|  2 +-
 .../BlockUnderConstructionFeature.java  |  4 +-
 .../namenode/FileUnderConstructionFeature.java  |  2 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  2 +-
 .../org/apache/hadoop/hdfs/TestFileAppend.java  |  4 +-
 .../TestBlockInfoUnderConstruction.java | 80 
 .../TestBlockUnderConstructionFeature.java  | 80 
 .../namenode/snapshot/SnapshotTestHelper.java   |  4 +-
 11 files changed, 93 insertions(+), 92 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e14f798/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b7fbc23..080f0d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -816,6 +816,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8803. Move DfsClientConf to hdfs-client. (Mingliang Liu via wheat9)
 
+HDFS-8917. Cleanup BlockInfoUnderConstruction from comments and tests.
+(Zhe Zhang via jing9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e14f798/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 94dac35..659be56 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -78,7 +78,6 @@ public abstract class  BlockInfo extends Block
 
   /**
* Copy construction.
-   * This is used to convert BlockInfoUnderConstruction
* @param from BlockInfo to copy from.
*/
   protected BlockInfo(BlockInfo from) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e14f798/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index eff89a8..42934c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -37,8 +37,7 @@ public class BlockInfoContiguous extends BlockInfo {
 
   /**
* Copy construction.
-   * This is used to convert BlockReplicationInfoUnderConstruction
-   * @param from BlockReplicationInfo to copy from.
+   * @param from BlockInfoContiguous to copy from.
*/
   protected BlockInfoContiguous(BlockInfoContiguous from) {
 super(from);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e14f798/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index aad7fec..f2d0515 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 

[04/50] [abbrv] hadoop git commit: YARN-3857: Memory leak in ResourceManager with SIMPLE mode. Contributed by mujunchao.

2015-08-25 Thread aengineer
YARN-3857: Memory leak in ResourceManager with SIMPLE mode. Contributed by 
mujunchao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a76a010
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a76a010
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a76a010

Branch: refs/heads/HDFS-7240
Commit: 3a76a010b85176f2bcb85ed6f74c25dcb8acfe4d
Parents: fc509f6
Author: Zhihai Xu z...@apache.org
Authored: Tue Aug 18 10:36:40 2015 -0700
Committer: Zhihai Xu z...@apache.org
Committed: Tue Aug 18 10:36:40 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  5 ++-
 .../rmapp/attempt/RMAppAttemptImpl.java |  8 +++--
 .../ClientToAMTokenSecretManagerInRM.java   |  7 +
 .../attempt/TestRMAppAttemptTransitions.java| 32 
 4 files changed, 48 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a76a010/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 66978a0..d900617 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -827,7 +827,10 @@ Release 2.7.2 - UNRELEASED
 
    YARN-3999. RM hangs on draining events. (Jian He via xgong)
 
-Release 2.7.1 - 2015-07-06 
+YARN-3857: Memory leak in ResourceManager with SIMPLE mode.
+(mujunchao via zxu)
+
+Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a76a010/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 80f5eb0..74a4000 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1309,9 +1309,11 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 
   // register the ClientTokenMasterKey after it is saved in the store,
   // otherwise client may hold an invalid ClientToken after RM restarts.
-  appAttempt.rmContext.getClientToAMTokenSecretManager()
-  .registerApplication(appAttempt.getAppAttemptId(),
-appAttempt.getClientTokenMasterKey());
+  if (UserGroupInformation.isSecurityEnabled()) {
+appAttempt.rmContext.getClientToAMTokenSecretManager()
+.registerApplication(appAttempt.getAppAttemptId(),
+appAttempt.getClientTokenMasterKey());
+  }
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a76a010/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java
index 4fbe2ce..4047bd5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java
@@ -22,6 +22,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 import javax.crypto.SecretKey;
+import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager;
@@ -61,4 +62,10 @@ public class ClientToAMTokenSecretManagerInRM 

  1   2   3   4   5   6   7   8   9   10   >