hadoop git commit: HADOOP-11609. Correct credential commands info in CommandsManual.html#credential. Contributed by Varun Saxena.

2015-03-24 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk c6c396fcd -> 6e891a921


HADOOP-11609. Correct credential commands info in 
CommandsManual.html#credential. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e891a92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e891a92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e891a92

Branch: refs/heads/trunk
Commit: 6e891a921e00b122390a976dfd13838472a7fcc6
Parents: c6c396f
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 24 20:57:39 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Mar 24 20:57:39 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/security/alias/CredentialShell.java   | 2 +-
 .../hadoop-common/src/site/markdown/CommandsManual.md| 4 ++--
 3 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e891a92/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 430015d..4f0cf97 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1136,6 +1136,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11729. Fix link to cgroups doc in site.xml. (Masatake Iwasaki via
 ozawa)
 
+HADOOP-11609. Correct credential commands info in
+CommandsManual.html#credential. (Varun Saxena via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e891a92/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
index e8a721f..265ed16 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
@@ -81,7 +81,7 @@ public class CredentialShell extends Configured implements 
Tool {
   * <pre>
* % hadoop credential create alias [-provider providerPath]
* % hadoop credential list [-provider providerPath]
-   * % hadoop credential delete alias [-provider providerPath] [-i]
+   * % hadoop credential delete alias [-provider providerPath] [-f]
   * </pre>
* @param args
* @return 0 if the argument(s) were recognized, 1 otherwise

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e891a92/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 33986ae..207160e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -128,8 +128,8 @@ Usage: `hadoop credential subcommand [options]`
 
 | COMMAND\_OPTION | Description |
|:---- |:---- |
-| create *alias* [-v *value*][-provider *provider-path*] | Prompts the user 
for a credential to be stored as the given alias when a value is not provided 
via `-v`. The *hadoop.security.credential.provider.path* within the 
core-site.xml file will be used unless a `-provider` is indicated. |
-| delete *alias* [-i][-provider *provider-path*] | Deletes the credential with 
the provided alias and optionally warns the user when `--interactive` is used. 
The *hadoop.security.credential.provider.path* within the core-site.xml file 
will be used unless a `-provider` is indicated. |
+| create *alias* [-provider *provider-path*] | Prompts the user for a 
credential to be stored as the given alias. The 
*hadoop.security.credential.provider.path* within the core-site.xml file will 
be used unless a `-provider` is indicated. |
+| delete *alias* [-provider *provider-path*] [-f] | Deletes the credential 
with the provided alias. The *hadoop.security.credential.provider.path* within 
the core-site.xml file will be used unless a `-provider` is indicated. The 
command asks for confirmation unless `-f` is specified. |
| list [-provider *provider-path*] | Lists all of the credential aliases. The 
*hadoop.security.credential.provider.path* within the core-site.xml file will 
be used unless a `-provider` is indicated. |
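
For context, a minimal sketch (not part of this patch) of exercising the
corrected delete semantics by driving CredentialShell programmatically; the
alias "mykey" and the jceks provider path are hypothetical example values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.alias.CredentialShell;
import org.apache.hadoop.util.ToolRunner;

public class CredentialShellExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent to the CLI form documented above:
    //   hadoop credential delete mykey -provider jceks://file/tmp/test.jceks -f
    // With -f the confirmation prompt is skipped; without it, the shell asks
    // before deleting, as the corrected table now states.
    int ret = ToolRunner.run(conf, new CredentialShell(),
        new String[] {"delete", "mykey",
            "-provider", "jceks://file/tmp/test.jceks", "-f"});
    System.exit(ret);
  }
}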

hadoop git commit: HADOOP-11609. Correct credential commands info in CommandsManual.html#credential. Contributed by Varun Saxena.

2015-03-24 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c3e2c20c9 -> a732b58be


HADOOP-11609. Correct credential commands info in 
CommandsManual.html#credential. Contributed by Varun Saxena.

(cherry picked from commit 6e891a921e00b122390a976dfd13838472a7fcc6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a732b58b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a732b58b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a732b58b

Branch: refs/heads/branch-2
Commit: a732b58bebd3592144dc138effb02180ebc98016
Parents: c3e2c20
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 24 20:57:39 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Mar 24 20:57:56 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/security/alias/CredentialShell.java   | 2 +-
 .../hadoop-common/src/site/markdown/CommandsManual.md| 4 ++--
 3 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a732b58b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index ba6ab97..9474f02 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -725,6 +725,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11729. Fix link to cgroups doc in site.xml. (Masatake Iwasaki via
 ozawa)
 
+HADOOP-11609. Correct credential commands info in
+CommandsManual.html#credential. (Varun Saxena via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a732b58b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
index 7a632c1..3405cb7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
@@ -81,7 +81,7 @@ public class CredentialShell extends Configured implements 
Tool {
   * <pre>
* % hadoop credential create alias [-provider providerPath]
* % hadoop credential list [-provider providerPath]
-   * % hadoop credential delete alias [-provider providerPath] [-i]
+   * % hadoop credential delete alias [-provider providerPath] [-f]
   * </pre>
* @param args
* @return 0 if the argument(s) were recognized, 1 otherwise

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a732b58b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 3a61445..ae9c442 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -106,8 +106,8 @@ Usage: `hadoop credential subcommand [options]`
 
 | COMMAND\_OPTION | Description |
|:---- |:---- |
-| create *alias* [-v *value*][-provider *provider-path*] | Prompts the user 
for a credential to be stored as the given alias when a value is not provided 
via `-v`. The *hadoop.security.credential.provider.path* within the 
core-site.xml file will be used unless a `-provider` is indicated. |
-| delete *alias* [-i][-provider *provider-path*] | Deletes the credential with 
the provided alias and optionally warns the user when `--interactive` is used. 
The *hadoop.security.credential.provider.path* within the core-site.xml file 
will be used unless a `-provider` is indicated. |
+| create *alias* [-provider *provider-path*] | Prompts the user for a 
credential to be stored as the given alias. The 
*hadoop.security.credential.provider.path* within the core-site.xml file will 
be used unless a `-provider` is indicated. |
+| delete *alias* [-provider *provider-path*] [-f] | Deletes the credential 
with the provided alias. The *hadoop.security.credential.provider.path* within 
the core-site.xml file will be used unless a `-provider` is indicated. The 
command asks for confirmation unless `-f` is specified. |
| list [-provider *provider-path*] | Lists all of the credential aliases. The 
*hadoop.security.credential.provider.path* within the core-site.xml file will 
be used unless a `-provider` is indicated. |

hadoop git commit: HADOOP-11609. Correct credential commands info in CommandsManual.html#credential. Contributed by Varun Saxena.

2015-03-24 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 da318cbbd -> 913fdea4e


HADOOP-11609. Correct credential commands info in 
CommandsManual.html#credential. Contributed by Varun Saxena.

(cherry picked from commit 6e891a921e00b122390a976dfd13838472a7fcc6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/913fdea4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/913fdea4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/913fdea4

Branch: refs/heads/branch-2.7
Commit: 913fdea4e2e3220445a16789addb488019bd001a
Parents: da318cb
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 24 20:57:39 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Mar 24 20:58:13 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/security/alias/CredentialShell.java   | 2 +-
 .../hadoop-common/src/site/markdown/CommandsManual.md| 4 ++--
 3 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/913fdea4/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index f7a7775..60af63f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -686,6 +686,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11729. Fix link to cgroups doc in site.xml. (Masatake Iwasaki via
 ozawa)
 
+HADOOP-11609. Correct credential commands info in
+CommandsManual.html#credential. (Varun Saxena via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/913fdea4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
index 7a632c1..3405cb7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
@@ -81,7 +81,7 @@ public class CredentialShell extends Configured implements 
Tool {
   * <pre>
* % hadoop credential create alias [-provider providerPath]
* % hadoop credential list [-provider providerPath]
-   * % hadoop credential delete alias [-provider providerPath] [-i]
+   * % hadoop credential delete alias [-provider providerPath] [-f]
   * </pre>
* @param args
* @return 0 if the argument(s) were recognized, 1 otherwise

http://git-wip-us.apache.org/repos/asf/hadoop/blob/913fdea4/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 3a61445..ae9c442 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -106,8 +106,8 @@ Usage: `hadoop credential subcommand [options]`
 
 | COMMAND\_OPTION | Description |
|:---- |:---- |
-| create *alias* [-v *value*][-provider *provider-path*] | Prompts the user 
for a credential to be stored as the given alias when a value is not provided 
via `-v`. The *hadoop.security.credential.provider.path* within the 
core-site.xml file will be used unless a `-provider` is indicated. |
-| delete *alias* [-i][-provider *provider-path*] | Deletes the credential with 
the provided alias and optionally warns the user when `--interactive` is used. 
The *hadoop.security.credential.provider.path* within the core-site.xml file 
will be used unless a `-provider` is indicated. |
+| create *alias* [-provider *provider-path*] | Prompts the user for a 
credential to be stored as the given alias. The 
*hadoop.security.credential.provider.path* within the core-site.xml file will 
be used unless a `-provider` is indicated. |
+| delete *alias* [-provider *provider-path*] [-f] | Deletes the credential 
with the provided alias. The *hadoop.security.credential.provider.path* within 
the core-site.xml file will be used unless a `-provider` is indicated. The 
command asks for confirmation unless `-f` is specified. |
| list [-provider *provider-path*] | Lists all of the credential aliases. The 
*hadoop.security.credential.provider.path* within the core-site.xml file will 
be used unless a `-provider` is indicated. |

hadoop git commit: Fix CHANGES.txt for HADOOP-11602.

2015-03-24 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6e891a921 -> 3ca5bd163


Fix CHANGES.txt for HADOOP-11602.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ca5bd16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ca5bd16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ca5bd16

Branch: refs/heads/trunk
Commit: 3ca5bd163292e661473017e70b9ca77f5a5b78c0
Parents: 6e891a9
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 24 21:06:26 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Mar 24 21:06:26 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ca5bd16/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4f0cf97..cdb88d2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -415,8 +415,6 @@ Trunk (Unreleased)
 HADOOP-10774. Update KerberosTestUtils for hadoop-auth tests when using
 IBM Java (sangamesh via aw)
 
-HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)
-
 HADOOP-11653. shellprofiles should require .sh extension
 (Brahma Reddy Battula via aw)
 
@@ -1105,6 +1103,8 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)
 
+HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)
+
 HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN.
 (Duo Zhang via wheat9)
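
As background on the HADOOP-11602 entry being moved here, a short sketch of
the locale bug that fix addresses (illustrative only, not code from the patch):
locale-sensitive case folding breaks ASCII identifier handling under locales
such as Turkish.

import java.util.Locale;

public class LocaleCaseExample {
  public static void main(String[] args) {
    // Under a Turkish locale, ASCII 'I' lower-cases to the dotless 'ı',
    // corrupting case-insensitive comparisons of configuration keys.
    System.out.println("CLIENT".toLowerCase(new Locale("tr", "TR"))); // clıent
    // The locale-stable form HADOOP-11602 standardizes on:
    System.out.println("CLIENT".toLowerCase(Locale.ENGLISH));         // client
  }
}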
 



hadoop git commit: HDFS-7713. Implement mkdirs in the HDFS Web UI. Contributed by Ravi Prakash.

2015-03-24 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 483f77b75 -> 161dae898


HDFS-7713. Implement mkdirs in the HDFS Web UI. Contributed by Ravi Prakash.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/161dae89
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/161dae89
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/161dae89

Branch: refs/heads/branch-2
Commit: 161dae898fc2f45410825b2fcbcd76197dfd5d6b
Parents: 483f77b
Author: Haohui Mai whe...@apache.org
Authored: Tue Mar 24 15:48:52 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Tue Mar 24 15:49:08 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../src/main/webapps/hdfs/explorer.html | 53 ++--
 .../src/main/webapps/hdfs/explorer.js   | 22 
 3 files changed, 72 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/161dae89/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 07c9c62..bb9f7ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -22,6 +22,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7854. Separate class DataStreamer out of DFSOutputStream. (Li Bo via
 jing9)
 
+HDFS-7713. Implement mkdirs in the HDFS Web UI. (Ravi Prakash via wheat9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/161dae89/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 7b34044..cd6623c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -87,13 +87,56 @@
      <button type="button" class="close"
        onclick="$('#alert-panel').hide();">&times;</button>
      <div class="alert-body" id="alert-panel-body"></div>
  </div>
+
+<div class="modal" id="btn-create-directory" tabindex="-1" role="dialog"
+  aria-hidden="true">
+  <div class="modal-dialog">
+    <div class="modal-content">
+      <div class="modal-header">
+        <button type="button" class="close"
+          data-dismiss="modal" aria-hidden="true">&times;</button>
+        <h4 class="modal-title">Create Directory</h4>
+      </div>
+      <div class="modal-body">
+        <div class="form-group">
+          <div class="input-group">
+            <span class="input-group-addon" id="new_directory_pwd"></span>
+            <input type="text" class="form-control" id="new_directory"
+              placeholder="New Directory Name" />
+          </div>
+        </div>
+      </div>
+      <div class="modal-footer">
+        <button type="button" class="btn"
+          data-dismiss="modal">Cancel</button>
+        <button type="button" class="btn btn-success"
+          id="btn-create-directory-send" data-complete-text="Creating...">
+          Create
+        </button>
+      </div>
+    </div>
+  </div>
+</div>
+
  <div class="row">
-  <form onsubmit="return false;">
-    <div class="input-group"><input type="text" class="form-control" id=
-      "directory" /> <span class="input-group-btn"><button class="btn btn-default"
-      type="submit" id="btn-nav-directory"><span class="input-group-btn">Go!</span></button></span></div>
-  </form>
+  <div class="col-xs-11">
+    <form onsubmit="return false;">
+      <div class="input-group">
+        <input type="text" class="form-control" id="directory"/>
+        <span class="input-group-btn">
+          <button class="btn btn-default" type="button" id="btn-nav-directory">Go!</button>
+        </span>
+      </div>
+    </form>
+  </div>
+  <div class="col-xs-1">
+    <button type="button" class="btn btn-default" data-toggle="modal"
+      aria-label="New Directory" data-target="#btn-create-directory"
+      title="Create Directory">
+      <span class="glyphicon glyphicon-folder-open"></span>
+    </button>
  </div>
+</div>
+
  <br />
  <div id="panel"></div>


http://git-wip-us.apache.org/repos/asf/hadoop/blob/161dae89/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 131b2aa..5572880 100644
--- 
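
The explorer.js hunk is truncated above; as a hedged sketch of the underlying
operation, the new Create Directory dialog ultimately issues a WebHDFS MKDIRS
call. Host, port, path, and user below are hypothetical example values, not
taken from the patch.

import java.net.HttpURLConnection;
import java.net.URL;

public class WebHdfsMkdirsExample {
  public static void main(String[] args) throws Exception {
    // WebHDFS creates directories with: PUT /webhdfs/v1/<path>?op=MKDIRS
    URL url = new URL("http://namenode.example.com:50070"
        + "/webhdfs/v1/user/alice/newdir?op=MKDIRS&user.name=alice");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    // A successful call returns HTTP 200 with a {"boolean": true} body.
    System.out.println("HTTP " + conn.getResponseCode());
    conn.disconnect();
  }
}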

hadoop git commit: HDFS-6826. Plugin interface to enable delegation of HDFS authorization assertions. Contributed by Arun Suresh.

2015-03-24 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk e38ef70fb -> 53a28afe2


HDFS-6826. Plugin interface to enable delegation of HDFS authorization 
assertions. Contributed by Arun Suresh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53a28afe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53a28afe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53a28afe

Branch: refs/heads/trunk
Commit: 53a28afe293e5bf185c8d4f2c7aea212e66015c2
Parents: e38ef70
Author: Jitendra Pandey Jitendra@Jitendra-Pandeys-MacBook-Pro-4.local
Authored: Tue Mar 24 15:43:03 2015 -0700
Committer: Jitendra Pandey Jitendra@Jitendra-Pandeys-MacBook-Pro-4.local
Committed: Tue Mar 24 16:02:40 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../DefaultINodeAttributesProvider.java |  45 
 .../server/namenode/FSDirStatAndListingOp.java  |  51 +++--
 .../hdfs/server/namenode/FSDirectory.java   |  41 +++-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   6 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  19 ++
 .../server/namenode/FSPermissionChecker.java| 222 +++---
 .../server/namenode/INodeAttributeProvider.java | 135 +++
 .../hdfs/server/namenode/INodeAttributes.java   |   3 +
 .../namenode/INodeDirectoryAttributes.java  |   4 +
 .../server/namenode/INodeFileAttributes.java|   5 +
 .../hdfs/server/namenode/INodesInPath.java  |   6 +
 .../namenode/TestFSPermissionChecker.java   |   4 +-
 .../namenode/TestINodeAttributeProvider.java| 229 +++
 15 files changed, 659 insertions(+), 115 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53a28afe/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5ade5fb..4bed2ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -390,6 +390,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7838. Expose truncate API for libhdfs. (yliu)
 
+HDFS-6826. Plugin interface to enable delegation of HDFS authorization 
+assertions. (Arun Suresh via jitendra)
+
   IMPROVEMENTS
 
 HDFS-7752. Improve description for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53a28afe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9ecf242..b5bbe5f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -477,6 +477,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String  DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0:" + 
DFS_DATANODE_IPC_DEFAULT_PORT;
  public static final String  DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY 
= "dfs.datanode.min.supported.namenode.version";
  public static final String  
DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
+  public static final String  DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY = 
"dfs.namenode.inode.attributes.provider.class";
 
  public static final String  DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = 
"dfs.block.access.token.enable";
  public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53a28afe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
new file mode 100644
index 0000000..45aa1b5
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this 
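
A hedged skeleton of the plugin point this patch introduces; the method set is
inferred from the files listed above (INodeAttributeProvider, INodeAttributes),
so treat the exact signatures as assumptions rather than the authoritative API.
The provider is wired in via the new dfs.namenode.inode.attributes.provider.class
key shown in the DFSConfigKeys hunk.

import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

public class PassthroughAttributeProvider extends INodeAttributeProvider {
  @Override
  public void start() {
    // e.g. connect to an external authorization or metadata service
  }

  @Override
  public void stop() {
    // release whatever start() acquired
  }

  @Override
  public INodeAttributes getAttributes(String[] pathElements,
                                       INodeAttributes inode) {
    // Pass-through: return the stored attributes unchanged. A real provider
    // could substitute owner, group, or permissions from an external source.
    return inode;
  }
}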

hadoop git commit: HDFS-7713. Implement mkdirs in the HDFS Web UI. Contributed by Ravi Prakash.

2015-03-24 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk a16bfff71 -> e38ef70fb


HDFS-7713. Implement mkdirs in the HDFS Web UI. Contributed by Ravi Prakash.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e38ef70f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e38ef70f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e38ef70f

Branch: refs/heads/trunk
Commit: e38ef70fbc60f062992c834b1cca6e9ba4baef6e
Parents: a16bfff
Author: Haohui Mai whe...@apache.org
Authored: Tue Mar 24 15:48:52 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Tue Mar 24 15:48:52 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../src/main/webapps/hdfs/explorer.html | 53 ++--
 .../src/main/webapps/hdfs/explorer.js   | 22 
 3 files changed, 72 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e38ef70f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4ec0891..5ade5fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -335,6 +335,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7854. Separate class DataStreamer out of DFSOutputStream. (Li Bo via
 jing9)
 
+HDFS-7713. Implement mkdirs in the HDFS Web UI. (Ravi Prakash via wheat9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e38ef70f/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 7b34044..cd6623c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -87,13 +87,56 @@
      <button type="button" class="close"
        onclick="$('#alert-panel').hide();">&times;</button>
      <div class="alert-body" id="alert-panel-body"></div>
  </div>
+
+<div class="modal" id="btn-create-directory" tabindex="-1" role="dialog"
+  aria-hidden="true">
+  <div class="modal-dialog">
+    <div class="modal-content">
+      <div class="modal-header">
+        <button type="button" class="close"
+          data-dismiss="modal" aria-hidden="true">&times;</button>
+        <h4 class="modal-title">Create Directory</h4>
+      </div>
+      <div class="modal-body">
+        <div class="form-group">
+          <div class="input-group">
+            <span class="input-group-addon" id="new_directory_pwd"></span>
+            <input type="text" class="form-control" id="new_directory"
+              placeholder="New Directory Name" />
+          </div>
+        </div>
+      </div>
+      <div class="modal-footer">
+        <button type="button" class="btn"
+          data-dismiss="modal">Cancel</button>
+        <button type="button" class="btn btn-success"
+          id="btn-create-directory-send" data-complete-text="Creating...">
+          Create
+        </button>
+      </div>
+    </div>
+  </div>
+</div>
+
  <div class="row">
-  <form onsubmit="return false;">
-    <div class="input-group"><input type="text" class="form-control" id=
-      "directory" /> <span class="input-group-btn"><button class="btn btn-default"
-      type="submit" id="btn-nav-directory"><span class="input-group-btn">Go!</span></button></span></div>
-  </form>
+  <div class="col-xs-11">
+    <form onsubmit="return false;">
+      <div class="input-group">
+        <input type="text" class="form-control" id="directory"/>
+        <span class="input-group-btn">
+          <button class="btn btn-default" type="button" id="btn-nav-directory">Go!</button>
+        </span>
+      </div>
+    </form>
+  </div>
+  <div class="col-xs-1">
+    <button type="button" class="btn btn-default" data-toggle="modal"
+      aria-label="New Directory" data-target="#btn-create-directory"
+      title="Create Directory">
+      <span class="glyphicon glyphicon-folder-open"></span>
+    </button>
  </div>
+</div>
+
  <br />
  <div id="panel"></div>


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e38ef70f/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 131b2aa..5572880 100644
--- 

hadoop git commit: HDFS-6826. Plugin interface to enable delegation of HDFS authorization assertions. Contributed by Arun Suresh.

2015-03-24 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 161dae898 -> 456cec127


HDFS-6826. Plugin interface to enable delegation of HDFS authorization 
assertions. Contributed by Arun Suresh.

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/456cec12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/456cec12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/456cec12

Branch: refs/heads/branch-2
Commit: 456cec127b23b9195784dd4b35b75a2b69ad2a4a
Parents: 161dae8
Author: Jitendra Pandey Jitendra@Jitendra-Pandeys-MacBook-Pro-4.local
Authored: Tue Mar 24 15:43:03 2015 -0700
Committer: Jitendra Pandey Jitendra@Jitendra-Pandeys-MacBook-Pro-4.local
Committed: Tue Mar 24 16:02:21 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../DefaultINodeAttributesProvider.java |  45 
 .../server/namenode/FSDirStatAndListingOp.java  |  51 +++--
 .../hdfs/server/namenode/FSDirectory.java   |  41 +++-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   6 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  19 ++
 .../server/namenode/FSPermissionChecker.java| 222 +++---
 .../server/namenode/INodeAttributeProvider.java | 135 +++
 .../hdfs/server/namenode/INodeAttributes.java   |   3 +
 .../namenode/INodeDirectoryAttributes.java  |   4 +
 .../server/namenode/INodeFileAttributes.java|   5 +
 .../hdfs/server/namenode/INodesInPath.java  |   6 +
 .../namenode/TestFSPermissionChecker.java   |   4 +-
 .../namenode/TestINodeAttributeProvider.java| 229 +++
 15 files changed, 659 insertions(+), 115 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/456cec12/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bb9f7ff..01d678f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -79,6 +79,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7838. Expose truncate API for libhdfs. (yliu)
 
+HDFS-6826. Plugin interface to enable delegation of HDFS authorization 
+assertions. (Arun Suresh via jitendra)
+
   IMPROVEMENTS
 
 HDFS-7752. Improve description for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456cec12/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0894e0f..18f4686 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -483,6 +483,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String  DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0:" + 
DFS_DATANODE_IPC_DEFAULT_PORT;
  public static final String  DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY 
= "dfs.datanode.min.supported.namenode.version";
  public static final String  
DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "2.1.0-beta";
+  public static final String  DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY = 
"dfs.namenode.inode.attributes.provider.class";
 
  public static final String  DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = 
"dfs.block.access.token.enable";
  public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456cec12/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
new file mode 100644
index 0000000..45aa1b5
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF 

hadoop git commit: HDFS-6826. Plugin interface to enable delegation of HDFS authorization assertions. Contributed by Arun Suresh.

2015-03-24 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 14d1cccfa -> d286673c6


HDFS-6826. Plugin interface to enable delegation of HDFS authorization 
assertions. Contributed by Arun Suresh.

(cherry picked from commit 456cec127b23b9195784dd4b35b75a2b69ad2a4a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d286673c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d286673c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d286673c

Branch: refs/heads/branch-2.7
Commit: d286673c602524af08935ea132c8afd181b6e2e4
Parents: 14d1ccc
Author: Jitendra Pandey Jitendra@Jitendra-Pandeys-MacBook-Pro-4.local
Authored: Tue Mar 24 16:17:06 2015 -0700
Committer: Jitendra Pandey Jitendra@Jitendra-Pandeys-MacBook-Pro-4.local
Committed: Tue Mar 24 16:17:06 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../DefaultINodeAttributesProvider.java |  45 
 .../server/namenode/FSDirStatAndListingOp.java  |  51 +++--
 .../hdfs/server/namenode/FSDirectory.java   |  41 +++-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   6 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  19 ++
 .../server/namenode/FSPermissionChecker.java| 222 +++---
 .../server/namenode/INodeAttributeProvider.java | 135 +++
 .../hdfs/server/namenode/INodeAttributes.java   |   3 +
 .../namenode/INodeDirectoryAttributes.java  |   4 +
 .../server/namenode/INodeFileAttributes.java|   5 +
 .../hdfs/server/namenode/INodesInPath.java  |   6 +
 .../namenode/TestFSPermissionChecker.java   |   4 +-
 .../namenode/TestINodeAttributeProvider.java| 229 +++
 15 files changed, 659 insertions(+), 115 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d286673c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4dcf9eb..cb6b88d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -41,6 +41,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7838. Expose truncate API for libhdfs. (yliu)
 
+HDFS-6826. Plugin interface to enable delegation of HDFS authorization 
+assertions. (Arun Suresh via jitendra)
+
   IMPROVEMENTS
 
 HDFS-7752. Improve description for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d286673c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d1c37df..c80f383 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -480,6 +480,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String  DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0:" + 
DFS_DATANODE_IPC_DEFAULT_PORT;
  public static final String  DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY 
= "dfs.datanode.min.supported.namenode.version";
  public static final String  
DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "2.1.0-beta";
+  public static final String  DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY = 
"dfs.namenode.inode.attributes.provider.class";
 
  public static final String  DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = 
"dfs.block.access.token.enable";
  public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d286673c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
new file mode 100644
index 0000000..45aa1b5
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you 

hadoop git commit: HDFS-7961. Trigger full block report after hot swapping disk. Contributed by Eddy Xu.

2015-03-24 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 01460c0bc -> a7f447d46


HDFS-7961. Trigger full block report after hot swapping disk. Contributed by 
Eddy Xu.

(cherry picked from commit 6413d34986f3399023426c89c9a0d401c9557716)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7f447d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7f447d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7f447d4

Branch: refs/heads/branch-2.7
Commit: a7f447d4602e273626f57960a0141e549bce7059
Parents: 01460c0
Author: Andrew Wang w...@apache.org
Authored: Tue Mar 24 09:07:02 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Tue Mar 24 09:07:27 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |  4 ++
 .../datanode/TestDataNodeHotSwapVolumes.java| 42 
 3 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f447d4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e2729a7..7092f70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -922,6 +922,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7960. The full block report should prune zombie storages even if
 they're not empty. (cmccabe and Eddy Xu via wang)
 
+HDFS-7961. Trigger full block report after hot swapping disk. (Eddy Xu via 
wang)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f447d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index b32a0fc..c6641f5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -637,6 +637,10 @@ public class DataNode extends ReconfigurableBase
   conf.set(DFS_DATANODE_DATA_DIR_KEY,
  Joiner.on(",").join(effectiveVolumes));
   dataDirs = getStorageLocations(conf);
+
+  // Send a full block report to let NN acknowledge the volume changes.
+  triggerBlockReport(new BlockReportOptions.Factory()
+  .setIncremental(false).build());
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f447d4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 2f51d45..f5772e3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -34,12 +34,16 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
@@ -59,6 +63,7 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import 
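
For context (not part of this patch), the same kind of full report can also be
requested from the admin side; a minimal sketch, assuming a datanode IPC
address of 127.0.0.1:50020 as an illustrative value.

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class TriggerFullBlockReport {
  public static void main(String[] args) throws Exception {
    // Equivalent to: hdfs dfsadmin -triggerBlockReport <datanode_host:ipc_port>
    // Omitting -incremental requests a full report, the same kind the
    // hot-swap path above now schedules internally.
    int ret = ToolRunner.run(new HdfsConfiguration(), new DFSAdmin(),
        new String[] {"-triggerBlockReport", "127.0.0.1:50020"});
    System.exit(ret);
  }
}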

hadoop git commit: HDFS-7961. Trigger full block report after hot swapping disk. Contributed by Eddy Xu.

2015-03-24 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4170c9914 -> 6413d3498


HDFS-7961. Trigger full block report after hot swapping disk. Contributed by 
Eddy Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6413d349
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6413d349
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6413d349

Branch: refs/heads/trunk
Commit: 6413d34986f3399023426c89c9a0d401c9557716
Parents: 4170c99
Author: Andrew Wang w...@apache.org
Authored: Tue Mar 24 09:07:02 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Tue Mar 24 09:07:02 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |  4 ++
 .../datanode/TestDataNodeHotSwapVolumes.java| 42 
 3 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6413d349/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ee9a5db..70be18a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1251,6 +1251,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7960. The full block report should prune zombie storages even if
 they're not empty. (cmccabe and Eddy Xu via wang)
 
+HDFS-7961. Trigger full block report after hot swapping disk. (Eddy Xu via 
wang)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6413d349/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index e9befb4..d94375e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -634,6 +634,10 @@ public class DataNode extends ReconfigurableBase
   conf.set(DFS_DATANODE_DATA_DIR_KEY,
  Joiner.on(",").join(effectiveVolumes));
   dataDirs = getStorageLocations(conf);
+
+  // Send a full block report to let NN acknowledge the volume changes.
+  triggerBlockReport(new BlockReportOptions.Factory()
+  .setIncremental(false).build());
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6413d349/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 2f51d45..f5772e3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -34,12 +34,16 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
@@ -59,6 +63,7 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.mockito.Mockito;
 
 import static 

hadoop git commit: MAPREDUCE-6285. ClientServiceDelegate should not retry upon AuthenticationException. Contributed by Jonathan Eagles.

2015-03-24 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3ca5bd163 -> 4170c9914


MAPREDUCE-6285. ClientServiceDelegate should not retry upon 
AuthenticationException. Contributed by Jonathan Eagles.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4170c991
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4170c991
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4170c991

Branch: refs/heads/trunk
Commit: 4170c99147b0cb6d561ff626cea140e0a061b314
Parents: 3ca5bd1
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Mar 25 00:56:26 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed Mar 25 00:56:26 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../hadoop/mapred/ClientServiceDelegate.java|  6 +++
 .../mapred/TestClientServiceDelegate.java   | 44 
 3 files changed, 53 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4170c991/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index b8a2a1c..2b16c30 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -496,6 +496,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6275. Race condition in FileOutputCommitter v2 for
 user-specified task output subdirs (Gera Shegalov and Siqi Li via jlowe)
 
+MAPREDUCE-6285. ClientServiceDelegate should not retry upon
+AuthenticationException. (Jonathan Eagles via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4170c991/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 686fa0c..8517c19 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
@@ -64,6 +64,7 @@ import 
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -328,6 +329,11 @@ public class ClientServiceDelegate {
 // Force reconnection by setting the proxy to null.
 realProxy = null;
 // HS/AMS shut down
+
+if (e.getCause() instanceof AuthorizationException) {
+  throw new IOException(e.getTargetException());
+}
+
 // if it's AM shut down, do not decrement maxClientRetry as we wait for
 // AM to be restarted.
 if (!usingAMProxy.get()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4170c991/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
index 7d6b2f3..b85f18d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.security.authorize.AuthorizationException;
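
To make the intent of the guard concrete, a self-contained sketch (not from
the patch) of the fail-fast pattern it applies: an authorization failure is
permanent, so it is surfaced immediately instead of consuming the retry budget.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import org.apache.hadoop.security.authorize.AuthorizationException;

public class FailFastRetryExample {
  // Stand-in for the reflective proxy invocation ClientServiceDelegate makes.
  static void invoke() throws InvocationTargetException {
    throw new InvocationTargetException(
        new AuthorizationException("user is not authorized"));
  }

  public static void main(String[] args) throws IOException {
    final int maxClientRetry = 3;
    for (int attempt = 0; attempt < maxClientRetry; attempt++) {
      try {
        invoke();
        return;
      } catch (InvocationTargetException e) {
        if (e.getCause() instanceof AuthorizationException) {
          // Permanent failure: rethrow instead of retrying.
          throw new IOException(e.getTargetException());
        }
        // Otherwise treat as transient and retry.
      }
    }
  }
}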
 

hadoop git commit: MAPREDUCE-6285. ClientServiceDelegate should not retry upon AuthenticationException. Contributed by Jonathan Eagles.

2015-03-24 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 913fdea4e -> 01460c0bc


MAPREDUCE-6285. ClientServiceDelegate should not retry upon 
AuthenticationException. Contributed by Jonathan Eagles.

(cherry picked from commit 4170c99147b0cb6d561ff626cea140e0a061b314)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01460c0b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01460c0b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01460c0b

Branch: refs/heads/branch-2.7
Commit: 01460c0bc9b4cbfa6556aae38b4d0a4e09f3aa5e
Parents: 913fdea
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Mar 25 00:56:26 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed Mar 25 00:57:26 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../hadoop/mapred/ClientServiceDelegate.java|  6 +++
 .../mapred/TestClientServiceDelegate.java   | 44 
 3 files changed, 53 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01460c0b/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 6dc4cab..b005c59 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -190,6 +190,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6275. Race condition in FileOutputCommitter v2 for
 user-specified task output subdirs (Gera Shegalov and Siqi Li via jlowe)
 
+MAPREDUCE-6285. ClientServiceDelegate should not retry upon
+AuthenticationException. (Jonathan Eagles via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01460c0b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 686fa0c..8517c19 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
@@ -64,6 +64,7 @@ import 
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -328,6 +329,11 @@ public class ClientServiceDelegate {
 // Force reconnection by setting the proxy to null.
 realProxy = null;
 // HS/AMS shut down
+
+if (e.getCause() instanceof AuthorizationException) {
+  throw new IOException(e.getTargetException());
+}
+
 // if it's AM shut down, do not decrement maxClientRetry as we wait for
 // AM to be restarted.
 if (!usingAMProxy.get()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01460c0b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
index 7d6b2f3..b85f18d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import 

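For context, a minimal sketch of the fail-fast guard this patch introduces, assuming a method invoked through reflection as in ClientServiceDelegate (the surrounding retry loop and the method/getProxy/args names here are illustrative, not the full Hadoop code):

    try {
      return method.invoke(getProxy(), args);
    } catch (InvocationTargetException e) {
      // An AuthorizationException is not transient: retrying the call can
      // never succeed, so surface the original cause to the caller instead
      // of decrementing the retry budget and reconnecting.
      if (e.getCause() instanceof AuthorizationException) {
        throw new IOException(e.getTargetException());
      }
      // ... otherwise fall through to the existing reconnect-and-retry logic
    }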
hadoop git commit: HDFS-7961. Trigger full block report after hot swapping disk. Contributed by Eddy Xu.

2015-03-24 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 94eb8e5b3 - e39c58fd9


HDFS-7961. Trigger full block report after hot swapping disk. Contributed by 
Eddy Xu.

(cherry picked from commit 6413d34986f3399023426c89c9a0d401c9557716)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e39c58fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e39c58fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e39c58fd

Branch: refs/heads/branch-2
Commit: e39c58fd9dfa03851301d105abcb2e62f757b3f5
Parents: 94eb8e5
Author: Andrew Wang w...@apache.org
Authored: Tue Mar 24 09:07:02 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Tue Mar 24 09:07:24 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |  4 ++
 .../datanode/TestDataNodeHotSwapVolumes.java| 42 
 3 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e39c58fd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8977904..dda75f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -951,6 +951,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7960. The full block report should prune zombie storages even if
 they're not empty. (cmccabe and Eddy Xu via wang)
 
+HDFS-7961. Trigger full block report after hot swapping disk. (Eddy Xu via 
wang)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e39c58fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index b32a0fc..c6641f5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -637,6 +637,10 @@ public class DataNode extends ReconfigurableBase
   conf.set(DFS_DATANODE_DATA_DIR_KEY,
  Joiner.on(",").join(effectiveVolumes));
   dataDirs = getStorageLocations(conf);
+
+  // Send a full block report to let NN acknowledge the volume changes.
+  triggerBlockReport(new BlockReportOptions.Factory()
+  .setIncremental(false).build());
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e39c58fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 2f51d45..f5772e3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -34,12 +34,16 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
@@ -59,6 +63,7 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import 

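For reference, the report is requested through the existing BlockReportOptions API; a minimal sketch of the call the patch adds (the dn reference stands in for the reconfiguring DataNode):

    import org.apache.hadoop.hdfs.client.BlockReportOptions;

    // A non-incremental (full) report makes the NameNode re-learn the
    // DataNode's complete volume/storage set after the hot swap.
    dn.triggerBlockReport(
        new BlockReportOptions.Factory().setIncremental(false).build());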
hadoop git commit: HDFS-6649. Documentation for setrep is wrong. (aajisaka)

2015-03-24 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-1 552bafde0 - 8151679f8


HDFS-6649. Documentation for setrep is wrong. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8151679f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8151679f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8151679f

Branch: refs/heads/branch-1
Commit: 8151679f832124a8131dba1ef652f2899b999489
Parents: 552bafd
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Mar 25 13:13:10 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Wed Mar 25 13:13:10 2015 +0900

--
 CHANGES.txt |  2 ++
 .../documentation/content/xdocs/file_system_shell.xml   | 12 
 2 files changed, 10 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8151679f/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index 6abf1f9..57f1cf4 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -280,6 +280,8 @@ Release 1.3.0 - unreleased
 HDFS-7312. Update DistCp v1 to optionally not use tmp location.
 (Joseph Prosser via yzhangal)
 
+HDFS-6649. Documentation for setrep is wrong. (aajisaka)
+
 Release 1.2.2 - unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8151679f/src/docs/src/documentation/content/xdocs/file_system_shell.xml
--
diff --git a/src/docs/src/documentation/content/xdocs/file_system_shell.xml 
b/src/docs/src/documentation/content/xdocs/file_system_shell.xml
index 49e31dd..4b24c3f 100644
--- a/src/docs/src/documentation/content/xdocs/file_system_shell.xml
+++ b/src/docs/src/documentation/content/xdocs/file_system_shell.xml
@@ -460,15 +460,19 @@
 			<section>
 				<title> setrep </title>
 				<p>
-					<code>Usage: hdfs dfs -setrep [-R] &lt;path&gt;</code>
+					<code>Usage: hdfs dfs -setrep [-R] [-w] &lt;rep&gt; &lt;path/file&gt;</code>
 				</p>
 				<p>
-					Changes the replication factor of a file. -R option is for recursively increasing the replication factor of files within a directory.
-				</p>
+					Changes the replication factor of a file.
+					-R option is for recursively increasing the replication factor
+					of files within a directory. -w option requests that the command
+					wait for the replication to complete. This can potentially
+					take a long time.
+				</p>
 				<p>Example:</p>
 				<ul>
 					<li>
-						<code> hdfs dfs -setrep -w 3 -R /user/hadoop/dir1 </code>
+						<code> hdfs dfs -setrep -R -w 3 /user/hadoop/dir1 </code>
 					</li>
 				</ul>
 				<p>Exit Code:</p>


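As an aside, the corrected shell usage maps onto the FileSystem API; a minimal programmatic sketch (the path and replication factor are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    FileSystem fs = FileSystem.get(new Configuration());
    // Equivalent of: hdfs dfs -setrep 3 /user/hadoop/file
    // (-w has no single API call; the shell polls until replication completes)
    boolean scheduled = fs.setReplication(new Path("/user/hadoop/file"), (short) 3);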

hadoop git commit: HDFS-7985. WebHDFS should be always enabled. Contributed by Li Lu.

2015-03-24 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 53a28afe2 - 80278a5f8


HDFS-7985. WebHDFS should be always enabled. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80278a5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80278a5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80278a5f

Branch: refs/heads/trunk
Commit: 80278a5f85a91b3e02e700e0b3c0a433c15e0565
Parents: 53a28af
Author: Haohui Mai whe...@apache.org
Authored: Tue Mar 24 21:55:56 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Tue Mar 24 21:55:56 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 -
 .../server/namenode/NameNodeHttpServer.java | 46 ++--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  7 ---
 .../src/main/native/libhdfs/native_mini_dfs.c   | 16 ---
 .../org/apache/hadoop/fs/TestSymlinkHdfs.java   |  1 -
 .../hadoop/hdfs/TestDistributedFileSystem.java  |  1 -
 .../java/org/apache/hadoop/hdfs/TestQuota.java  |  4 +-
 .../hdfs/security/TestDelegationToken.java  |  1 -
 .../TestDelegationTokenForProxyUser.java|  1 -
 .../hdfs/server/namenode/TestAuditLogs.java |  1 -
 .../TestNameNodeRespectsBindHostKeys.java   |  1 -
 .../hdfs/web/TestFSMainOperationsWebHdfs.java   |  1 -
 .../hadoop/hdfs/web/TestHttpsFileSystem.java|  1 -
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 12 -
 .../hdfs/web/TestWebHdfsFileSystemContract.java |  1 -
 .../hadoop/hdfs/web/TestWebHdfsTokens.java  |  1 -
 .../web/TestWebHdfsWithMultipleNameNodes.java   |  2 -
 .../apache/hadoop/hdfs/web/WebHdfsTestUtil.java |  1 -
 19 files changed, 25 insertions(+), 77 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80278a5f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4bed2ab..8d7a4e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -18,6 +18,8 @@ Trunk (Unreleased)
 option since it may incorrectly finalize an ongoing rolling upgrade.
 (Kai Sasaki via szetszwo)
 
+HDFS-7985. WebHDFS should be always enabled. (Li Lu via wheat9)
+
   NEW FEATURES
 
 HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80278a5f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b5bbe5f..d714276 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -227,8 +227,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4;
   public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = "dfs.web.authentication.filter";
   public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT = AuthFilter.class.getName();
-  public static final String  DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
-  public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
   public static final String  DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
   public static final String  DFS_WEBHDFS_USER_PATTERN_DEFAULT = "^[A-Za-z_][A-Za-z0-9._-]*[$]?$";
   public static final String  DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80278a5f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 662c0e9..a671d21 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -67,30 +67,28 @@ public class NameNodeHttpServer {
   }
 
   private void initWebHdfs(Configuration conf) throws IOException {
-if 

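With the enable/disable toggle removed, clients can assume the webhdfs endpoint is served from the NameNode HTTP port; a minimal access sketch (host and port are placeholders; 50070 is the 2.x default):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // webhdfs:// speaks HTTP to the NameNode, so no HDFS RPC client is needed.
    FileSystem webFs = FileSystem.get(
        URI.create("webhdfs://namenode.example.com:50070"), new Configuration());
    boolean exists = webFs.exists(new Path("/tmp"));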
hadoop git commit: HDFS-7875. Improve log message when wrong value configured for dfs.datanode.failed.volumes.tolerated. Contributed by Nijel.

2015-03-24 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 51f1f4937 - eda02540c


HDFS-7875. Improve log message when wrong value configured for 
dfs.datanode.failed.volumes.tolerated. Contributed by Nijel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eda02540
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eda02540
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eda02540

Branch: refs/heads/trunk
Commit: eda02540ce53732585b3f31411b2e65db569eb25
Parents: 51f1f49
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 24 23:03:30 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 24 23:06:18 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 4 
 .../hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java | 6 --
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eda02540/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4f3937a..3725a03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -321,6 +321,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-7875. Improve log message when wrong value configured for
+dfs.datanode.failed.volumes.tolerated.
+(nijel via harsh)
+
 HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)
 
 HDFS-7835. make initial sleeptime in locateFollowingBlock configurable for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eda02540/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index d42c00c..05c4871 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -276,8 +276,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     this.validVolsRequired = volsConfigured - volFailuresTolerated;
 
     if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
-      throw new DiskErrorException("Invalid volume failure "
-          + " config value: " + volFailuresTolerated);
+      throw new DiskErrorException("Invalid value configured for "
+          + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
+          + ". Value configured is either less than 0 or >= "
+          + "to the number of configured volumes (" + volsConfigured + ").");
     }
     if (volsFailed > volFailuresTolerated) {
       throw new DiskErrorException("Too many failed volumes - "


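For context, a self-contained sketch of the constraint the clearer message describes (simplified from FsDatasetImpl; the method wrapper and volume count are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.DiskChecker.DiskErrorException;

    static void validateTolerated(Configuration conf, int volsConfigured)
        throws DiskErrorException {
      // Must lie in [0, volsConfigured): tolerating as many failures as there
      // are volumes would let a DataNode run with no usable storage at all.
      int volFailuresTolerated =
          conf.getInt("dfs.datanode.failed.volumes.tolerated", 0);
      if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
        throw new DiskErrorException("Invalid value configured for "
            + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated);
      }
    }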

hadoop git commit: HDFS-7875. Improve log message when wrong value configured for dfs.datanode.failed.volumes.tolerated. Contributed by Nijel.

2015-03-24 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 95bde8898 - 608ad6c2c


HDFS-7875. Improve log message when wrong value configured for 
dfs.datanode.failed.volumes.tolerated. Contributed by Nijel.

(cherry picked from commit 2da3d2ed2ff2e9b48dbda7e029aa58261c729d35)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/608ad6c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/608ad6c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/608ad6c2

Branch: refs/heads/branch-2
Commit: 608ad6c2cd18234ffb551e0784f260e0b3faf402
Parents: 95bde88
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 24 23:03:30 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 24 23:03:56 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 4 
 .../hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java | 6 --
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ad6c2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5f289dd..19c5529 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -8,6 +8,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-7875. Improve log message when wrong value configured for
+dfs.datanode.failed.volumes.tolerated.
+(nijel via harsh)
+
 HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)
 
 HDFS-7835. make initial sleeptime in locateFollowingBlock configurable for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ad6c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 6a15906..69a80c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -278,8 +278,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     this.validVolsRequired = volsConfigured - volFailuresTolerated;
 
     if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
-      throw new DiskErrorException("Invalid volume failure "
-          + " config value: " + volFailuresTolerated);
+      throw new DiskErrorException("Invalid value configured for "
+          + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
+          + ". Value configured is either less than 0 or >= "
+          + "to the number of configured volumes (" + volsConfigured + ").");
     }
     if (volsFailed > volFailuresTolerated) {
       throw new DiskErrorException("Too many failed volumes - "



hadoop git commit: HDFS-7977. NFS couldn't take percentile intervals. Contributed by Brandon Li

2015-03-24 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 cc5922607 - 14d1cccfa


HDFS-7977. NFS couldn't take percentile intervals. Contributed by Brandon Li

(cherry picked from commit 570a83ae80faf2076966acf30588733803327844)
(cherry picked from commit 260dbe96c3d21ea372c728b2cd0f4a6aeb970010)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14d1cccf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14d1cccf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14d1cccf

Branch: refs/heads/branch-2.7
Commit: 14d1cccfada2a4023ed70a2f9fdb165f0de2c61d
Parents: cc59226
Author: Brandon Li brando...@apache.org
Authored: Tue Mar 24 10:49:16 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Tue Mar 24 10:53:20 2015 -0700

--
 .../org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java |  1 -
 .../org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java   | 13 ++---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  2 ++
 .../hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md| 12 
 4 files changed, 20 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14d1cccf/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
index 09ee579..05cc0b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
@@ -72,7 +72,6 @@ public class NfsConfigKeys {
   public static final String NFS_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + NFS_HTTPS_PORT_DEFAULT;
   
   public static final String  NFS_METRICS_PERCENTILES_INTERVALS_KEY = "nfs.metrics.percentiles.intervals";
-  public static final String  NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT = "";
   
   /*
* HDFS super-user is the user with the same identity as NameNode process

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14d1cccf/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
index d36ea73..880a8a6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
@@ -90,9 +90,9 @@ public class Nfs3Metrics {
       readNanosQuantiles[i] = registry.newQuantiles("readProcessNanos"
           + interval + "s", "Read process in ns", "ops", "latency", interval);
       writeNanosQuantiles[i] = registry.newQuantiles("writeProcessNanos"
-          + interval + "s", " process in ns", "ops", "latency", interval);
+          + interval + "s", "Write process in ns", "ops", "latency", interval);
       commitNanosQuantiles[i] = registry.newQuantiles("commitProcessNanos"
-          + interval + "s", "Read process in ns", "ops", "latency", interval);
+          + interval + "s", "Commit process in ns", "ops", "latency", interval);
 }
   }
 
@@ -101,10 +101,9 @@ public class Nfs3Metrics {
 MetricsSystem ms = DefaultMetricsSystem.instance();
 JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);
 
-// Percentile measurement is [,,,] by default 
-int[] intervals = conf.getInts(conf.get(
-NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY,
-NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT));
+// Percentile measurement is [50th,75th,90th,95th,99th] currently 
+int[] intervals = conf
+.getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
 return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
   }
   
@@ -217,4 +216,4 @@ public class Nfs3Metrics {
 }
   }
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14d1cccf/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e4d737f..4dcf9eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -927,6 +927,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7961. Trigger full block report 

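For reference, Configuration.getInts parses a comma-separated list of integers directly, which is why the empty-string default could be dropped; a minimal usage sketch (interval values are illustrative):

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    conf.set("nfs.metrics.percentiles.intervals", "60,600");  // 1 min, 10 min
    int[] intervals = conf.getInts("nfs.metrics.percentiles.intervals");
    // If the key is unset, getInts returns an empty array and no quantile
    // metrics are registered.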
[01/50] [abbrv] hadoop git commit: YARN-2868. FairScheduler: Metric for latency to allocate first container for an application. (Ray Chiang via kasha)

2015-03-24 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 cbc9f1109 - df6249708 (forced update)


YARN-2868. FairScheduler: Metric for latency to allocate first container for an 
application. (Ray Chiang via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/972f1f1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/972f1f1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/972f1f1a

Branch: refs/heads/HDFS-7285
Commit: 972f1f1ab94a26ec446a272ad030fe13f03ed442
Parents: 2bc097c
Author: Karthik Kambatla ka...@apache.org
Authored: Mon Mar 23 14:07:05 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Mon Mar 23 14:07:05 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt|  3 +++
 .../resourcemanager/scheduler/QueueMetrics.java|  8 +++-
 .../scheduler/SchedulerApplicationAttempt.java | 17 +
 .../scheduler/fair/FairScheduler.java  | 11 ++-
 4 files changed, 37 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/972f1f1a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b716064..e7d4f59 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -73,6 +73,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3350. YARN RackResolver spams logs with messages at info level. 
 (Wilfred Spiegelenburg via junping_du)
 
+YARN-2868. FairScheduler: Metric for latency to allocate first container 
+for an application. (Ray Chiang via kasha)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/972f1f1a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 507b798..58b1ed1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterInt;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -74,6 +75,7 @@ public class QueueMetrics implements MetricsSource {
   @Metric("# of reserved containers") MutableGaugeInt reservedContainers;
   @Metric("# of active users") MutableGaugeInt activeUsers;
   @Metric("# of active applications") MutableGaugeInt activeApplications;
+  @Metric("App Attempt First Container Allocation Delay") MutableRate
+      appAttemptFirstContainerAllocationDelay;
   private final MutableGaugeInt[] runningTime;
   private TimeBucketMetrics<ApplicationId> runBuckets;
 
@@ -462,7 +464,11 @@ public class QueueMetrics implements MetricsSource {
   parent.deactivateApp(user);
 }
   }
-  
+
+  public void addAppAttemptFirstContainerAllocationDelay(long latency) {
+appAttemptFirstContainerAllocationDelay.add(latency);
+  }
+
   public int getAppsSubmitted() {
 return appsSubmitted.value();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/972f1f1a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 

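For context, MutableRate accumulates samples into a count plus an average, so one metric captures both how often and how quickly first containers are allocated; a minimal sketch of the pattern the patch follows (the timestamp variables are illustrative, not from the diff):

    // Declared via annotation, as in QueueMetrics:
    @Metric("App Attempt First Container Allocation Delay")
    MutableRate appAttemptFirstContainerAllocationDelay;

    // Recorded once per attempt, when its first container is allocated:
    long latencyMs = firstContainerAllocatedTimeMs - attemptStartTimeMs;
    appAttemptFirstContainerAllocationDelay.add(latencyMs);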
[07/50] [abbrv] hadoop git commit: YARN-1880. Cleanup TestApplicationClientProtocolOnHA. Contributed by ozawa.

2015-03-24 Thread zhz
YARN-1880. Cleanup TestApplicationClientProtocolOnHA. Contributed by ozawa.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbceb3b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbceb3b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbceb3b4

Branch: refs/heads/HDFS-7285
Commit: fbceb3b41834d6899c4353fb24f12ba3ecf67faf
Parents: 970ee3f
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 24 11:57:28 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 24 11:57:58 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../client/TestApplicationClientProtocolOnHA.java   | 16 ++--
 2 files changed, 13 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbceb3b4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3d9f271..8a5e142 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -58,6 +58,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+YARN-1880. Cleanup TestApplicationClientProtocolOnHA
+(ozawa via harsh)
+
 YARN-3243. CapacityScheduler should pass headroom from parent to children
 to make sure ParentQueue obey its capacity limits. (Wangda Tan via jianhe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbceb3b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
index bfc6656..8e00554 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
@@ -93,7 +93,8 @@ public class TestApplicationClientProtocolOnHA extends ProtocolHATestBase {
   public void testGetApplicationsOnHA() throws Exception {
     List<ApplicationReport> reports =
         client.getApplications();
-    Assert.assertTrue(reports != null && !reports.isEmpty());
+    Assert.assertTrue(reports != null);
+    Assert.assertFalse(reports.isEmpty());
     Assert.assertEquals(cluster.createFakeAppReports(),
         reports);
   }
@@ -101,7 +102,8 @@ public class TestApplicationClientProtocolOnHA extends ProtocolHATestBase {
   @Test(timeout = 15000)
   public void testGetClusterNodesOnHA() throws Exception {
     List<NodeReport> reports = client.getNodeReports(NodeState.RUNNING);
-    Assert.assertTrue(reports != null && !reports.isEmpty());
+    Assert.assertTrue(reports != null);
+    Assert.assertFalse(reports.isEmpty());
     Assert.assertEquals(cluster.createFakeNodeReports(),
         reports);
   }
@@ -117,8 +119,8 @@ public class TestApplicationClientProtocolOnHA extends ProtocolHATestBase {
   @Test(timeout = 15000)
   public void testGetQueueUserAclsOnHA() throws Exception {
     List<QueueUserACLInfo> queueUserAclsList = client.getQueueAclsInfo();
-    Assert.assertTrue(queueUserAclsList != null
-        && !queueUserAclsList.isEmpty());
+    Assert.assertTrue(queueUserAclsList != null);
+    Assert.assertFalse(queueUserAclsList.isEmpty());
     Assert.assertEquals(cluster.createFakeQueueUserACLInfoList(),
         queueUserAclsList);
   }
@@ -136,7 +138,8 @@ public class TestApplicationClientProtocolOnHA extends ProtocolHATestBase {
   public void testGetApplicationAttemptsOnHA() throws Exception {
     List<ApplicationAttemptReport> reports =
         client.getApplicationAttempts(cluster.createFakeAppId());
-    Assert.assertTrue(reports != null && !reports.isEmpty());
+    Assert.assertTrue(reports != null);
+    Assert.assertFalse(reports.isEmpty());
     Assert.assertEquals(cluster.createFakeApplicationAttemptReports(),
         reports);
   }
@@ -153,7 +156,8 @@ public class TestApplicationClientProtocolOnHA extends ProtocolHATestBase {
   public void testGetContainersOnHA() throws Exception {
     List<ContainerReport> reports =
         client.getContainers(cluster.createFakeApplicationAttemptId());
-    Assert.assertTrue(reports != null && !reports.isEmpty());
+    Assert.assertTrue(reports != null);
+    Assert.assertFalse(reports.isEmpty());
     Assert.assertEquals(cluster.createFakeContainerReports(),
         reports);
   }


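The point of the cleanup: a compound assertTrue collapses two checks into one opaque true/false, while split assertions report which condition actually failed. A before/after sketch:

    // Before: a null list and an empty list fail identically.
    Assert.assertTrue(reports != null && !reports.isEmpty());

    // After: each failure mode is distinguishable from the stack trace.
    Assert.assertTrue(reports != null);   // Assert.assertNotNull(reports) is equivalent
    Assert.assertFalse(reports.isEmpty());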

[32/50] [abbrv] hadoop git commit: HDFS-7853. Erasure coding: extend LocatedBlocks to support reading from striped files. Contributed by Jing Zhao.

2015-03-24 Thread zhz
HDFS-7853. Erasure coding: extend LocatedBlocks to support reading from striped 
files. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2acf8b18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2acf8b18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2acf8b18

Branch: refs/heads/HDFS-7285
Commit: 2acf8b18d74d7300062904d307d8d33cab587648
Parents: 11b648b
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 9 14:59:58 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:33 2015 -0700

--
 .../hadoop/hdfs/protocol/LocatedBlock.java  |   5 +-
 .../hdfs/protocol/LocatedStripedBlock.java  |  68 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  14 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  13 +-
 .../DatanodeProtocolClientSideTranslatorPB.java |   2 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   2 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  80 +++
 .../blockmanagement/BlockInfoStriped.java   |   5 +
 .../BlockInfoStripedUnderConstruction.java  |  99 +++--
 .../server/blockmanagement/BlockManager.java|  51 ---
 .../blockmanagement/DatanodeDescriptor.java |   4 +-
 .../blockmanagement/DatanodeStorageInfo.java|   3 +-
 .../server/namenode/FSImageFormatPBINode.java   |  21 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  34 +++--
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |   1 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  12 ++
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|  16 +--
 .../datanode/TestIncrementalBrVariations.java   |  14 +-
 .../server/namenode/TestAddStripedBlocks.java   | 141 +++
 .../hdfs/server/namenode/TestFSImage.java   |   5 +-
 20 files changed, 444 insertions(+), 146 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2acf8b18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index e729869..a38e8f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.security.token.Token;
 
 import com.google.common.collect.Lists;
@@ -51,14 +50,14 @@ public class LocatedBlock {
   // else false. If block has few corrupt replicas, they are filtered and 
   // their locations are not part of this object
   private boolean corrupt;
-  private Token<BlockTokenIdentifier> blockToken = new Token<BlockTokenIdentifier>();
+  private Token<BlockTokenIdentifier> blockToken = new Token<>();
   /**
* List of cached datanode locations
*/
   private DatanodeInfo[] cachedLocs;
 
   // Used when there are no locations
-  private static final DatanodeInfoWithStorage[] EMPTY_LOCS =
+  static final DatanodeInfoWithStorage[] EMPTY_LOCS =
   new DatanodeInfoWithStorage[0];
 
   public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2acf8b18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
new file mode 100644
index 000..97e3a69
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required 

[46/50] [abbrv] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903, HDFS-7435 and HDFS-7930 (this commit is for HDFS-7930 only)

2015-03-24 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903, 
HDFS-7435 and HDFS-7930 (this commit is for HDFS-7930 only)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d128ba8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d128ba8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d128ba8

Branch: refs/heads/HDFS-7285
Commit: 0d128ba8f96539653d4d2d89f43682318aa4ec47
Parents: 6978271
Author: Zhe Zhang z...@apache.org
Authored: Mon Mar 23 11:25:40 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:35 2015 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java  | 7 ---
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 7 ---
 .../org/apache/hadoop/hdfs/server/namenode/INodeFile.java | 2 +-
 3 files changed, 9 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d128ba8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 300b767..9f5d2dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2112,17 +2112,18 @@ public class BlockManager {
* Mark block replicas as corrupt except those on the storages in 
* newStorages list.
*/
-  public void markBlockReplicasAsCorrupt(BlockInfoContiguous block, 
+  public void markBlockReplicasAsCorrupt(Block oldBlock,
+  BlockInfo block,
   long oldGenerationStamp, long oldNumBytes, 
   DatanodeStorageInfo[] newStorages) throws IOException {
 assert namesystem.hasWriteLock();
 BlockToMarkCorrupt b = null;
     if (block.getGenerationStamp() != oldGenerationStamp) {
-      b = new BlockToMarkCorrupt(block, oldGenerationStamp,
+      b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp,
           "genstamp does not match " + oldGenerationStamp
           + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
     } else if (block.getNumBytes() != oldNumBytes) {
-      b = new BlockToMarkCorrupt(block,
+      b = new BlockToMarkCorrupt(oldBlock, block,
           "length does not match " + oldNumBytes
           + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH);
     } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d128ba8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 80fdd68..d42ff5d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2776,7 +2776,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   /** Compute quota change for converting a complete block to a UC block */
   private QuotaCounts computeQuotaDeltaForUCBlock(INodeFile file) {
 final QuotaCounts delta = new QuotaCounts.Builder().build();
-final BlockInfoContiguous lastBlock = file.getLastBlock();
+final BlockInfo lastBlock = file.getLastBlock();
 if (lastBlock != null) {
   final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
   final short repl = file.getBlockReplication();
@@ -4371,8 +4371,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 } else {
   iFile.convertLastBlockToUC(storedBlock, trimmedStorageInfos);
   if (closeFile) {
-blockManager.markBlockReplicasAsCorrupt(storedBlock,
-oldGenerationStamp, oldNumBytes, trimmedStorageInfos);
+blockManager.markBlockReplicasAsCorrupt(oldBlock.getLocalBlock(),
+storedBlock, oldGenerationStamp, oldNumBytes,
+trimmedStorageInfos);
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d128ba8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 

[26/50] [abbrv] hadoop git commit: HDFS-7749. Erasure Coding: Add striped block support in INodeFile. Contributed by Jing Zhao.

2015-03-24 Thread zhz
HDFS-7749. Erasure Coding: Add striped block support in INodeFile. Contributed 
by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fa8b6e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fa8b6e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fa8b6e5

Branch: refs/heads/HDFS-7285
Commit: 3fa8b6e52bab6a8bf2c21bdaf87d049b9777e861
Parents: 1f9cf96
Author: Jing Zhao ji...@apache.org
Authored: Wed Feb 25 22:10:26 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:32 2015 -0700

--
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  17 ++
 .../server/blockmanagement/BlockCollection.java |  13 +-
 .../hdfs/server/blockmanagement/BlockInfo.java  |  88 ++-
 .../BlockInfoContiguousUnderConstruction.java   |   6 +-
 .../blockmanagement/BlockInfoStriped.java   |  31 +++
 .../BlockInfoStripedUnderConstruction.java  | 240 ++
 .../server/blockmanagement/BlockManager.java| 147 +--
 .../CacheReplicationMonitor.java|  16 +-
 .../hdfs/server/namenode/FSDirConcatOp.java |   8 +-
 .../hdfs/server/namenode/FSDirectory.java   |   5 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |   8 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  16 +-
 .../hdfs/server/namenode/FSImageFormat.java |   7 +-
 .../server/namenode/FSImageFormatPBINode.java   |  46 +++-
 .../hdfs/server/namenode/FSNamesystem.java  | 130 ++
 .../namenode/FileUnderConstructionFeature.java  |  15 +-
 .../namenode/FileWithStripedBlocksFeature.java  | 112 
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 254 +--
 .../hdfs/server/namenode/LeaseManager.java  |   6 +-
 .../hdfs/server/namenode/NamenodeFsck.java  |   4 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |   3 +-
 .../snapshot/FSImageFormatPBSnapshot.java   |   7 +-
 .../server/namenode/snapshot/FileDiffList.java  |   9 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|   5 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |  10 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   3 +-
 .../blockmanagement/TestReplicationPolicy.java  |   4 +-
 .../hdfs/server/namenode/TestAddBlock.java  |  12 +-
 .../hdfs/server/namenode/TestAddBlockgroup.java |   3 +-
 .../namenode/TestBlockUnderConstruction.java|   6 +-
 .../hdfs/server/namenode/TestFSImage.java   |   4 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |   4 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   4 +-
 .../snapshot/TestSnapshotBlocksMap.java |  24 +-
 .../namenode/snapshot/TestSnapshotDeletion.java |  16 +-
 35 files changed, 963 insertions(+), 320 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fa8b6e5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index b841850..a94921b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -172,6 +172,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos;
 import 
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
@@ -184,6 +185,7 @@ import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
@@ -430,6 +432,21 @@ public class PBHelper {
 return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
   }
 
+  public static BlockInfoStriped 

[12/50] [abbrv] hadoop git commit: HDFS-7961. Trigger full block report after hot swapping disk. Contributed by Eddy Xu.

2015-03-24 Thread zhz
HDFS-7961. Trigger full block report after hot swapping disk. Contributed by 
Eddy Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6413d349
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6413d349
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6413d349

Branch: refs/heads/HDFS-7285
Commit: 6413d34986f3399023426c89c9a0d401c9557716
Parents: 4170c99
Author: Andrew Wang w...@apache.org
Authored: Tue Mar 24 09:07:02 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Tue Mar 24 09:07:02 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |  4 ++
 .../datanode/TestDataNodeHotSwapVolumes.java| 42 
 3 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6413d349/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ee9a5db..70be18a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1251,6 +1251,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7960. The full block report should prune zombie storages even if
 they're not empty. (cmccabe and Eddy Xu via wang)
 
+HDFS-7961. Trigger full block report after hot swapping disk. (Eddy Xu via 
wang)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6413d349/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index e9befb4..d94375e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -634,6 +634,10 @@ public class DataNode extends ReconfigurableBase
   conf.set(DFS_DATANODE_DATA_DIR_KEY,
  Joiner.on(",").join(effectiveVolumes));
   dataDirs = getStorageLocations(conf);
+
+  // Send a full block report to let NN acknowledge the volume changes.
+  triggerBlockReport(new BlockReportOptions.Factory()
+  .setIncremental(false).build());
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6413d349/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 2f51d45..f5772e3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -34,12 +34,16 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
@@ -59,6 +63,7 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.mockito.Mockito;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.hamcrest.CoreMatchers.anyOf;
@@ -70,6 

[50/50] [abbrv] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903, HDFS-7435, HDFS-7930, HDFS-7960 (this commit is for HDFS-7960)

2015-03-24 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903, 
HDFS-7435, HDFS-7930, HDFS-7960 (this commit is for HDFS-7960)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df624970
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df624970
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df624970

Branch: refs/heads/HDFS-7285
Commit: df6249708cdaacfe9ab9c27a0ef98098fdbb87a2
Parents: f91d9b2
Author: Zhe Zhang z...@apache.org
Authored: Tue Mar 24 11:39:36 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:39:36 2015 -0700

--
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java | 4 ++--
 .../blockmanagement/TestNameNodePrunesMissingStorages.java  | 5 -
 .../hadoop/hdfs/server/namenode/TestAddStripedBlocks.java   | 2 +-
 3 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df624970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3402e36..a826d22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1973,10 +1973,10 @@ public class BlockManager {
        " longer exists on the DataNode.",
        Long.toHexString(context.getReportId()), zombie.getStorageID());
    assert(namesystem.hasWriteLock());
-    Iterator<BlockInfoContiguous> iter = zombie.getBlockIterator();
+    Iterator<BlockInfo> iter = zombie.getBlockIterator();
    int prevBlocks = zombie.numBlocks();
    while (iter.hasNext()) {
-      BlockInfoContiguous block = iter.next();
+      BlockInfo block = iter.next();
   // We assume that a block can be on only one storage in a DataNode.
   // That's why we pass in the DatanodeDescriptor rather than the
   // DatanodeStorageInfo.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df624970/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index 4b97d01..e9329cd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -171,9 +171,12 @@ public class TestNameNodePrunesMissingStorages {
   String datanodeUuid;
   // Find the first storage which this block is in.
   try {
+BlockInfo storedBlock =
+cluster.getNamesystem().getBlockManager().
+getStoredBlock(block.getLocalBlock());
Iterator<DatanodeStorageInfo> storageInfoIter =
 cluster.getNamesystem().getBlockManager().
-getStorages(block.getLocalBlock()).iterator();
+blocksMap.getStorages(storedBlock).iterator();
 assertTrue(storageInfoIter.hasNext());
 DatanodeStorageInfo info = storageInfoIter.next();
 storageIdToRemove = info.getStorageID();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df624970/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index 05aec4b..7d7c81e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -269,7 +269,7 @@ public class TestAddStripedBlocks {
   StorageBlockReport[] reports = {new StorageBlockReport(storage,
   bll)};
   

[06/50] [abbrv] hadoop git commit: HDFS-7956. Improve logging for DatanodeRegistration. Contributed by Plamen Jeliazkov.

2015-03-24 Thread zhz
HDFS-7956. Improve logging for DatanodeRegistration. Contributed by Plamen 
Jeliazkov.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/970ee3fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/970ee3fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/970ee3fc

Branch: refs/heads/HDFS-7285
Commit: 970ee3fc56a68afade98017296cf9d057f225a46
Parents: 50ee8f4
Author: Plamen Jeliazkov plamenj2...@gmail.com
Authored: Mon Mar 23 23:04:04 2015 -0700
Committer: Konstantin V Shvachko s...@apache.org
Committed: Mon Mar 23 23:05:21 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/970ee3fc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3dd5fb3..3ea1346 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -777,6 +777,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7917. Use file to replace data dirs in test to simulate a disk 
failure.
 (Lei (Eddy) Xu via cnauroth)
 
+HDFS-7956. Improve logging for DatanodeRegistration.
+(Plamen Jeliazkov via shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/970ee3fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
index e788137..7119738 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
@@ -100,7 +100,7 @@ public class DatanodeRegistration extends DatanodeID
   @Override
   public String toString() {
 return getClass().getSimpleName()
-  + "(" + getIpAddr()
+  + "(" + super.toString()
   + ", datanodeUuid=" + getDatanodeUuid()
   + ", infoPort=" + getInfoPort()
   + ", infoSecurePort=" + getInfoSecurePort()
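
The whole improvement is the switch from getIpAddr() to super.toString(), so the registration's log form inherits every field the parent DatanodeID already renders. A minimal standalone sketch of the same pattern (the classes below are illustrative stand-ins, not the Hadoop types):

class NodeId {
  private final String ip;
  private final int xferPort;
  NodeId(String ip, int xferPort) { this.ip = ip; this.xferPort = xferPort; }
  @Override public String toString() { return ip + ":" + xferPort; }
}

class NodeRegistration extends NodeId {
  private final String uuid;
  NodeRegistration(String ip, int xferPort, String uuid) {
    super(ip, xferPort);
    this.uuid = uuid;
  }
  @Override public String toString() {
    // Before: only the IP appeared here; delegating to super.toString()
    // also surfaces the transfer port without duplicating format logic.
    return getClass().getSimpleName() + "(" + super.toString()
        + ", uuid=" + uuid + ")";
  }
  public static void main(String[] args) {
    System.out.println(new NodeRegistration("10.0.0.1", 50010, "dn-1234"));
    // Prints: NodeRegistration(10.0.0.1:50010, uuid=dn-1234)
  }
}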



[31/50] [abbrv] hadoop git commit: HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng

2015-03-24 Thread zhz
HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f9cf96e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f9cf96e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f9cf96e

Branch: refs/heads/HDFS-7285
Commit: 1f9cf96e5aa43a85a3442216378662571135c6c9
Parents: 249ca62
Author: drankye dran...@gmail.com
Authored: Thu Feb 12 21:12:44 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:32 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   4 +
 .../io/erasurecode/rawcoder/JRSRawDecoder.java  |  69 +++
 .../io/erasurecode/rawcoder/JRSRawEncoder.java  |  78 +++
 .../erasurecode/rawcoder/RawErasureCoder.java   |   2 +-
 .../erasurecode/rawcoder/util/GaloisField.java  | 497 +++
 .../io/erasurecode/rawcoder/util/RSUtil.java|  22 +
 .../hadoop/io/erasurecode/TestCoderBase.java|  28 +-
 .../erasurecode/rawcoder/TestJRSRawCoder.java   |  93 
 .../erasurecode/rawcoder/TestRawCoderBase.java  |   5 +-
 .../erasurecode/rawcoder/TestXorRawCoder.java   |   1 -
 10 files changed, 786 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f9cf96e/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 9728f97..7bbacf7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -8,3 +8,7 @@
 
 HADOOP-11541. Raw XOR coder
 ( Kai Zheng )
+
+HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng
+( Kai Zheng )
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f9cf96e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
new file mode 100644
index 000..dbb689e
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.io.erasurecode.rawcoder.util.RSUtil;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A raw erasure decoder in RS code scheme in pure Java in case native one
+ * isn't available in some environment. Please always use native 
implementations
+ * when possible.
+ */
+public class JRSRawDecoder extends AbstractRawErasureDecoder {
+  // To describe and calculate the needed Vandermonde matrix
+  private int[] errSignature;
+  private int[] primitivePower;
+
+  @Override
+  public void initialize(int numDataUnits, int numParityUnits, int chunkSize) {
+super.initialize(numDataUnits, numParityUnits, chunkSize);
+assert (getNumDataUnits() + getNumParityUnits() < RSUtil.GF.getFieldSize());
+
+this.errSignature = new int[getNumParityUnits()];
+this.primitivePower = RSUtil.getPrimitivePower(getNumDataUnits(),
+getNumParityUnits());
+  }
+
+  @Override
+  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
+  ByteBuffer[] outputs) {
+for (int i = 0; i < erasedIndexes.length; i++) {
+  errSignature[i] = primitivePower[erasedIndexes[i]];
+  RSUtil.GF.substitute(inputs, outputs[i], primitivePower[i]);
+}
+
+int dataLen = inputs[0].remaining();
+RSUtil.GF.solveVandermondeSystem(errSignature, outputs,
+erasedIndexes.length, dataLen);
+  }
+
+  @Override
+  protected void 
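
As a rough usage sketch of the decoder contract shown above: a caller fixes k data units and m parity units at initialize() time, then hands the surviving buffers plus the erased positions to decode, one output buffer per erased unit. The interface below mirrors the diff's method shapes but is a hypothetical stand-in, not the Hadoop API:

import java.nio.ByteBuffer;

// Hypothetical mirror of the raw-decoder contract; a usage sketch,
// not the Hadoop implementation.
interface RawDecoderSketch {
  void initialize(int numDataUnits, int numParityUnits, int chunkSize);
  void decode(ByteBuffer[] inputs, int[] erasedIndexes, ByteBuffer[] outputs);
}

class DecodeDriver {
  static void recover(RawDecoderSketch decoder, ByteBuffer[] survivors) {
    decoder.initialize(6, 3, 64 * 1024);   // RS(6,3) with 64K chunks
    int[] erased = {1, 4};                 // positions of the lost units
    ByteBuffer[] recovered = {
        ByteBuffer.allocate(64 * 1024), ByteBuffer.allocate(64 * 1024)};
    // One output buffer per erased index; the decoder solves a Vandermonde
    // system over the Galois field to rebuild exactly those units.
    decoder.decode(survivors, erased, recovered);
  }
}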

[04/50] [abbrv] hadoop git commit: HDFS-7884. Fix NullPointerException in BlockSender when the generation stamp provided by the client is larger than the one stored in the datanode. Contributed by Bra

2015-03-24 Thread zhz
HDFS-7884. Fix NullPointerException in BlockSender when the generation stamp 
provided by the client is larger than the one stored in the datanode.  
Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7e3c336
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7e3c336
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7e3c336

Branch: refs/heads/HDFS-7285
Commit: d7e3c3364eb904f55a878bc14c331952f9dadab2
Parents: 9fae455
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Tue Mar 24 13:49:17 2015 +0900
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Tue Mar 24 13:49:17 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 4 
 .../org/apache/hadoop/hdfs/server/datanode/BlockSender.java   | 7 +++
 2 files changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7e3c336/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b88b7e3..d2891e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1237,6 +1237,10 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7942. NFS: support regexp grouping in nfs.exports.allowed.hosts 
(brandonli)
 
+HDFS-7884. Fix NullPointerException in BlockSender when the generation 
stamp
+provided by the client is larger than the one stored in the datanode.
+(Brahma Reddy Battula via szetszwo)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7e3c336/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index f4cde11..e76b93a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -246,6 +246,13 @@ class BlockSender implements java.io.Closeable {
   if (replica.getGenerationStamp() < block.getGenerationStamp()) {
 throw new IOException("Replica gen stamp < block genstamp, block="
 + block + ", replica=" + replica);
+  } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
+if (DataNode.LOG.isDebugEnabled()) {
+  DataNode.LOG.debug("Bumping up the client provided"
+  + " block's genstamp to latest " + replica.getGenerationStamp()
+  + " for block " + block);
+}
+block.setGenerationStamp(replica.getGenerationStamp());
   }
   if (replicaVisibleLength < 0) {
 throw new IOException("Replica is not readable, block="
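
Stripped of the surrounding BlockSender state, the fix is a three-way generation-stamp comparison: an older replica stamp means a corrupt request and fails fast, while a newer one is adopted instead of surfacing later as the reported NullPointerException. A hypothetical standalone reduction:

import java.io.IOException;

// Minimal sketch of the branch added above (hypothetical, standalone):
// replica older than the client's stamp -> fail fast; replica newer ->
// adopt the replica's stamp rather than hit an NPE in metadata lookup.
class GenStampReconcile {
  static long reconcile(long replicaGS, long clientGS) throws IOException {
    if (replicaGS < clientGS) {
      throw new IOException("Replica gen stamp < block genstamp");
    }
    return Math.max(replicaGS, clientGS);  // bump the client's view if behind
  }
}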



[40/50] [abbrv] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903 and HDFS-7435. Contributed by Zhe Zhang.

2015-03-24 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903 and 
HDFS-7435. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03ebc27f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03ebc27f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03ebc27f

Branch: refs/heads/HDFS-7285
Commit: 03ebc27f83866a9f3608266276b61cb582c7665e
Parents: 2825dd7
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 16 14:27:21 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:34 2015 -0700

--
 .../hadoop/hdfs/server/blockmanagement/DecommissionManager.java | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 2 +-
 .../hadoop/hdfs/server/namenode/snapshot/FileDiffList.java  | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestDecommission.java  | 5 ++---
 .../hadoop/hdfs/server/namenode/TestAddStripedBlocks.java   | 4 ++--
 5 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03ebc27f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 0faf3ad..df31d6e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -536,7 +536,7 @@ public class DecommissionManager {
  */
 private void processBlocksForDecomInternal(
 final DatanodeDescriptor datanode,
-final Iterator<BlockInfoContiguous> it,
+final Iterator<? extends BlockInfo> it,
 final List<BlockInfoContiguous> insufficientlyReplicated,
 boolean pruneSufficientlyReplicated) {
   boolean firstReplicationLog = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03ebc27f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b16eb7e..bea3bc8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1982,7 +1982,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 }
 
 // Check if the file is already being truncated with the same length
-final BlockInfoContiguous last = file.getLastBlock();
+final BlockInfo last = file.getLastBlock();
 if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
   final Block truncateBlock
   = ((BlockInfoContiguousUnderConstruction)last).getTruncateBlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03ebc27f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index a1263c5..d0248eb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -21,6 +21,7 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -132,7 +133,7 @@ public class FileDiffList extends
   break;
 }
 // Check if last block is part of truncate recovery
-BlockInfoContiguous lastBlock = file.getLastBlock();
+BlockInfo lastBlock = file.getLastBlock();
 Block 
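
The signature change from Iterator<BlockInfoContiguous> to Iterator<? extends BlockInfo> is what lets one decommission path walk both contiguous and striped block lists; Java's invariant generics would reject the former. A compact illustration with stand-in types:

import java.util.Iterator;
import java.util.List;

// Stand-in hierarchy showing why the parameter was widened: an
// Iterator<Contiguous> is not an Iterator<Base>, but it is an
// Iterator<? extends Base>, so one method serves both block families.
class Base {}
class Contiguous extends Base {}
class Striped extends Base {}

class VarianceDemo {
  static int count(Iterator<? extends Base> it) {
    int n = 0;
    while (it.hasNext()) { it.next(); n++; }
    return n;
  }
  public static void main(String[] args) {
    List<Contiguous> c = List.of(new Contiguous());
    List<Striped> s = List.of(new Striped(), new Striped());
    System.out.println(count(c.iterator()) + count(s.iterator())); // 3
  }
}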

[37/50] [abbrv] hadoop git commit: HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai Zheng

2015-03-24 Thread zhz
HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57551f1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57551f1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57551f1d

Branch: refs/heads/HDFS-7285
Commit: 57551f1d37e0c781a90cab52d211e3553af22247
Parents: 7d7e391
Author: drankye kai.zh...@intel.com
Authored: Thu Mar 5 22:51:52 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:33 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   4 +
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 203 +++
 .../hadoop/io/erasurecode/TestECSchema.java |  54 +
 3 files changed, 261 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57551f1d/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 7bbacf7..ee42c84 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -12,3 +12,7 @@
 HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng
 ( Kai Zheng )
 
+HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai 
Zheng
+( Kai Zheng )
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57551f1d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
new file mode 100644
index 000..8dc3f45
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode;
+
+import java.util.Collections;
+import java.util.Map;
+
+/**
+ * Erasure coding schema to housekeep relevant information.
+ */
+public class ECSchema {
+  public static final String NUM_DATA_UNITS_KEY = "k";
+  public static final String NUM_PARITY_UNITS_KEY = "m";
+  public static final String CODEC_NAME_KEY = "codec";
+  public static final String CHUNK_SIZE_KEY = "chunkSize";
+  public static final int DEFAULT_CHUNK_SIZE = 64 * 1024; // 64K
+
+  private String schemaName;
+  private String codecName;
+  private Map<String, String> options;
+  private int numDataUnits;
+  private int numParityUnits;
+  private int chunkSize;
+
+  /**
+   * Constructor with schema name and provided options. Note the options may
+   * contain additional information for the erasure codec to interpret further.
+   * @param schemaName schema name
+   * @param options schema options
+   */
+  public ECSchema(String schemaName, Map<String, String> options) {
+assert (schemaName != null && ! schemaName.isEmpty());
+
+this.schemaName = schemaName;
+
+if (options == null || options.isEmpty()) {
+  throw new IllegalArgumentException("No schema options are provided");
+}
+
+String codecName = options.get(CODEC_NAME_KEY);
+if (codecName == null || codecName.isEmpty()) {
+  throw new IllegalArgumentException("No codec option is provided");
+}
+
+int dataUnits = 0, parityUnits = 0;
+try {
+  if (options.containsKey(NUM_DATA_UNITS_KEY)) {
+dataUnits = Integer.parseInt(options.get(NUM_DATA_UNITS_KEY));
+  }
+} catch (NumberFormatException e) {
+  throw new IllegalArgumentException("Option value " +
+  options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+  " is found. It should be an integer");
+}
+
+try {
+  if 
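
A rough usage sketch of the options-map constructor above; the key names (k, m, codec, chunkSize) come from the constants in the diff, while this driver class is hypothetical:

import java.util.HashMap;
import java.util.Map;

// Usage sketch only: builds the options map the constructor validates.
class ECSchemaDemo {
  public static void main(String[] args) {
    Map<String, String> options = new HashMap<>();
    options.put("codec", "RS");        // CODEC_NAME_KEY
    options.put("k", "6");             // NUM_DATA_UNITS_KEY
    options.put("m", "3");             // NUM_PARITY_UNITS_KEY
    options.put("chunkSize", "65536"); // CHUNK_SIZE_KEY; default is 64K
    // new ECSchema("RS-6-3", options) would validate these entries and
    // throw IllegalArgumentException on a missing codec or a bad integer.
    System.out.println(options);
  }
}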

[47/50] [abbrv] hadoop git commit: HDFS-7369. Erasure coding: distribute recovery work for striped blocks to DataNode. Contributed by Zhe Zhang.

2015-03-24 Thread zhz
HDFS-7369. Erasure coding: distribute recovery work for striped blocks to 
DataNode. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25806c07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25806c07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25806c07

Branch: refs/heads/HDFS-7285
Commit: 25806c074fc91a89e190fb9e45a90357bf12ca12
Parents: 1b31c21
Author: Zhe Zhang z...@apache.org
Authored: Wed Mar 18 15:52:36 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:35 2015 -0700

--
 .../server/blockmanagement/BlockCollection.java |   5 +
 .../server/blockmanagement/BlockManager.java| 296 +--
 .../blockmanagement/DatanodeDescriptor.java |  72 -
 .../server/blockmanagement/DatanodeManager.java |  20 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   9 +-
 .../server/protocol/BlockECRecoveryCommand.java |  63 
 .../hdfs/server/protocol/DatanodeProtocol.java  |   1 +
 .../blockmanagement/BlockManagerTestUtil.java   |   2 +-
 .../blockmanagement/TestBlockManager.java   |  22 +-
 .../TestRecoverStripedBlocks.java   | 107 +++
 10 files changed, 486 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25806c07/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 1c753de..62a5781 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -86,4 +86,9 @@ public interface BlockCollection {
* @return whether the block collection is under construction.
*/
   public boolean isUnderConstruction();
+
+  /**
+   * @return whether the block collection is in striping format
+   */
+  public boolean isStriped();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25806c07/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f7a00f0..300b767 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
@@ -531,9 +532,9 @@ public class BlockManager {
 
 NumberReplicas numReplicas = new NumberReplicas();
 // source node returned is not used
-chooseSourceDatanode(block, containingNodes,
+chooseSourceDatanodes(getStoredBlock(block), containingNodes,
 containingLiveReplicasNodes, numReplicas,
-UnderReplicatedBlocks.LEVEL);
+null, 1, UnderReplicatedBlocks.LEVEL);
 
 // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which 
are 
 // not included in the numReplicas.liveReplicas() count
@@ -1327,15 +1328,15 @@ public class BlockManager {
   }
 
   /**
-   * Scan blocks in {@link #neededReplications} and assign replication
-   * work to data-nodes they belong to.
+   * Scan blocks in {@link #neededReplications} and assign recovery
+   * (replication or erasure coding) work to data-nodes they belong to.
*
* The number of process blocks equals either twice the number of live
* data-nodes or the number of under-replicated blocks whichever is less.
*
* @return number of blocks scheduled for replication during this iteration.
*/
-  int computeReplicationWork(int blocksToProcess) {
+  int computeBlockRecoveryWork(int blocksToProcess) {
 List<List<BlockInfo>> blocksToReplicate = null;
 namesystem.writeLock();
 try {
@@ -1345,30 +1346,32 @@ public class BlockManager {
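
The renamed computeBlockRecoveryWork keeps the original contract: pull a bounded batch from the needed-replications queue and give each block either a replication task or an erasure-coding reconstruction task. A schematic of that contract; every name below is a hypothetical stand-in for the NameNode internals, not the real code:

import java.util.ArrayList;
import java.util.List;

class RecoverySketch {
  interface Task {}
  record Replicate(String block) implements Task {}
  record EcReconstruct(String blockGroup) implements Task {}

  // Striped groups get erasure-coding reconstruction work; contiguous
  // blocks keep plain replication, per the commit description.
  static Task planFor(String block, boolean striped) {
    return striped ? new EcReconstruct(block) : new Replicate(block);
  }

  static List<Task> assign(List<String> underReplicated, int blocksToProcess) {
    List<Task> scheduled = new ArrayList<>();
    for (String b : underReplicated) {
      if (scheduled.size() == blocksToProcess) {
        break;                                      // bounded batch per pass
      }
      scheduled.add(planFor(b, b.startsWith("bg_"))); // toy striped check
    }
    return scheduled;
  }
}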
 

[22/50] [abbrv] hadoop git commit: HDFS-7339. Allocating and persisting block groups in NameNode. Contributed by Zhe Zhang

2015-03-24 Thread zhz
HDFS-7339. Allocating and persisting block groups in NameNode. Contributed by 
Zhe Zhang

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a88a8b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a88a8b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a88a8b5

Branch: refs/heads/HDFS-7285
Commit: 6a88a8b57f461c4fc5d357c6702e8018bb9c8e9e
Parents: 5b810d0
Author: Zhe Zhang z...@apache.org
Authored: Fri Jan 30 16:16:26 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:31 2015 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +
 .../hadoop/hdfs/protocol/HdfsConstants.java |  4 +
 .../server/blockmanagement/BlockIdManager.java  |  8 +-
 .../SequentialBlockGroupIdGenerator.java| 82 +++
 .../SequentialBlockIdGenerator.java |  6 +-
 .../hdfs/server/namenode/FSDirectory.java   |  8 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 34 +---
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 11 +++
 .../hdfs/server/namenode/TestAddBlockgroup.java | 84 
 9 files changed, 223 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a88a8b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9ecf242..2b62744 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -219,6 +219,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;
   public static final String  DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min";
   public static final int DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
+  public static final String  DFS_NAMENODE_STRIPE_MIN_KEY = "dfs.namenode.stripe.min";
+  public static final int DFS_NAMENODE_STRIPE_MIN_DEFAULT = 1;
   public static final String  DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY = "dfs.namenode.replication.pending.timeout-sec";
   public static final int DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY = "dfs.namenode.replication.max-streams";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a88a8b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 54c650b..de60b6e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -181,4 +181,8 @@ public class HdfsConstants {
   public static final byte WARM_STORAGE_POLICY_ID = 5;
   public static final byte EC_STORAGE_POLICY_ID = 4;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
+
+  public static final byte NUM_DATA_BLOCKS = 3;
+  public static final byte NUM_PARITY_BLOCKS = 2;
+  public static final byte MAX_BLOCKS_IN_GROUP = 16;
 }
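
With MAX_BLOCKS_IN_GROUP = 16, a block group can reserve a 16-wide ID range so each internal block is addressed as groupId + indexInGroup. A toy illustration of that arithmetic; this is an assumption about the scheme's intent, not the actual SequentialBlockGroupIdGenerator:

// Toy sketch: group IDs step by MAX_BLOCKS_IN_GROUP so the ranges of
// internal-block IDs never collide. Hypothetical, not the Hadoop class.
class GroupIdSketch {
  private long lastGroupId = 0;

  long nextGroupId() {
    lastGroupId += 16;                 // step by MAX_BLOCKS_IN_GROUP
    return lastGroupId;
  }

  static long internalBlockId(long groupId, int indexInGroup) {
    return groupId + indexInGroup;     // index < 16 keeps ranges disjoint
  }

  static long groupOf(long internalId) {
    return internalId - (internalId % 16);  // recover the owning group ID
  }
}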

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a88a8b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 1c69203..c8b9d20 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -53,10 +53,12 @@ public 

[24/50] [abbrv] hadoop git commit: Fix Compilation Error in TestAddBlockgroup.java after the merge

2015-03-24 Thread zhz
Fix Compilation Error in TestAddBlockgroup.java after the merge


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57084f55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57084f55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57084f55

Branch: refs/heads/HDFS-7285
Commit: 57084f55bba0876f9533d06e007450feb2518a20
Parents: ee9ce47
Author: Jing Zhao ji...@apache.org
Authored: Sun Feb 8 16:01:03 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:31 2015 -0700

--
 .../apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57084f55/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
index 95133ce..06dfade 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -75,7 +75,7 @@ public class TestAddBlockgroup {
 final Path file1 = new Path(/file1);
 DFSTestUtil.createFile(fs, file1, BLOCKSIZE * 2, REPLICATION, 0L);
 INodeFile file1Node = fsdir.getINode4Write(file1.toString()).asFile();
-BlockInfo[] file1Blocks = file1Node.getBlocks();
+BlockInfoContiguous[] file1Blocks = file1Node.getBlocks();
 assertEquals(2, file1Blocks.length);
 assertEquals(GROUP_SIZE, file1Blocks[0].numNodes());
 assertEquals(HdfsConstants.MAX_BLOCKS_IN_GROUP,



[05/50] [abbrv] hadoop git commit: HDFS-7960. The full block report should prune zombie storages even if they're not empty. Contributed by Colin McCabe and Eddy Xu.

2015-03-24 Thread zhz
HDFS-7960. The full block report should prune zombie storages even if they're 
not empty. Contributed by Colin McCabe and Eddy Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50ee8f4e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50ee8f4e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50ee8f4e

Branch: refs/heads/HDFS-7285
Commit: 50ee8f4e67a66aa77c5359182f61f3e951844db6
Parents: d7e3c33
Author: Andrew Wang w...@apache.org
Authored: Mon Mar 23 22:00:34 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Mon Mar 23 22:00:34 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../DatanodeProtocolClientSideTranslatorPB.java |   5 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  15 +++
 .../server/blockmanagement/BlockManager.java|  53 +++-
 .../blockmanagement/DatanodeDescriptor.java |  51 ++-
 .../blockmanagement/DatanodeStorageInfo.java|  13 +-
 .../hdfs/server/datanode/BPServiceActor.java|  34 +++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  11 +-
 .../server/protocol/BlockReportContext.java |  52 +++
 .../hdfs/server/protocol/DatanodeProtocol.java  |  10 +-
 .../src/main/proto/DatanodeProtocol.proto   |  14 ++
 .../hdfs/protocol/TestBlockListAsLongs.java |   7 +-
 .../blockmanagement/TestBlockManager.java   |   8 +-
 .../TestNameNodePrunesMissingStorages.java  | 135 ++-
 .../server/datanode/BlockReportTestBase.java|   4 +-
 .../server/datanode/TestBPOfferService.java |  10 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java   |   4 +-
 .../datanode/TestDataNodeVolumeFailure.java |   3 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 ...TestDnRespectsBlockReportSplitThreshold.java |   7 +-
 .../TestNNHandlesBlockReportPerStorage.java |   7 +-
 .../TestNNHandlesCombinedBlockReport.java   |   4 +-
 .../server/datanode/TestTriggerBlockReport.java |   7 +-
 .../server/namenode/NNThroughputBenchmark.java  |   9 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   4 +-
 .../hdfs/server/namenode/ha/TestDNFencing.java  |   4 +-
 27 files changed, 433 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50ee8f4e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d2891e3..3dd5fb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1241,6 +1241,9 @@ Release 2.7.0 - UNRELEASED
 provided by the client is larger than the one stored in the datanode.
 (Brahma Reddy Battula via szetszwo)
 
+HDFS-7960. The full block report should prune zombie storages even if
+they're not empty. (cmccabe and Eddy Xu via wang)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50ee8f4e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index c4003f1..825e835 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -47,6 +47,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlo
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -169,7 +170,8 @@ public class DatanodeProtocolClientSideTranslatorPB 
implements
 
   @Override
   public DatanodeCommand blockReport(DatanodeRegistration registration,
-  String poolId, StorageBlockReport[] reports) throws IOException {
+  
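
The DatanodeProtocol change threads a BlockReportContext through each storage report; per the commit message, the NameNode uses it to recognize when a full report cycle has arrived before pruning storages it no longer hears about. A schematic of the context's role; the field names here are assumptions for illustration, not taken from the diff:

// Hypothetical sketch: the NameNode only prunes unreported ("zombie")
// storages once it has seen all totalRpcs pieces of the report cycle
// identified by reportId.
class BlockReportContextSketch {
  final int totalRpcs;   // how many RPCs this full report is split into
  final int curRpc;      // index of this piece
  final long reportId;   // ties the pieces of one report cycle together

  BlockReportContextSketch(int totalRpcs, int curRpc, long reportId) {
    this.totalRpcs = totalRpcs;
    this.curRpc = curRpc;
    this.reportId = reportId;
  }

  boolean lastPiece() { return curRpc == totalRpcs - 1; }
}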

[23/50] [abbrv] hadoop git commit: HADOOP-11534. Minor improvements for raw erasure coders ( Contributed by Kai Zheng )

2015-03-24 Thread zhz
HADOOP-11534. Minor improvements for raw erasure coders ( Contributed by Kai 
Zheng )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7680f1d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7680f1d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7680f1d1

Branch: refs/heads/HDFS-7285
Commit: 7680f1d1221d90b679ce7a85f0511218b86fbb40
Parents: 21bd6a4
Author: Vinayakumar B vinayakuma...@intel.com
Authored: Mon Feb 2 14:39:53 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:31 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt   |  5 -
 .../org/apache/hadoop/io/erasurecode/ECChunk.java| 15 +--
 .../rawcoder/AbstractRawErasureCoder.java| 12 ++--
 3 files changed, 23 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7680f1d1/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 8ce5a89..2124800 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -1,4 +1,7 @@
   BREAKDOWN OF HADOOP-11264 SUBTASKS AND RELATED JIRAS (Common part of 
HDFS-7285)
 
 HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding
-(Kai Zheng via umamahesh)
\ No newline at end of file
+(Kai Zheng via umamahesh)
+
+HADOOP-11534. Minor improvements for raw erasure coders
+( Kai Zheng via vinayakumarb )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7680f1d1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
index f84eb11..01e8f35 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
@@ -66,15 +66,26 @@ public class ECChunk {
   }
 
   /**
-   * Convert an array of this chunks to an array of byte array
+   * Convert an array of this chunks to an array of byte array.
+   * Note the chunk buffers are not affected.
* @param chunks
* @return an array of byte array
*/
   public static byte[][] toArray(ECChunk[] chunks) {
 byte[][] bytesArr = new byte[chunks.length][];
 
+ByteBuffer buffer;
+for (int i = 0; i < chunks.length; i++) {
-  bytesArr[i] = chunks[i].getBuffer().array();
+  buffer = chunks[i].getBuffer();
+  if (buffer.hasArray()) {
+bytesArr[i] = buffer.array();
+  } else {
+bytesArr[i] = new byte[buffer.remaining()];
+// Avoid affecting the original one
+buffer.mark();
+buffer.get(bytesArr[i]);
+buffer.reset();
+  }
 }
 
 return bytesArr;
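
The mark()/get()/reset() sequence above exists because ByteBuffer.get(byte[]) advances the buffer's position; without the reset the chunk would appear consumed to the next reader. A plain-JDK demonstration (no Hadoop types):

import java.nio.ByteBuffer;

class MarkResetDemo {
  public static void main(String[] args) {
    ByteBuffer direct = ByteBuffer.allocateDirect(4);
    direct.put(new byte[]{1, 2, 3, 4}).flip();   // ready to read, pos=0

    byte[] copy = new byte[direct.remaining()];
    direct.mark();       // remember the position before the bulk get
    direct.get(copy);    // advances position to the limit
    direct.reset();      // restore it, leaving the buffer untouched

    System.out.println(direct.remaining());      // still 4
  }
}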

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7680f1d1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
index 474542b..74d2ab6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
@@ -24,26 +24,26 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
  */
 public abstract class AbstractRawErasureCoder implements RawErasureCoder {
 
-  private int dataSize;
-  private int paritySize;
+  private int numDataUnits;
+  private int numParityUnits;
   private int chunkSize;
 
   @Override
   public void initialize(int numDataUnits, int numParityUnits,
  int chunkSize) {
-this.dataSize = numDataUnits;
-this.paritySize = numParityUnits;
+this.numDataUnits = numDataUnits;
+this.numParityUnits = numParityUnits;
 this.chunkSize = chunkSize;
   }
 
   @Override
   public int getNumDataUnits() {
-return dataSize;
+return numDataUnits;
   }
 
   @Override
   public int 

[02/50] [abbrv] hadoop git commit: HDFS-7917. Use file to replace data dirs in test to simulate a disk failure. Contributed by Lei (Eddy) Xu.

2015-03-24 Thread zhz
HDFS-7917. Use file to replace data dirs in test to simulate a disk failure. 
Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c238ae4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c238ae4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c238ae4

Branch: refs/heads/HDFS-7285
Commit: 2c238ae4e00371ef76582b007bb0e20ac8455d9c
Parents: 972f1f1
Author: cnauroth cnaur...@apache.org
Authored: Mon Mar 23 16:29:51 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon Mar 23 16:29:51 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/datanode/DataNodeTestUtils.java | 61 +++-
 .../datanode/TestDataNodeHotSwapVolumes.java| 29 --
 .../datanode/TestDataNodeVolumeFailure.java | 11 +---
 .../TestDataNodeVolumeFailureReporting.java | 46 ---
 .../TestDataNodeVolumeFailureToleration.java|  8 +--
 6 files changed, 88 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c238ae4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8c99876..b88b7e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -774,6 +774,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7962. Remove duplicated logs in BlockManager. (yliu)
 
+HDFS-7917. Use file to replace data dirs in test to simulate a disk 
failure.
+(Lei (Eddy) Xu via cnauroth)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c238ae4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index fd51e52..f9a2ba1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -40,7 +40,9 @@ import com.google.common.base.Preconditions;
  * Utility class for accessing package-private DataNode information during 
tests.
  *
  */
-public class DataNodeTestUtils {  
+public class DataNodeTestUtils {
+  private static final String DIR_FAILURE_SUFFIX = ".origin";
+
   public static DatanodeRegistration 
   getDNRegistrationForBP(DataNode dn, String bpid) throws IOException {
 return dn.getDNRegistrationForBP(bpid);
@@ -159,4 +161,61 @@ public class DataNodeTestUtils {
   final String bpid, final long blkId) {
 return FsDatasetTestUtil.fetchReplicaInfo(dn.getFSDataset(), bpid, blkId);
   }
+
+  /**
+   * It injects disk failures to data dirs by replacing these data dirs with
+   * regular files.
+   *
+   * @param dirs data directories.
+   * @throws IOException on I/O error.
+   */
+  public static void injectDataDirFailure(File... dirs) throws IOException {
+for (File dir : dirs) {
+  File renamedTo = new File(dir.getPath() + DIR_FAILURE_SUFFIX);
+  if (renamedTo.exists()) {
+throw new IOException(String.format(
+"Can not inject failure to dir: %s because %s exists.",
+dir, renamedTo));
+  }
+  if (!dir.renameTo(renamedTo)) {
+throw new IOException(String.format("Failed to rename %s to %s.",
+dir, renamedTo));
+  }
+  if (!dir.createNewFile()) {
+throw new IOException(String.format(
+"Failed to create file %s to inject disk failure.", dir));
+  }
+}
+  }
+
+  /**
+   * Restore the injected data dir failures.
+   *
+   * @see {@link #injectDataDirFailures}.
+   * @param dirs data directories.
+   * @throws IOException
+   */
+  public static void restoreDataDirFromFailure(File... dirs)
+  throws IOException {
+for (File dir : dirs) {
+  File renamedDir = new File(dir.getPath() + DIR_FAILURE_SUFFIX);
+  if (renamedDir.exists()) {
+if (dir.exists()) {
+  if (!dir.isFile()) {
+throw new IOException(
+"Injected failure data dir is supposed to be file: " + dir);
+  }
+  if (!dir.delete()) {
+throw new IOException(
+"Failed to delete injected failure data dir: " + dir);
+  }
+}
+if (!renamedDir.renameTo(dir)) {
+  throw new 
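
Restated minimally, the injection trick swaps the data directory for an ordinary file of the same name, so anything expecting a writable directory fails exactly as on a dead disk, and restore undoes the swap. A condensed sketch of the same two steps (error handling trimmed, not the actual test utility):

import java.io.File;
import java.io.IOException;

class DiskFailure {
  static void inject(File dir) throws IOException {
    File saved = new File(dir.getPath() + ".origin");
    if (!dir.renameTo(saved)) {
      throw new IOException("rename failed for " + dir);
    }
    if (!dir.createNewFile()) {            // plain file where a dir was
      throw new IOException("could not create placeholder file " + dir);
    }
  }

  static void restore(File dir) throws IOException {
    File saved = new File(dir.getPath() + ".origin");
    if (!dir.delete() || !saved.renameTo(dir)) {
      throw new IOException("restore failed for " + dir);
    }
  }
}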

[29/50] [abbrv] hadoop git commit: HDFS-7716. Erasure Coding: extend BlockInfo to handle EC info. Contributed by Jing Zhao.

2015-03-24 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/249ca621/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index be16a87..fa7f263 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -24,6 +24,7 @@ import java.util.List;
 import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
@@ -80,10 +81,10 @@ public class DatanodeStorageInfo {
   /**
* Iterates over the list of blocks belonging to the data-node.
*/
-  class BlockIterator implements Iterator<BlockInfoContiguous> {
-private BlockInfoContiguous current;
+  class BlockIterator implements Iterator<BlockInfo> {
+private BlockInfo current;
 
-BlockIterator(BlockInfoContiguous head) {
+BlockIterator(BlockInfo head) {
   this.current = head;
 }
 
@@ -91,8 +92,8 @@ public class DatanodeStorageInfo {
   return current != null;
 }
 
-public BlockInfoContiguous next() {
-  BlockInfoContiguous res = current;
+public BlockInfo next() {
+  BlockInfo res = current;
   current = 
current.getNext(current.findStorageInfo(DatanodeStorageInfo.this));
   return res;
 }
@@ -112,7 +113,7 @@ public class DatanodeStorageInfo {
   private volatile long remaining;
   private long blockPoolUsed;
 
-  private volatile BlockInfoContiguous blockList = null;
+  private volatile BlockInfo blockList = null;
   private int numBlocks = 0;
 
   // The ID of the last full block report which updated this storage.
@@ -226,7 +227,7 @@ public class DatanodeStorageInfo {
 return blockPoolUsed;
   }
 
-  public AddBlockResult addBlock(BlockInfoContiguous b) {
+  public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) {
 // First check whether the block belongs to a different storage
 // on the same DN.
 AddBlockResult result = AddBlockResult.ADDED;
@@ -245,13 +246,21 @@ public class DatanodeStorageInfo {
 }
 
 // add to the head of the data-node list
-b.addStorage(this);
+b.addStorage(this, reportedBlock);
+insertToList(b);
+return result;
+  }
+
+  AddBlockResult addBlock(BlockInfoContiguous b) {
+return addBlock(b, b);
+  }
+
+  public void insertToList(BlockInfo b) {
 blockList = b.listInsert(blockList, this);
 numBlocks++;
-return result;
   }
 
-  public boolean removeBlock(BlockInfoContiguous b) {
+  public boolean removeBlock(BlockInfo b) {
 blockList = b.listRemove(blockList, this);
 if (b.removeStorage(this)) {
   numBlocks--;
@@ -265,16 +274,15 @@ public class DatanodeStorageInfo {
 return numBlocks;
   }
   
-  Iterator<BlockInfoContiguous> getBlockIterator() {
+  Iterator<BlockInfo> getBlockIterator() {
 return new BlockIterator(blockList);
-
   }
 
   /**
* Move block to the head of the list of blocks belonging to the data-node.
* @return the index of the head of the blockList
*/
-  int moveBlockToHead(BlockInfoContiguous b, int curIndex, int headIndex) {
+  int moveBlockToHead(BlockInfo b, int curIndex, int headIndex) {
 blockList = b.moveBlockToHead(blockList, this, curIndex, headIndex);
 return curIndex;
   }
@@ -284,7 +292,7 @@ public class DatanodeStorageInfo {
* @return the head of the blockList
*/
   @VisibleForTesting
-  BlockInfoContiguous getBlockListHeadForTesting(){
+  BlockInfo getBlockListHeadForTesting(){
 return blockList;
   }
 
@@ -371,6 +379,6 @@ public class DatanodeStorageInfo {
   }
 
   static enum AddBlockResult {
-ADDED, REPLACED, ALREADY_EXIST;
+ADDED, REPLACED, ALREADY_EXIST
   }
 }
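
The new addBlock(BlockInfo, Block reportedBlock) overload separates the block the NameNode stores from the replica the DataNode reported, which matters once a stored striped group is reported via one of its internal blocks; the old contiguous overload simply passes the block as its own report. A stand-in sketch of that shape (not the Hadoop classes):

class StorageSketch {
  static class Block {}
  static class StoredBlock extends Block {}

  String addBlock(StoredBlock stored, Block reported) {
    // Link 'stored' into this storage's list; 'reported' tells the striped
    // code path which internal block this DataNode actually holds.
    return "ADDED";
  }

  String addBlock(StoredBlock contiguous) {
    return addBlock(contiguous, contiguous);  // contiguous: block == report
  }
}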

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249ca621/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicaUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicaUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicaUnderConstruction.java
new file mode 100644
index 000..f4600cb7
--- /dev/null
+++ 

[48/50] [abbrv] hadoop git commit: HDFS-7827. Erasure Coding: support striped blocks in non-protobuf fsimage. Contributed by Hui Zheng.

2015-03-24 Thread zhz
HDFS-7827. Erasure Coding: support striped blocks in non-protobuf fsimage. 
Contributed by Hui Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f91d9b24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f91d9b24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f91d9b24

Branch: refs/heads/HDFS-7285
Commit: f91d9b24dbf20bfd794b96b2caad36c7dfba4a21
Parents: f879704
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 23 15:10:10 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:35 2015 -0700

--
 .../blockmanagement/BlockInfoStriped.java   |  11 +-
 .../hdfs/server/namenode/FSImageFormat.java |  62 ++--
 .../server/namenode/FSImageSerialization.java   |  78 +++---
 .../blockmanagement/TestBlockInfoStriped.java   |  34 +
 .../hdfs/server/namenode/TestFSImage.java   | 148 ++-
 5 files changed, 300 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f91d9b24/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index cef8318..30b5ee7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import java.io.DataOutput;
+import java.io.IOException;
 
 /**
  * Subclass of {@link BlockInfo}, presenting a block group in erasure coding.
@@ -206,6 +208,13 @@ public class BlockInfoStriped extends BlockInfo {
 return num;
   }
 
+  @Override
+  public void write(DataOutput out) throws IOException {
+out.writeShort(dataBlockNum);
+out.writeShort(parityBlockNum);
+super.write(out);
+  }
+
   /**
* Convert a complete block to an under construction block.
* @return BlockInfoUnderConstruction -  an under construction block.
@@ -215,7 +224,7 @@ public class BlockInfoStriped extends BlockInfo {
 final BlockInfoStripedUnderConstruction ucBlock;
 if(isComplete()) {
   ucBlock = new BlockInfoStripedUnderConstruction(this, getDataBlockNum(),
-  getParityBlockNum(),  s, targets);
+  getParityBlockNum(), s, targets);
   ucBlock.setBlockCollection(getBlockCollection());
 } else {
   // the block is already under construction
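
The write() override above prepends the striping geometry to whatever the base class serializes, so old-format image records stay self-describing. A self-contained sketch of that framing; the base field order (id, numBytes, genStamp) is an assumption for illustration, not taken from the diff:

import java.io.ByteArrayOutputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

class StripedBlockRecord {
  final short dataBlockNum = 6, parityBlockNum = 3;
  final long id = 42L, numBytes = 1L << 20, genStamp = 1001L;

  void write(DataOutput out) throws IOException {
    out.writeShort(dataBlockNum);   // striping geometry first...
    out.writeShort(parityBlockNum);
    out.writeLong(id);              // ...then the assumed base block fields
    out.writeLong(numBytes);
    out.writeLong(genStamp);
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    new StripedBlockRecord().write(new DataOutputStream(buf));
    System.out.println(buf.size() + " bytes");  // 2+2+8+8+8 = 28
  }
}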

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f91d9b24/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 2e6e741..ad96863 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -47,13 +47,16 @@ import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ 

[16/50] [abbrv] hadoop git commit: HDFS-7977. NFS couldn't take percentile intervals. Contributed by Brandon Li

2015-03-24 Thread zhz
HDFS-7977. NFS couldn't take percentile intervals. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/570a83ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/570a83ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/570a83ae

Branch: refs/heads/HDFS-7285
Commit: 570a83ae80faf2076966acf30588733803327844
Parents: 97a7277
Author: Brandon Li brando...@apache.org
Authored: Tue Mar 24 10:49:16 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Tue Mar 24 10:49:16 2015 -0700

--
 .../org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java |  1 -
 .../org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java   | 13 ++---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  2 ++
 .../hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md| 12 
 4 files changed, 20 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/570a83ae/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
index 09ee579..05cc0b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
@@ -72,7 +72,6 @@ public class NfsConfigKeys {
   public static final String NFS_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + NFS_HTTPS_PORT_DEFAULT;
   
   public static final String  NFS_METRICS_PERCENTILES_INTERVALS_KEY = "nfs.metrics.percentiles.intervals";
-  public static final String  NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT = "";
   
   /*
* HDFS super-user is the user with the same identity as NameNode process

http://git-wip-us.apache.org/repos/asf/hadoop/blob/570a83ae/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
index d36ea73..880a8a6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
@@ -90,9 +90,9 @@ public class Nfs3Metrics {
   readNanosQuantiles[i] = registry.newQuantiles("readProcessNanos"
   + interval + "s", "Read process in ns", "ops", "latency", interval);
   writeNanosQuantiles[i] = registry.newQuantiles("writeProcessNanos"
-  + interval + "s", " process in ns", "ops", "latency", interval);
+  + interval + "s", "Write process in ns", "ops", "latency", interval);
   commitNanosQuantiles[i] = registry.newQuantiles("commitProcessNanos"
-  + interval + "s", "Read process in ns", "ops", "latency", interval);
+  + interval + "s", "Commit process in ns", "ops", "latency", interval);
 }
   }
 
@@ -101,10 +101,9 @@ public class Nfs3Metrics {
 MetricsSystem ms = DefaultMetricsSystem.instance();
 JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);
 
-// Percentile measurement is [,,,] by default 
-int[] intervals = conf.getInts(conf.get(
-NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY,
-NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT));
+// Percentile measurement is [50th,75th,90th,95th,99th] currently 
+int[] intervals = conf
+.getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
 return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
   }
   
@@ -217,4 +216,4 @@ public class Nfs3Metrics {
 }
   }
 
-}
\ No newline at end of file
+}
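
For context, the fix boils down to reading the key as a comma-separated int list. A minimal sketch (interval values are illustrative, not from the patch):

    Configuration conf = new Configuration();
    conf.set(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY, "60,600");
    // getInts() parses the comma-separated list directly; an unset or empty
    // key yields an empty array, which leaves the quantile metrics disabled.
    int[] intervals = conf.getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);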

http://git-wip-us.apache.org/repos/asf/hadoop/blob/570a83ae/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3725a03..5dae029 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1260,6 +1260,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7961. Trigger full block report after hot swapping disk. (Eddy Xu via 
wang)
 
+HDFS-7977. NFS couldn't take percentile intervals (brandonli)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, 

[38/50] [abbrv] hadoop git commit: HDFS-7872. Erasure Coding: INodeFile.dumpTreeRecursively() supports to print striped blocks. Contributed by Takuya Fukudome.

2015-03-24 Thread zhz
HDFS-7872. Erasure Coding: INodeFile.dumpTreeRecursively() supports to print 
striped blocks. Contributed by Takuya Fukudome.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eee65b32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eee65b32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eee65b32

Branch: refs/heads/HDFS-7285
Commit: eee65b3283d1af1836555aa5d873eba9a2f1c3ae
Parents: 57551f1
Author: Jing Zhao ji...@apache.org
Authored: Thu Mar 5 16:44:38 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:33 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee65b32/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 2906996..8a6bb69 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -862,8 +862,8 @@ public class INodeFile extends INodeWithAdditionalFields
     out.print(", fileSize=" + computeFileSize(snapshotId));
     // only compare the first block
     out.print(", blocks=");
-out.print(blocks == null || blocks.length == 0? null: blocks[0]);
-// TODO print striped blocks
+BlockInfo[] blks = getBlocks();
+out.print(blks == null || blks.length == 0? null: blks[0]);
 out.println();
   }
 



[30/50] [abbrv] hadoop git commit: HDFS-7716. Erasure Coding: extend BlockInfo to handle EC info. Contributed by Jing Zhao.

2015-03-24 Thread zhz
HDFS-7716. Erasure Coding: extend BlockInfo to handle EC info. Contributed by 
Jing Zhao.

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/249ca621
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/249ca621
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/249ca621

Branch: refs/heads/HDFS-7285
Commit: 249ca621cfbc8c23d2da36eb9b7336349aef69bd
Parents: 97ce9b9
Author: Jing Zhao ji...@apache.org
Authored: Tue Feb 10 17:54:10 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:32 2015 -0700

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |   1 +
 .../server/blockmanagement/BlockCollection.java |  13 +-
 .../server/blockmanagement/BlockIdManager.java  |   7 +-
 .../hdfs/server/blockmanagement/BlockInfo.java  | 339 +
 .../blockmanagement/BlockInfoContiguous.java| 363 +++
 .../BlockInfoContiguousUnderConstruction.java   | 137 +--
 .../blockmanagement/BlockInfoStriped.java   | 179 +
 .../server/blockmanagement/BlockManager.java| 188 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  |  46 +--
 .../CacheReplicationMonitor.java|  10 +-
 .../blockmanagement/DatanodeDescriptor.java |  22 +-
 .../blockmanagement/DatanodeStorageInfo.java|  38 +-
 .../ReplicaUnderConstruction.java   | 119 ++
 .../hdfs/server/namenode/FSDirectory.java   |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  24 +-
 .../hdfs/server/namenode/NamenodeFsck.java  |   3 +-
 .../snapshot/FSImageFormatPBSnapshot.java   |   4 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   4 +-
 .../server/blockmanagement/TestBlockInfo.java   |   6 +-
 .../blockmanagement/TestBlockInfoStriped.java   | 219 +++
 .../blockmanagement/TestBlockManager.java   |   4 +-
 .../blockmanagement/TestReplicationPolicy.java  |   2 +-
 22 files changed, 1125 insertions(+), 607 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/249ca621/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index de60b6e..245b630 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -184,5 +184,6 @@ public class HdfsConstants {
 
   public static final byte NUM_DATA_BLOCKS = 3;
   public static final byte NUM_PARITY_BLOCKS = 2;
+  public static final long BLOCK_GROUP_INDEX_MASK = 15;
   public static final byte MAX_BLOCKS_IN_GROUP = 16;
 }
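
A sketch of the arithmetic these two constants imply (one reading of the layout, not code from the patch; blockId is a hypothetical striped block ID):

    // The low 4 bits of a striped block ID index the block within its
    // 16-member group; masking them off recovers the group's base ID.
    long groupBaseId = blockId & ~HdfsConstants.BLOCK_GROUP_INDEX_MASK;
    int indexInGroup = (int) (blockId & HdfsConstants.BLOCK_GROUP_INDEX_MASK); // 0..15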

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249ca621/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 1547611..974cac3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -39,12 +39,12 @@ public interface BlockCollection {
   public ContentSummary computeContentSummary();
 
   /**
-   * @return the number of blocks
+   * @return the number of blocks or block groups
*/ 
   public int numBlocks();
 
   /**
-   * Get the blocks.
+   * Get the blocks or block groups.
*/
   public BlockInfoContiguous[] getBlocks();
 
@@ -55,8 +55,8 @@ public interface BlockCollection {
   public long getPreferredBlockSize();
 
   /**
-   * Get block replication for the collection 
-   * @return block replication value
+   * Get block replication for the collection.
+   * @return block replication value. Return 0 if the file is erasure coded.
*/
   public short getBlockReplication();
 
@@ -71,7 +71,7 @@ public interface BlockCollection {
   public String getName();
 
   /**
-   * Set the block at the given index.
+   * Set the block/block-group at the given index.
*/
   public void setBlock(int index, BlockInfoContiguous blk);
 
@@ -79,7 +79,8 @@ public interface 

[44/50] [abbrv] hadoop git commit: HADOOP-11707. Add factory to create raw erasure coder. Contributed by Kai Zheng

2015-03-24 Thread zhz
HADOOP-11707. Add factory to create raw erasure coder.  Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f5926d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f5926d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f5926d3

Branch: refs/heads/HDFS-7285
Commit: 9f5926d331c90f5b5011c0ee01c0c50140c9702c
Parents: 25806c0
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Mar 20 15:07:00 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:35 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 +-
 .../rawcoder/JRSRawErasureCoderFactory.java | 34 ++
 .../rawcoder/RawErasureCoderFactory.java| 38 
 .../rawcoder/XorRawErasureCoderFactory.java | 34 ++
 4 files changed, 108 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f5926d3/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index e27ff5c..f566f0e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -24,4 +24,5 @@
 HADOOP-11706. Refine a little bit erasure coder API. Contributed by Kai 
Zheng
 ( Kai Zheng )
 
-
+HADOOP-11707. Add factory to create raw erasure coder. Contributed by Kai 
Zheng
+( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f5926d3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
new file mode 100644
index 000..d6b40aa
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+/**
+ * A raw coder factory for raw Reed-Solomon coder in Java.
+ */
+public class JRSRawErasureCoderFactory implements RawErasureCoderFactory {
+
+  @Override
+  public RawErasureEncoder createEncoder() {
+return new JRSRawEncoder();
+  }
+
+  @Override
+  public RawErasureDecoder createDecoder() {
+return new JRSRawDecoder();
+  }
+}
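
A minimal usage sketch of the new factory (caller code is illustrative, not from the patch):

    // One factory yields a matched encoder/decoder pair for the same codec.
    RawErasureCoderFactory factory = new JRSRawErasureCoderFactory();
    RawErasureEncoder encoder = factory.createEncoder();
    RawErasureDecoder decoder = factory.createDecoder();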

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f5926d3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
new file mode 100644
index 000..95a1cfe
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in 

[42/50] [abbrv] hadoop git commit: HDFS-7912. Erasure Coding: track BlockInfo instead of Block in UnderReplicatedBlocks and PendingReplicationBlocks. Contributed by Jing Zhao.

2015-03-24 Thread zhz
HDFS-7912. Erasure Coding: track BlockInfo instead of Block in 
UnderReplicatedBlocks and PendingReplicationBlocks. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c0e02ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c0e02ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c0e02ae

Branch: refs/heads/HDFS-7285
Commit: 4c0e02ae72374e86782ec4851b80cf444103ef6d
Parents: 227cffd
Author: Jing Zhao ji...@apache.org
Authored: Tue Mar 17 10:18:50 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:34 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 47 -
 .../PendingReplicationBlocks.java   | 51 +--
 .../blockmanagement/UnderReplicatedBlocks.java  | 49 +-
 .../hdfs/server/namenode/FSDirAttrOp.java   | 10 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 21 
 .../hadoop/hdfs/server/namenode/INode.java  | 12 ++---
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  4 +-
 .../hdfs/server/namenode/NamenodeFsck.java  | 10 ++--
 .../hadoop/hdfs/server/namenode/SafeMode.java   |  3 +-
 .../blockmanagement/BlockManagerTestUtil.java   |  5 +-
 .../blockmanagement/TestBlockManager.java   |  8 +--
 .../server/blockmanagement/TestNodeCount.java   |  3 +-
 .../TestOverReplicatedBlocks.java   |  5 +-
 .../blockmanagement/TestPendingReplication.java | 19 ---
 .../TestRBWBlockInvalidation.java   |  4 +-
 .../blockmanagement/TestReplicationPolicy.java  | 53 +++-
 .../TestUnderReplicatedBlockQueues.java | 16 +++---
 .../datanode/TestReadOnlySharedStorage.java |  9 ++--
 .../namenode/TestProcessCorruptBlocks.java  |  5 +-
 19 files changed, 180 insertions(+), 154 deletions(-)
--
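
The gist of the type change, as a sketch (illustrative, not patch code): a BlockInfo carries its owning BlockCollection, so queue consumers no longer need a Block-keyed blocksMap lookup.

    // The queues now hand back BlockInfo, which knows its file directly:
    BlockInfo[] timedOutItems = pendingReplications.getTimedOutBlocks();
    if (timedOutItems != null && timedOutItems.length > 0) {
      BlockCollection bc = timedOutItems[0].getBlockCollection();
    }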


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c0e02ae/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a6bccc1..f7a00f0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1336,7 +1336,7 @@ public class BlockManager {
* @return number of blocks scheduled for replication during this iteration.
*/
   int computeReplicationWork(int blocksToProcess) {
-    List<List<Block>> blocksToReplicate = null;
+    List<List<BlockInfo>> blocksToReplicate = null;
 namesystem.writeLock();
 try {
   // Choose the blocks to be replicated
@@ -1354,7 +1354,7 @@ public class BlockManager {
* @return the number of blocks scheduled for replication
*/
   @VisibleForTesting
-  int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
+  int computeReplicationWorkForBlocks(List<List<BlockInfo>> blocksToReplicate) {
 int requiredReplication, numEffectiveReplicas;
    List<DatanodeDescriptor> containingNodes;
 DatanodeDescriptor srcNode;
@@ -1368,7 +1368,7 @@ public class BlockManager {
 try {
   synchronized (neededReplications) {
        for (int priority = 0; priority < blocksToReplicate.size(); priority++) {
-  for (Block block : blocksToReplicate.get(priority)) {
+  for (BlockInfo block : blocksToReplicate.get(priority)) {
 // block should belong to a file
 bc = blocksMap.getBlockCollection(block);
 // abandoned block or block reopened for append
@@ -1452,7 +1452,7 @@ public class BlockManager {
 }
 
 synchronized (neededReplications) {
-  Block block = rw.block;
+  BlockInfo block = rw.block;
   int priority = rw.priority;
   // Recheck since global lock was released
   // block should belong to a file
@@ -1710,7 +1710,7 @@ public class BlockManager {
* and put them back into the neededReplication queue
*/
   private void processPendingReplications() {
-Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
+BlockInfo[] timedOutItems = pendingReplications.getTimedOutBlocks();
 if (timedOutItems != null) {
   namesystem.writeLock();
   try {
@@ -2883,13 +2883,13 @@ public class BlockManager {
   
   /** Set replication for the blocks. */
   public void setReplication(final short oldRepl, final short newRepl,
-  final String src, final Block... blocks) {
+  final String src, 

[36/50] [abbrv] hadoop git commit: HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode. Contributed by Jing Zhao.

2015-03-24 Thread zhz
HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode. 
Contributed by Jing Zhao.

 Conflicts:
 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d7e3914
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d7e3914
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d7e3914

Branch: refs/heads/HDFS-7285
Commit: 7d7e39140186a49c18d25ac1404777968ddc5c35
Parents: 3fa8b6e
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 2 13:44:33 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:33 2015 -0700

--
 .../server/blockmanagement/BlockIdManager.java  |  31 +++-
 .../hdfs/server/blockmanagement/BlockInfo.java  |   4 +-
 .../blockmanagement/BlockInfoContiguous.java|   5 +
 .../blockmanagement/BlockInfoStriped.java   |   8 +-
 .../server/blockmanagement/BlockManager.java|  44 --
 .../hdfs/server/blockmanagement/BlocksMap.java  |  20 ++-
 .../blockmanagement/DecommissionManager.java|   9 +-
 .../hdfs/server/namenode/FSDirectory.java   |  27 +++-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  69 ++---
 .../hdfs/server/namenode/FSImageFormat.java |  12 +-
 .../server/namenode/FSImageFormatPBINode.java   |   5 +-
 .../server/namenode/FSImageFormatProtobuf.java  |   9 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  39 ++---
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  25 +++-
 .../server/namenode/NameNodeLayoutVersion.java  |   3 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|   1 +
 .../hdfs/server/namenode/TestAddBlockgroup.java |  85 ---
 .../server/namenode/TestAddStripedBlocks.java   | 146 +++
 18 files changed, 354 insertions(+), 188 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d7e3914/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 3ae54ce..1d69d74 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -103,21 +103,38 @@ public class BlockIdManager {
   }
 
   /**
-   * Sets the maximum allocated block ID for this filesystem. This is
+   * Sets the maximum allocated contiguous block ID for this filesystem. This 
is
* the basis for allocating new block IDs.
*/
-  public void setLastAllocatedBlockId(long blockId) {
+  public void setLastAllocatedContiguousBlockId(long blockId) {
 blockIdGenerator.skipTo(blockId);
   }
 
   /**
-   * Gets the maximum sequentially allocated block ID for this filesystem
+   * Gets the maximum sequentially allocated contiguous block ID for this
+   * filesystem
*/
-  public long getLastAllocatedBlockId() {
+  public long getLastAllocatedContiguousBlockId() {
 return blockIdGenerator.getCurrentValue();
   }
 
   /**
+   * Sets the maximum allocated striped block ID for this filesystem. This is
+   * the basis for allocating new block IDs.
+   */
+  public void setLastAllocatedStripedBlockId(long blockId) {
+blockGroupIdGenerator.skipTo(blockId);
+  }
+
+  /**
+   * Gets the maximum sequentially allocated striped block ID for this
+   * filesystem
+   */
+  public long getLastAllocatedStripedBlockId() {
+return blockGroupIdGenerator.getCurrentValue();
+  }
+
+  /**
* Sets the current generation stamp for legacy blocks
*/
   public void setGenerationStampV1(long stamp) {
@@ -188,11 +205,11 @@ public class BlockIdManager {
   /**
* Increments, logs and then returns the block ID
*/
-  public long nextBlockId() {
+  public long nextContiguousBlockId() {
 return blockIdGenerator.nextValue();
   }
 
-  public long nextBlockGroupId() {
+  public long nextStripedBlockId() {
 return blockGroupIdGenerator.nextValue();
   }
 
@@ -216,7 +233,7 @@ public class BlockIdManager {
    return id < 0;
   }
 
-  public static long convertToGroupID(long id) {
+  public static long convertToStripedID(long id) {
    return id & (~HdfsConstants.BLOCK_GROUP_INDEX_MASK);
   }
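
A hedged sketch of how the renamed generator pair is meant to be driven (caller code is illustrative; blockIdManager is a hypothetical instance):

    // Contiguous and striped IDs advance on independent sequential generators,
    // so each high-water mark is saved and restored separately.
    long lastContiguous = blockIdManager.getLastAllocatedContiguousBlockId();
    long lastStriped = blockIdManager.getLastAllocatedStripedBlockId();
    // ... later, when loading the image:
    blockIdManager.setLastAllocatedContiguousBlockId(lastContiguous);
    blockIdManager.setLastAllocatedStripedBlockId(lastStriped);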
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d7e3914/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--

[49/50] [abbrv] hadoop git commit: HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng

2015-03-24 Thread zhz
HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69782710
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69782710
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69782710

Branch: refs/heads/HDFS-7285
Commit: 697827102014a90c1c64b76be170d940c78516bc
Parents: 9f5926d
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Mar 20 19:15:52 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:35 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 +
 .../hadoop/fs/CommonConfigurationKeys.java  | 15 
 .../erasurecode/coder/AbstractErasureCoder.java | 65 ++
 .../coder/AbstractErasureDecoder.java   |  6 +-
 .../coder/AbstractErasureEncoder.java   |  6 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  | 83 ++
 .../io/erasurecode/coder/RSErasureEncoder.java  | 47 ++
 .../io/erasurecode/coder/XorErasureDecoder.java |  2 +-
 .../io/erasurecode/coder/XorErasureEncoder.java |  2 +-
 .../erasurecode/coder/TestRSErasureCoder.java   | 92 
 10 files changed, 315 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69782710/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index f566f0e..b69e69a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -26,3 +26,6 @@
 
 HADOOP-11707. Add factory to create raw erasure coder. Contributed by Kai 
Zheng
 ( Kai Zheng )
+
+HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng
+( Kai Zheng )
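
A sketch of wiring the new configuration keys added below (values are illustrative; that the rawcoder key names a factory class is an assumption based on the AbstractErasureCoder javadoc):

    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
        JRSRawErasureCoderFactory.class.getCanonicalName());
    // Keep the pure RS path; skip the XOR shortcut.
    conf.setBoolean(CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_USEXOR_KEY, false);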

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69782710/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 7575496..70fea01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -135,6 +135,21 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   false;
 
   /**
+   * Erasure Coding configuration family
+   */
+
+  /** Supported erasure codec classes */
+  public static final String IO_ERASURECODE_CODECS_KEY = "io.erasurecode.codecs";
+
+  /** Use XOR raw coder when possible for the RS codec */
+  public static final String IO_ERASURECODE_CODEC_RS_USEXOR_KEY =
+      "io.erasurecode.codec.rs.usexor";
+
+  /** Raw coder factory for the RS codec */
+  public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
+      "io.erasurecode.codec.rs.rawcoder";
+
+  /**
* Service Authorization
*/
   public static final String 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69782710/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
index 8d3bc34..0e4de89 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.io.erasurecode.coder;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 
 /**
  * A common class of basic facilities to be shared by encoder and decoder
@@ -31,6 +36,66 @@ public abstract class AbstractErasureCoder
   private int numParityUnits;
   private int chunkSize;
 
+  /**
+   * Create raw decoder using the factory specified by 

[25/50] [abbrv] hadoop git commit: HDFS-7749. Erasure Coding: Add striped block support in INodeFile. Contributed by Jing Zhao.

2015-03-24 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fa8b6e5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
new file mode 100644
index 000..47445be
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+
+/**
+ * Feature for file with striped blocks
+ */
+class FileWithStripedBlocksFeature implements INode.Feature {
+  private BlockInfoStriped[] blocks;
+
+  FileWithStripedBlocksFeature() {
+blocks = new BlockInfoStriped[0];
+  }
+
+  FileWithStripedBlocksFeature(BlockInfoStriped[] blocks) {
+Preconditions.checkArgument(blocks != null);
+this.blocks = blocks;
+  }
+
+  BlockInfoStriped[] getBlocks() {
+return this.blocks;
+  }
+
+  void setBlock(int index, BlockInfoStriped blk) {
+blocks[index] = blk;
+  }
+
+  BlockInfoStriped getLastBlock() {
+return blocks == null || blocks.length == 0 ?
+null : blocks[blocks.length - 1];
+  }
+
+  int numBlocks() {
+return blocks == null ? 0 : blocks.length;
+  }
+
+  void updateBlockCollection(INodeFile file) {
+if (blocks != null) {
+  for (BlockInfoStriped blk : blocks) {
+blk.setBlockCollection(file);
+  }
+}
+  }
+
+  private void setBlocks(BlockInfoStriped[] blocks) {
+this.blocks = blocks;
+  }
+
+  void addBlock(BlockInfoStriped newBlock) {
+if (this.blocks == null) {
+  this.setBlocks(new BlockInfoStriped[]{newBlock});
+} else {
+  int size = this.blocks.length;
+  BlockInfoStriped[] newlist = new BlockInfoStriped[size + 1];
+  System.arraycopy(this.blocks, 0, newlist, 0, size);
+  newlist[size] = newBlock;
+  this.setBlocks(newlist);
+}
+  }
+
+  boolean removeLastBlock(Block oldblock) {
+if (blocks == null || blocks.length == 0) {
+  return false;
+}
+int newSize = blocks.length - 1;
+if (!blocks[newSize].equals(oldblock)) {
+  return false;
+}
+
+//copy to a new list
+BlockInfoStriped[] newlist = new BlockInfoStriped[newSize];
+System.arraycopy(blocks, 0, newlist, 0, newSize);
+setBlocks(newlist);
+return true;
+  }
+
+  void truncateStripedBlocks(int n) {
+final BlockInfoStriped[] newBlocks;
+if (n == 0) {
+  newBlocks = new BlockInfoStriped[0];
+} else {
+  newBlocks = new BlockInfoStriped[n];
+  System.arraycopy(getBlocks(), 0, newBlocks, 0, n);
+}
+// set new blocks
+setBlocks(newBlocks);
+  }
+
+  void clear() {
+this.blocks = null;
+  }
+}
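
A usage sketch exercising only the methods above (driver code and the newGroup variable are illustrative):

    FileWithStripedBlocksFeature sb = new FileWithStripedBlocksFeature();
    sb.addBlock(newGroup);                         // append a BlockInfoStriped group
    BlockInfoStriped last = sb.getLastBlock();
    sb.truncateStripedBlocks(sb.numBlocks() - 1);  // drop that group again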

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fa8b6e5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 3772690..d3c5e3e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION;
 import static 

[35/50] [abbrv] hadoop git commit: HADOOP-11646. Erasure Coder API for encoding and decoding of block group ( Contributed by Kai Zheng )

2015-03-24 Thread zhz
HADOOP-11646. Erasure Coder API for encoding and decoding of block group ( 
Contributed by Kai Zheng )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11b648bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11b648bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11b648bd

Branch: refs/heads/HDFS-7285
Commit: 11b648bdd2c8f28b91e7a39b7d779c1af4fd320c
Parents: eee65b3
Author: Vinayakumar B vinayakum...@apache.org
Authored: Mon Mar 9 12:32:26 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:33 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   2 +
 .../apache/hadoop/io/erasurecode/ECBlock.java   |  80 ++
 .../hadoop/io/erasurecode/ECBlockGroup.java |  82 ++
 .../erasurecode/coder/AbstractErasureCoder.java |  63 +
 .../coder/AbstractErasureCodingStep.java|  59 
 .../coder/AbstractErasureDecoder.java   | 152 +++
 .../coder/AbstractErasureEncoder.java   |  50 
 .../io/erasurecode/coder/ErasureCoder.java  |  77 ++
 .../io/erasurecode/coder/ErasureCodingStep.java |  55 
 .../io/erasurecode/coder/ErasureDecoder.java|  41 +++
 .../erasurecode/coder/ErasureDecodingStep.java  |  52 
 .../io/erasurecode/coder/ErasureEncoder.java|  39 +++
 .../erasurecode/coder/ErasureEncodingStep.java  |  49 
 .../io/erasurecode/coder/XorErasureDecoder.java |  78 ++
 .../io/erasurecode/coder/XorErasureEncoder.java |  45 
 .../erasurecode/rawcoder/RawErasureCoder.java   |   2 +-
 .../erasurecode/coder/TestErasureCoderBase.java | 266 +++
 .../io/erasurecode/coder/TestXorCoder.java  |  50 
 18 files changed, 1241 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11b648bd/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index ee42c84..c17a1bd 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -15,4 +15,6 @@
 HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai 
Zheng
 ( Kai Zheng )
 
+HADOOP-11646. Erasure Coder API for encoding and decoding of block group
+( Kai Zheng via vinayakumarb )
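
A toy sketch using only the constructors defined in the new ECBlock class below:

    ECBlock dataBlock = new ECBlock();               // isParity=false, isErased=false
    ECBlock erasedParity = new ECBlock(true, true);  // a parity unit awaiting reconstruction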
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/11b648bd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlock.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlock.java
new file mode 100644
index 000..956954a
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlock.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode;
+
+/**
+ * A wrapper of block level data source/output that {@link ECChunk}s can be
+ * extracted from. For HDFS, it can be an HDFS block (250MB). Note it only 
cares
+ * about erasure coding specific logic thus avoids coupling with any HDFS block
+ * details. We can have something like HdfsBlock extend it.
+ */
+public class ECBlock {
+
+  private boolean isParity;
+  private boolean isErased;
+
+  /**
+   * A default constructor. isParity and isErased are false by default.
+   */
+  public ECBlock() {
+this(false, false);
+  }
+
+  /**
+   * A constructor specifying isParity and isErased.
+   * @param isParity
+   * @param isErased
+   */
+  public ECBlock(boolean isParity, boolean isErased) {
+this.isParity = isParity;
+this.isErased = isErased;
+  }
+
+  /**
+   * Set true if it's for a parity block.
+   * @param isParity
+   */

[10/50] [abbrv] hadoop git commit: Fix CHANGES.txt for HADOOP-11602.

2015-03-24 Thread zhz
Fix CHANGES.txt for HADOOP-11602.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ca5bd16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ca5bd16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ca5bd16

Branch: refs/heads/HDFS-7285
Commit: 3ca5bd163292e661473017e70b9ca77f5a5b78c0
Parents: 6e891a9
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 24 21:06:26 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Mar 24 21:06:26 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ca5bd16/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4f0cf97..cdb88d2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -415,8 +415,6 @@ Trunk (Unreleased)
 HADOOP-10774. Update KerberosTestUtils for hadoop-auth tests when using
 IBM Java (sangamesh via aw)
 
-HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)
-
 HADOOP-11653. shellprofiles should require .sh extension
 (Brahma Reddy Battula via aw)
 
@@ -1105,6 +1103,8 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)
 
+HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)
+
 HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN.
 (Duo Zhang via wheat9)
 



[03/50] [abbrv] hadoop git commit: YARN-3393. Getting application(s) goes wrong when app finishes before starting the attempt. Contributed by Zhijie Shen

2015-03-24 Thread zhz
YARN-3393. Getting application(s) goes wrong when app finishes before
starting the attempt. Contributed by Zhijie Shen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fae455e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fae455e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fae455e

Branch: refs/heads/HDFS-7285
Commit: 9fae455e26e0230107e1c6db58a49a5b6b296cf4
Parents: 2c238ae
Author: Xuan xg...@apache.org
Authored: Mon Mar 23 20:33:16 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Mon Mar 23 20:33:16 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 ...pplicationHistoryManagerOnTimelineStore.java | 13 +++
 ...pplicationHistoryManagerOnTimelineStore.java | 39 +---
 3 files changed, 42 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fae455e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e7d4f59..3d9f271 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -828,6 +828,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3336. FileSystem memory leak in DelegationTokenRenewer.
 (Zhihai Xu via cnauroth)
 
+YARN-3393. Getting application(s) goes wrong when app finishes before
+starting the attempt. (Zhijie Shen via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fae455e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 1010f62..49041c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -517,15 +517,14 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
   if (app.appReport.getCurrentApplicationAttemptId() != null) {
 ApplicationAttemptReport appAttempt =
 
getApplicationAttempt(app.appReport.getCurrentApplicationAttemptId());
-if (appAttempt != null) {
-  app.appReport.setHost(appAttempt.getHost());
-  app.appReport.setRpcPort(appAttempt.getRpcPort());
-  app.appReport.setTrackingUrl(appAttempt.getTrackingUrl());
-  
app.appReport.setOriginalTrackingUrl(appAttempt.getOriginalTrackingUrl());
-}
+app.appReport.setHost(appAttempt.getHost());
+app.appReport.setRpcPort(appAttempt.getRpcPort());
+app.appReport.setTrackingUrl(appAttempt.getTrackingUrl());
+
app.appReport.setOriginalTrackingUrl(appAttempt.getOriginalTrackingUrl());
   }
-} catch (AuthorizationException e) {
+} catch (AuthorizationException | ApplicationAttemptNotFoundException e) {
   // AuthorizationException is thrown because the user doesn't have access
+  // It's possible that the app is finished before the first attempt is 
created.
   app.appReport.setDiagnostics(null);
   app.appReport.setCurrentApplicationAttemptId(null);
 }
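
In effect, an attempt that never started is now handled like an access failure: the report is returned without attempt-level fields. Condensed, the pattern is (sketch, not patch code):

    try {
      // fill host/port/tracking-URL from the current attempt ...
    } catch (AuthorizationException | ApplicationAttemptNotFoundException e) {
      app.appReport.setDiagnostics(null);
      app.appReport.setCurrentApplicationAttemptId(null);
    }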

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fae455e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
 

[34/50] [abbrv] hadoop git commit: Fixed a compiling issue introduced by HADOOP-11705.

2015-03-24 Thread zhz
Fixed a compiling issue introduced by HADOOP-11705.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2825dd75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2825dd75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2825dd75

Branch: refs/heads/HDFS-7285
Commit: 2825dd75b9a542f561b395f1fe9db12c4c04f930
Parents: 840d83d
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Mar 13 00:13:06 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:33 2015 -0700

--
 .../apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2825dd75/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
index 36e061a..d911db9 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
@@ -162,7 +162,7 @@ public abstract class TestErasureCoderBase extends 
TestCoderBase {
 }
 
 encoder.initialize(numDataUnits, numParityUnits, chunkSize);
-encoder.setConf(conf);
+((AbstractErasureCoder)encoder).setConf(conf);
 return encoder;
   }
 
@@ -179,7 +179,7 @@ public abstract class TestErasureCoderBase extends 
TestCoderBase {
 }
 
 decoder.initialize(numDataUnits, numParityUnits, chunkSize);
-decoder.setConf(conf);
+((AbstractErasureCoder)decoder).setConf(conf);
 return decoder;
   }
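
The casts are needed because, at this point in the series, ErasureCoder does not yet expose setConf; HADOOP-11706 below makes the coder interfaces extend Configurable, after which the cast-free form compiles (sketch):

    encoder.initialize(numDataUnits, numParityUnits, chunkSize);
    encoder.setConf(conf);  // setConf/getConf inherited via org.apache.hadoop.conf.Configurable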
 



[43/50] [abbrv] hadoop git commit: HADOOP-11706 Refine a little bit erasure coder API

2015-03-24 Thread zhz
HADOOP-11706 Refine a little bit erasure coder API


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cec33463
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cec33463
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cec33463

Branch: refs/heads/HDFS-7285
Commit: cec3346399d1e5ca1513acc0a8d0227faa35677d
Parents: 4c0e02a
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Mar 18 19:21:37 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:34 2015 -0700

--
 .../io/erasurecode/coder/ErasureCoder.java  |  4 +++-
 .../erasurecode/rawcoder/RawErasureCoder.java   |  4 +++-
 .../hadoop/io/erasurecode/TestCoderBase.java| 17 +---
 .../erasurecode/coder/TestErasureCoderBase.java | 21 +++-
 .../erasurecode/rawcoder/TestJRSRawCoder.java   | 12 +--
 .../erasurecode/rawcoder/TestRawCoderBase.java  |  2 ++
 6 files changed, 31 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cec33463/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
index 68875c0..c5922f3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.io.erasurecode.coder;
 
+import org.apache.hadoop.conf.Configurable;
+
 /**
  * An erasure coder to perform encoding or decoding given a group. Generally it
  * involves calculating necessary internal steps according to codec logic. For
@@ -31,7 +33,7 @@ package org.apache.hadoop.io.erasurecode.coder;
  * of multiple coding steps.
  *
  */
-public interface ErasureCoder {
+public interface ErasureCoder extends Configurable {
 
   /**
* Initialize with the important parameters for the code.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cec33463/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
index 91a9abf..9af5b6c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
+import org.apache.hadoop.conf.Configurable;
+
 /**
  * RawErasureCoder is a common interface for {@link RawErasureEncoder} and
  * {@link RawErasureDecoder} as both encoder and decoder share some properties.
@@ -31,7 +33,7 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
  * low level constructs, since it only takes care of the math calculation with
  * a group of byte buffers.
  */
-public interface RawErasureCoder {
+public interface RawErasureCoder extends Configurable {
 
   /**
* Initialize with the important parameters for the code.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cec33463/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
index 194413a..22fd98d 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.io.erasurecode;
 
+import org.apache.hadoop.conf.Configuration;
+
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Random;
 
-import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -31,6 +32,7 @@ import static org.junit.Assert.assertTrue;
 public abstract class TestCoderBase {
   protected static Random RAND = new Random();
 
+  private 

[19/50] [abbrv] hadoop git commit: HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding (Kai Zheng via umamahesh)

2015-03-24 Thread zhz
HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding (Kai 
Zheng via umamahesh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21bd6a4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21bd6a4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21bd6a4c

Branch: refs/heads/HDFS-7285
Commit: 21bd6a4cbac991b3ce1237f9f24370398ffa94ea
Parents: 57084f5
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Thu Jan 29 14:15:13 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:31 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  4 +
 .../apache/hadoop/io/erasurecode/ECChunk.java   | 82 +
 .../rawcoder/AbstractRawErasureCoder.java   | 63 +
 .../rawcoder/AbstractRawErasureDecoder.java | 93 
 .../rawcoder/AbstractRawErasureEncoder.java | 93 
 .../erasurecode/rawcoder/RawErasureCoder.java   | 78 
 .../erasurecode/rawcoder/RawErasureDecoder.java | 55 
 .../erasurecode/rawcoder/RawErasureEncoder.java | 54 
 8 files changed, 522 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21bd6a4c/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
new file mode 100644
index 000..8ce5a89
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -0,0 +1,4 @@
+  BREAKDOWN OF HADOOP-11264 SUBTASKS AND RELATED JIRAS (Common part of 
HDFS-7285)
+
+HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding
+(Kai Zheng via umamahesh)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21bd6a4c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
new file mode 100644
index 000..f84eb11
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A wrapper for ByteBuffer or bytes array for an erasure code chunk.
+ */
+public class ECChunk {
+
+  private ByteBuffer chunkBuffer;
+
+  /**
+   * Wrapping a ByteBuffer
+   * @param buffer
+   */
+  public ECChunk(ByteBuffer buffer) {
+this.chunkBuffer = buffer;
+  }
+
+  /**
+   * Wrapping a bytes array
+   * @param buffer
+   */
+  public ECChunk(byte[] buffer) {
+this.chunkBuffer = ByteBuffer.wrap(buffer);
+  }
+
+  /**
+   * Convert to ByteBuffer
+   * @return ByteBuffer
+   */
+  public ByteBuffer getBuffer() {
+return chunkBuffer;
+  }
+
+  /**
+   * Convert an array of this chunks to an array of ByteBuffers
+   * @param chunks
+   * @return an array of ByteBuffers
+   */
+  public static ByteBuffer[] toBuffers(ECChunk[] chunks) {
+    ByteBuffer[] buffers = new ByteBuffer[chunks.length];
+
+    for (int i = 0; i < chunks.length; i++) {
+      buffers[i] = chunks[i].getBuffer();
+    }
+
+    return buffers;
+  }
+
+  /**
+   * Convert an array of this chunks to an array of byte array
+   * @param chunks
+   * @return an array of byte array
+   */
+  public static byte[][] toArray(ECChunk[] chunks) {
+    byte[][] bytesArr = new byte[chunks.length][];
+
+    for (int i = 0; i < chunks.length; i++) {
+      bytesArr[i] = chunks[i].getBuffer().array();
+    }
+
+    return bytesArr;
+  }
+}
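
A short usage sketch of the wrapper (buffer sizes are illustrative):

    // Wrap a heap array and a ByteBuffer, then view both as ByteBuffers
    // for a raw coder call.
    ECChunk c0 = new ECChunk(new byte[4096]);
    ECChunk c1 = new ECChunk(ByteBuffer.allocate(4096));
    ByteBuffer[] buffers = ECChunk.toBuffers(new ECChunk[]{c0, c1});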


[27/50] [abbrv] hadoop git commit: HADOOP-11541. Raw XOR coder

2015-03-24 Thread zhz
HADOOP-11541. Raw XOR coder


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33429443
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33429443
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33429443

Branch: refs/heads/HDFS-7285
Commit: 3342944381c1138a1821574c61df9000679ff854
Parents: 7680f1d
Author: Kai Zheng dran...@apache.org
Authored: Sun Feb 8 01:40:27 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:32 2015 -0700

--
 .../io/erasurecode/rawcoder/XorRawDecoder.java  |  81 ++
 .../io/erasurecode/rawcoder/XorRawEncoder.java  |  61 +
 .../hadoop/io/erasurecode/TestCoderBase.java| 262 +++
 .../erasurecode/rawcoder/TestRawCoderBase.java  |  96 +++
 .../erasurecode/rawcoder/TestXorRawCoder.java   |  52 
 5 files changed, 552 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33429443/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
new file mode 100644
index 000..98307a7
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A raw decoder in XOR code scheme in pure Java, adapted from HDFS-RAID.
+ */
+public class XorRawDecoder extends AbstractRawErasureDecoder {
+
+  @Override
+  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
+  ByteBuffer[] outputs) {
+assert(erasedIndexes.length == outputs.length);
+assert(erasedIndexes.length = 1);
+
+int bufSize = inputs[0].remaining();
+int erasedIdx = erasedIndexes[0];
+
+// Set the output to zeros.
+for (int j = 0; j < bufSize; j++) {
+  outputs[0].put(j, (byte) 0);
+}
+
+// Process the inputs.
+for (int i = 0; i < inputs.length; i++) {
+  // Skip the erased location.
+  if (i == erasedIdx) {
+continue;
+  }
+
+  for (int j = 0; j < bufSize; j++) {
+outputs[0].put(j, (byte) (outputs[0].get(j) ^ inputs[i].get(j)));
+  }
+}
+  }
+
+  @Override
+  protected void doDecode(byte[][] inputs, int[] erasedIndexes,
+  byte[][] outputs) {
+assert(erasedIndexes.length == outputs.length);
+assert(erasedIndexes.length <= 1);
+
+int bufSize = inputs[0].length;
+int erasedIdx = erasedIndexes[0];
+
+// Set the output to zeros.
+for (int j = 0; j < bufSize; j++) {
+  outputs[0][j] = 0;
+}
+
+// Process the inputs.
+for (int i = 0; i < inputs.length; i++) {
+  // Skip the erased location.
+  if (i == erasedIdx) {
+continue;
+  }
+
+  for (int j = 0; j < bufSize; j++) {
+outputs[0][j] ^= inputs[i][j];
+  }
+}
+  }
+
+}
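
To make the decode loops above concrete, here is a self-contained sketch of the XOR scheme (plain arrays, not the Hadoop coder API): the parity cell is the XOR of all data cells, so any single erased cell is recovered by XOR-ing the parity with the surviving cells, exactly the skip-and-accumulate pattern in doDecode():

    import java.util.Arrays;

    public class XorSchemeSketch {
      public static void main(String[] args) {
        byte[][] data = { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };

        // Encode: parity[j] = data[0][j] ^ data[1][j] ^ data[2][j]
        byte[] parity = new byte[3];
        for (byte[] cell : data) {
          for (int j = 0; j < parity.length; j++) {
            parity[j] ^= cell[j];
          }
        }

        // Suppose data[1] is erased; recover it from parity and survivors.
        byte[] recovered = parity.clone();
        for (int i = 0; i < data.length; i++) {
          if (i == 1) {
            continue; // skip the erased location, as in doDecode()
          }
          for (int j = 0; j < recovered.length; j++) {
            recovered[j] ^= data[i][j];
          }
        }

        System.out.println(Arrays.toString(recovered)); // [4, 5, 6]
      }
    }

This also shows why the decoder asserts at most one erasure: with a single parity cell, two missing cells cannot both be solved for.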

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33429443/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
new file mode 100644
index 000..99b20b9
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the 

[09/50] [abbrv] hadoop git commit: HADOOP-11609. Correct credential commands info in CommandsManual.html#credential. Contributed by Varun Saxena.

2015-03-24 Thread zhz
HADOOP-11609. Correct credential commands info in 
CommandsManual.html#credential. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e891a92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e891a92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e891a92

Branch: refs/heads/HDFS-7285
Commit: 6e891a921e00b122390a976dfd13838472a7fcc6
Parents: c6c396f
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 24 20:57:39 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Mar 24 20:57:39 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/security/alias/CredentialShell.java   | 2 +-
 .../hadoop-common/src/site/markdown/CommandsManual.md| 4 ++--
 3 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e891a92/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 430015d..4f0cf97 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1136,6 +1136,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11729. Fix link to cgroups doc in site.xml. (Masatake Iwasaki via
 ozawa)
 
+HADOOP-11609. Correct credential commands info in
+CommandsManual.html#credential. (Varun Saxena via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e891a92/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
index e8a721f..265ed16 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
@@ -81,7 +81,7 @@ public class CredentialShell extends Configured implements 
Tool {
* <pre>
* % hadoop credential create alias [-provider providerPath]
* % hadoop credential list [-provider providerPath]
-   * % hadoop credential delete alias [-provider providerPath] [-i]
+   * % hadoop credential delete alias [-provider providerPath] [-f]
* </pre>
* @param args
* @return 0 if the argument(s) were recognized, 1 otherwise

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e891a92/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 33986ae..207160e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -128,8 +128,8 @@ Usage: `hadoop credential subcommand [options]`
 
 | COMMAND\_OPTION | Description |
 |: |: |
-| create *alias* [-v *value*][-provider *provider-path*] | Prompts the user 
for a credential to be stored as the given alias when a value is not provided 
via `-v`. The *hadoop.security.credential.provider.path* within the 
core-site.xml file will be used unless a `-provider` is indicated. |
-| delete *alias* [-i][-provider *provider-path*] | Deletes the credential with 
the provided alias and optionally warns the user when `--interactive` is used. 
The *hadoop.security.credential.provider.path* within the core-site.xml file 
will be used unless a `-provider` is indicated. |
+| create *alias* [-provider *provider-path*] | Prompts the user for a 
credential to be stored as the given alias. The 
*hadoop.security.credential.provider.path* within the core-site.xml file will 
be used unless a `-provider` is indicated. |
+| delete *alias* [-provider *provider-path*] [-f] | Deletes the credential 
with the provided alias. The *hadoop.security.credential.provider.path* within 
the core-site.xml file will be used unless a `-provider` is indicated. The 
command asks for confirmation unless `-f` is specified |
 | list [-provider *provider-path*] | Lists all of the credential aliases The 
*hadoop.security.credential.provider.path* within the core-site.xml file will 
be used unless a `-provider` is indicated. |
 
 Command to manage credentials, passwords 
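
Behind these subcommands sits the CredentialProvider API, so the same operations can be scripted in Java; a hedged sketch, assuming a throwaway local JCEKS keystore as the provider path:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.alias.CredentialProvider;
    import org.apache.hadoop.security.alias.CredentialProviderFactory;

    public class CredentialShellSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Equivalent of passing -provider on the command line.
        conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
            "jceks://file/tmp/test.jceks");

        List<CredentialProvider> providers =
            CredentialProviderFactory.getProviders(conf);
        CredentialProvider provider = providers.get(0);

        // Roughly what `hadoop credential create myalias` does after prompting.
        provider.createCredentialEntry("myalias", "mypassword".toCharArray());
        provider.flush();

        // Roughly what `hadoop credential delete myalias -f` does (no prompt).
        provider.deleteCredentialEntry("myalias");
        provider.flush();
      }
    }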

[33/50] [abbrv] hadoop git commit: HADOOP-11705. Make erasure coder configurable. Contributed by Kai Zheng

2015-03-24 Thread zhz
HADOOP-11705. Make erasure coder configurable. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/840d83dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/840d83dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/840d83dd

Branch: refs/heads/HDFS-7285
Commit: 840d83dd6e26157a61207d4e3988e360e6a2b686
Parents: 2acf8b1
Author: drankye kai.zh...@intel.com
Authored: Thu Mar 12 23:35:22 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:33 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  4 +++
 .../erasurecode/coder/AbstractErasureCoder.java |  5 ++-
 .../rawcoder/AbstractRawErasureCoder.java   |  5 ++-
 .../hadoop/io/erasurecode/TestCoderBase.java|  6 
 .../erasurecode/coder/TestErasureCoderBase.java | 36 +---
 .../erasurecode/rawcoder/TestRawCoderBase.java  | 13 +--
 6 files changed, 60 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d83dd/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index c17a1bd..a97dc34 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -18,3 +18,7 @@
 HADOOP-11646. Erasure Coder API for encoding and decoding of block group
 ( Kai Zheng via vinayakumarb )
 
+HADOOP-11705. Make erasure coder configurable. Contributed by Kai Zheng
+( Kai Zheng )
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d83dd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
index f2cc041..8d3bc34 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
@@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.io.erasurecode.coder;
 
+import org.apache.hadoop.conf.Configured;
+
 /**
  * A common class of basic facilities to be shared by encoder and decoder
  *
  * It implements the {@link ErasureCoder} interface.
  */
-public abstract class AbstractErasureCoder implements ErasureCoder {
+public abstract class AbstractErasureCoder
+extends Configured implements ErasureCoder {
 
   private int numDataUnits;
   private int numParityUnits;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d83dd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
index 74d2ab6..e6f3d92 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
@@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
+import org.apache.hadoop.conf.Configured;
+
 /**
  * A common class of basic facilities to be shared by encoder and decoder
  *
  * It implements the {@link RawErasureCoder} interface.
  */
-public abstract class AbstractRawErasureCoder implements RawErasureCoder {
+public abstract class AbstractRawErasureCoder
+extends Configured implements RawErasureCoder {
 
   private int numDataUnits;
   private int numParityUnits;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/840d83dd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
index 3c4288c..194413a 100644
--- 

[15/50] [abbrv] hadoop git commit: YARN-3383. AdminService should use warn instead of info to log exception when operation fails. (Li Lu via wangda)

2015-03-24 Thread zhz
YARN-3383. AdminService should use warn instead of info to log exception when 
operation fails. (Li Lu via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97a7277a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97a7277a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97a7277a

Branch: refs/heads/HDFS-7285
Commit: 97a7277a2d696474b5c8e2d814c8291d4bde246e
Parents: eda0254
Author: Wangda Tan wan...@apache.org
Authored: Tue Mar 24 10:33:09 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Tue Mar 24 10:38:54 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/yarn/server/resourcemanager/AdminService.java   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97a7277a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8a5e142..6a495d9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -103,6 +103,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3241. FairScheduler handles invalid queue names inconsistently. 
 (Zhihai Xu via kasha)
 
+YARN-3383. AdminService should use warn instead of info to log 
exception 
+when operation fails. (Li Lu via wangda)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97a7277a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 22b92c2..12714de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -699,7 +699,7 @@ public class AdminService extends CompositeService 
implements
 
   private YarnException logAndWrapException(Exception exception, String user,
   String argName, String msg) throws YarnException {
-LOG.info("Exception " + msg, exception);
+LOG.warn("Exception " + msg, exception);
 RMAuditLogger.logFailure(user, argName, "", 
 "AdminService", "Exception " + msg);
 return RPCUtil.getRemoteException(exception);



[14/50] [abbrv] hadoop git commit: HDFS-7875. Improve log message when wrong value configured for dfs.datanode.failed.volumes.tolerated. Contributed by Nijel.

2015-03-24 Thread zhz
HDFS-7875. Improve log message when wrong value configured for 
dfs.datanode.failed.volumes.tolerated. Contributed by Nijel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eda02540
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eda02540
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eda02540

Branch: refs/heads/HDFS-7285
Commit: eda02540ce53732585b3f31411b2e65db569eb25
Parents: 51f1f49
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 24 23:03:30 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 24 23:06:18 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 4 
 .../hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java | 6 --
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eda02540/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4f3937a..3725a03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -321,6 +321,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-7875. Improve log message when wrong value configured for
+dfs.datanode.failed.volumes.tolerated.
+(nijel via harsh)
+
 HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)
 
 HDFS-7835. make initial sleeptime in locateFollowingBlock configurable for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eda02540/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index d42c00c..05c4871 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -276,8 +276,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 this.validVolsRequired = volsConfigured - volFailuresTolerated;
 
 if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
-  throw new DiskErrorException("Invalid volume failure "
-  + " config value: " + volFailuresTolerated);
+  throw new DiskErrorException("Invalid value configured for "
+  + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
+  + ". Value configured is either less than 0 or >= "
+  + "to the number of configured volumes (" + volsConfigured + ").");
 }
 if (volsFailed > volFailuresTolerated) {
   throw new DiskErrorException("Too many failed volumes - "



[21/50] [abbrv] hadoop git commit: HDFS-7347. Configurable erasure coding policy for individual files and directories ( Contributed by Zhe Zhang )

2015-03-24 Thread zhz
HDFS-7347. Configurable erasure coding policy for individual files and 
directories ( Contributed by Zhe Zhang )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b810d03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b810d03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b810d03

Branch: refs/heads/HDFS-7285
Commit: 5b810d03a6d67f1409b4f946884cb413ffee32d8
Parents: a16bfff
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Nov 6 10:03:26 2014 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:31 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  4 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |  2 +
 .../BlockStoragePolicySuite.java|  5 ++
 .../hadoop/hdfs/TestBlockStoragePolicy.java | 12 +++-
 .../TestBlockInitialEncoding.java   | 75 
 5 files changed, 95 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b810d03/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
new file mode 100644
index 000..2ef8527
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -0,0 +1,4 @@
+  BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS
+
+HDFS-7347. Configurable erasure coding policy for individual files and
+directories ( Zhe Zhang via vinayakumarb )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b810d03/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 7cf8a47..54c650b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -171,6 +171,7 @@ public class HdfsConstants {
   public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
   public static final String HOT_STORAGE_POLICY_NAME = "HOT";
   public static final String WARM_STORAGE_POLICY_NAME = "WARM";
+  public static final String EC_STORAGE_POLICY_NAME = "EC";
   public static final String COLD_STORAGE_POLICY_NAME = "COLD";
 
   public static final byte MEMORY_STORAGE_POLICY_ID = 15;
@@ -178,5 +179,6 @@ public class HdfsConstants {
   public static final byte ONESSD_STORAGE_POLICY_ID = 10;
   public static final byte HOT_STORAGE_POLICY_ID = 7;
   public static final byte WARM_STORAGE_POLICY_ID = 5;
+  public static final byte EC_STORAGE_POLICY_ID = 4;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b810d03/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index 020cb5f..3d121cc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -78,6 +78,11 @@ public class BlockStoragePolicySuite {
 new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
 new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
 new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
+final byte ecId = HdfsConstants.EC_STORAGE_POLICY_ID;
+policies[ecId] = new BlockStoragePolicy(ecId,
+HdfsConstants.EC_STORAGE_POLICY_NAME,
+new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
+new StorageType[]{StorageType.ARCHIVE});
 final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
 policies[coldId] = new BlockStoragePolicy(coldId,
 HdfsConstants.COLD_STORAGE_POLICY_NAME,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b810d03/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
--
diff --git 

[28/50] [abbrv] hadoop git commit: Added the missed entry for commit of HADOOP-11541

2015-03-24 Thread zhz
Added the missed entry for commit of HADOOP-11541


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97ce9b9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97ce9b9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97ce9b9f

Branch: refs/heads/HDFS-7285
Commit: 97ce9b9f20b9149b9f2f14c2fff0d0bf5a120b20
Parents: 3342944
Author: drankye dran...@gmail.com
Authored: Mon Feb 9 22:04:08 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:32 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97ce9b9f/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 2124800..9728f97 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -4,4 +4,7 @@
 (Kai Zheng via umamahesh)
 
 HADOOP-11534. Minor improvements for raw erasure coders
-( Kai Zheng via vinayakumarb )
\ No newline at end of file
+( Kai Zheng via vinayakumarb )
+
+HADOOP-11541. Raw XOR coder
+( Kai Zheng )



[20/50] [abbrv] hadoop git commit: HDFS-7652. Process block reports for erasure coded blocks. Contributed by Zhe Zhang

2015-03-24 Thread zhz
HDFS-7652. Process block reports for erasure coded blocks. Contributed by Zhe 
Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee9ce478
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee9ce478
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee9ce478

Branch: refs/heads/HDFS-7285
Commit: ee9ce4787bf1b567f65aab7d7b56e93af4ef31d4
Parents: 6a88a8b
Author: Zhe Zhang z...@apache.org
Authored: Mon Feb 9 10:27:14 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:31 2015 -0700

--
 .../server/blockmanagement/BlockIdManager.java|  8 
 .../hdfs/server/blockmanagement/BlockManager.java | 18 +-
 2 files changed, 21 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee9ce478/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index c8b9d20..e7f8a05 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -211,4 +211,12 @@ public class BlockIdManager {
   .LAST_RESERVED_BLOCK_ID);
 generationStampV1Limit = GenerationStamp.GRANDFATHER_GENERATION_STAMP;
   }
+
+  public static boolean isStripedBlockID(long id) {
+return id < 0;
+  }
+
+  public static long convertToGroupID(long id) {
+return id & (~(HdfsConstants.MAX_BLOCKS_IN_GROUP - 1));
+  }
 }
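
A quick worked example of the masking above: with MAX_BLOCKS_IN_GROUP = 16, clearing the low four bits of any member ID in a striped group recovers the (negative) group ID. The values here are made up for illustration:

    public class GroupIdSketch {
      static final int MAX_BLOCKS_IN_GROUP = 16; // as in HdfsConstants

      public static void main(String[] args) {
        long groupId = -1024L;        // striped IDs are negative; low 4 bits zero
        long memberId = groupId + 5;  // the 6th block in the group

        long recovered = memberId & (~(MAX_BLOCKS_IN_GROUP - 1));
        System.out.println(recovered == groupId); // true
      }
    }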

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee9ce478/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 91cfead..10afdaf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1925,7 +1925,7 @@ public class BlockManager {
   break;
 }
 
-BlockInfoContiguous bi = blocksMap.getStoredBlock(b);
+BlockInfoContiguous bi = getStoredBlock(b);
 if (bi == null) {
   if (LOG.isDebugEnabled()) {
 LOG.debug(BLOCK* rescanPostponedMisreplicatedBlocks:  +
@@ -2068,7 +2068,7 @@ public class BlockManager {
 continue;
   }
   
-  BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(iblk);
+  BlockInfoContiguous storedBlock = getStoredBlock(iblk);
   // If block does not belong to any file, we are done.
   if (storedBlock == null) continue;
   
@@ -2208,7 +2208,7 @@ public class BlockManager {
 }
 
 // find block by blockId
-BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(block);
+BlockInfoContiguous storedBlock = getStoredBlock(block);
 if(storedBlock == null) {
   // If blocksMap does not contain reported block id,
   // the replica should be removed from the data-node.
@@ -2499,7 +2499,7 @@ public class BlockManager {
 DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
 if (block instanceof BlockInfoContiguousUnderConstruction) {
   //refresh our copy in case the block got completed in another thread
-  storedBlock = blocksMap.getStoredBlock(block);
+  storedBlock = getStoredBlock(block);
 } else {
   storedBlock = block;
 }
@@ -3362,7 +3362,15 @@ public class BlockManager {
   }
 
   public BlockInfoContiguous getStoredBlock(Block block) {
-return blocksMap.getStoredBlock(block);
+BlockInfoContiguous info = null;
+if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  info = blocksMap.getStoredBlock(
+  new Block(BlockIdManager.convertToGroupID(block.getBlockId())));
+}
+if (info == null) {
+  info = blocksMap.getStoredBlock(block);
+}
+return info;
   }
 
   /** updates a block in under replication queue */



[41/50] [abbrv] hadoop git commit: HDFS-7826. Erasure Coding: Update INodeFile quota computation for striped blocks. Contributed by Kai Sasaki.

2015-03-24 Thread zhz
HDFS-7826. Erasure Coding: Update INodeFile quota computation for striped 
blocks. Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/227cffd9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/227cffd9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/227cffd9

Branch: refs/heads/HDFS-7285
Commit: 227cffd986a3604c7d7948cab8e1e8889646be3b
Parents: 03ebc27
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 16 16:37:08 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:34 2015 -0700

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |  3 +
 .../blockmanagement/BlockInfoStriped.java   | 12 ++-
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 89 +---
 3 files changed, 90 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/227cffd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 245b630..07b72e6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -186,4 +186,7 @@ public class HdfsConstants {
   public static final byte NUM_PARITY_BLOCKS = 2;
   public static final long BLOCK_GROUP_INDEX_MASK = 15;
   public static final byte MAX_BLOCKS_IN_GROUP = 16;
+
+  // The chunk size for striped block which is used by erasure coding
+  public static final int BLOCK_STRIPED_CHUNK_SIZE = 64 * 1024;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/227cffd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 84c3be6..cef8318 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 
 /**
@@ -34,6 +35,7 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
  * array to record the block index for each triplet.
  */
 public class BlockInfoStriped extends BlockInfo {
+  private final int   chunkSize = HdfsConstants.BLOCK_STRIPED_CHUNK_SIZE;
   private final short dataBlockNum;
   private final short parityBlockNum;
   /**
@@ -56,7 +58,7 @@ public class BlockInfoStriped extends BlockInfo {
 this.setBlockCollection(b.getBlockCollection());
   }
 
-  short getTotalBlockNum() {
+  public short getTotalBlockNum() {
 return (short) (dataBlockNum + parityBlockNum);
   }
 
@@ -178,6 +180,14 @@ public class BlockInfoStriped extends BlockInfo {
 }
   }
 
+  public long spaceConsumed() {
+// For striped blocks, total usage should cover both the data blocks
+// and the parity blocks, because `getNumBytes` only counts the actual
+// data bytes.
+return ((getNumBytes() - 1) / (dataBlockNum * chunkSize) + 1)
+* chunkSize * parityBlockNum + getNumBytes();
+  }
+
   @Override
   public final boolean isStriped() {
 return true;
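
To see what the formula above charges against quota, a hedged worked example (the 6 data units are assumed for illustration; the 2 parity units and 64KB cell match the constants above): one full stripe of data, 6 x 64KB, needs one 64KB parity cell on each of the 2 parity blocks:

    public class SpaceConsumedSketch {
      public static void main(String[] args) {
        int chunkSize = 64 * 1024;  // BLOCK_STRIPED_CHUNK_SIZE
        short dataBlockNum = 6;     // assumed schema width
        short parityBlockNum = 2;   // NUM_PARITY_BLOCKS

        long numBytes = 6L * chunkSize; // exactly one full stripe: 393216 bytes

        // Same arithmetic as spaceConsumed() above.
        long consumed = ((numBytes - 1) / (dataBlockNum * chunkSize) + 1)
            * chunkSize * parityBlockNum + numBytes;

        System.out.println(consumed); // 524288: 384KB data + 128KB parity
      }
    }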

http://git-wip-us.apache.org/repos/asf/hadoop/blob/227cffd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 8a6bb69..a8ab3ce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -42,6 +42,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import 

[45/50] [abbrv] hadoop git commit: HDFS-7864. Erasure Coding: Update safemode calculation for striped blocks. Contributed by GAO Rui.

2015-03-24 Thread zhz
HDFS-7864. Erasure Coding: Update safemode calculation for striped blocks. 
Contributed by GAO Rui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8797047
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8797047
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8797047

Branch: refs/heads/HDFS-7285
Commit: f8797047c1ae0f841e6d57210e0677af469ecb2d
Parents: 0d128ba
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 23 15:06:53 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:35 2015 -0700

--
 .../server/blockmanagement/BlockIdManager.java |  6 ++
 .../hdfs/server/blockmanagement/BlockManager.java  | 12 +++-
 .../hdfs/server/blockmanagement/BlocksMap.java |  2 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  | 17 -
 .../hadoop/hdfs/server/namenode/SafeMode.java  |  5 +++--
 .../java/org/apache/hadoop/hdfs/TestSafeMode.java  | 15 +--
 6 files changed, 42 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8797047/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 1d69d74..187f8c9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -233,6 +233,12 @@ public class BlockIdManager {
 return id  0;
   }
 
+  /**
+   * The last 4 bits of HdfsConstants.BLOCK_GROUP_INDEX_MASK(15) is 1111,
+   * so the last 4 bits of (~HdfsConstants.BLOCK_GROUP_INDEX_MASK) is 0000
+   * and the other 60 bits are 1. Group ID is the first 60 bits of any
+   * data/parity block id in the same striped block group.
+   */
   public static long convertToStripedID(long id) {
+return id & (~HdfsConstants.BLOCK_GROUP_INDEX_MASK);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8797047/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9f5d2dd..3402e36 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -684,8 +684,10 @@ public class BlockManager {
 // a forced completion when a file is getting closed by an
 // OP_CLOSE edit on the standby).
 namesystem.adjustSafeModeBlockTotals(0, 1);
+final int minStorage = curBlock.isStriped() ?
+((BlockInfoStriped) curBlock).getDataBlockNum() : minReplication;
 namesystem.incrementSafeBlockCount(
-Math.min(numNodes, minReplication));
+Math.min(numNodes, minStorage), curBlock);
 
 // replace block in the blocksMap
 return blocksMap.replaceBlock(completeBlock);
@@ -2206,7 +2208,7 @@ public class BlockManager {
 // refer HDFS-5283
 if (namesystem.isInSnapshot(storedBlock.getBlockCollection())) {
   int numOfReplicas = BlockInfo.getNumExpectedLocations(storedBlock);
-  namesystem.incrementSafeBlockCount(numOfReplicas);
+  namesystem.incrementSafeBlockCount(numOfReplicas, storedBlock);
 }
 //and fall through to next clause
   }  
@@ -2587,14 +2589,14 @@ public class BlockManager {
   // only complete blocks are counted towards that.
   // In the case that the block just became complete above, completeBlock()
   // handles the safe block count maintenance.
-  namesystem.incrementSafeBlockCount(numCurrentReplica);
+  namesystem.incrementSafeBlockCount(numCurrentReplica, storedBlock);
 }
   }
 
   /**
* Modify (block-->datanode) map. Remove block from set of
* needed replications if this takes care of the problem.
-   * @return the block that is stored in blockMap.
+   * @return the block that is stored in blocksMap.
*/
   private Block addStoredBlock(final BlockInfo block,
final Block reportedBlock,
@@ -2663,7 +2665,7 @@ public class BlockManager {
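
The minStorage change earlier in this diff is the core of the safemode update; a small illustrative sketch (names are made up, not the BlockManager API): a contiguous block counts as safe once minReplication replicas have reported, while a striped group needs at least as many reported storages as it has data units:

    public class SafeBlockSketch {
      static boolean isSafe(boolean striped, int reportedStorages,
          int minReplication, int dataBlockNum) {
        int minStorage = striped ? dataBlockNum : minReplication;
        return reportedStorages >= minStorage;
      }

      public static void main(String[] args) {
        System.out.println(isSafe(false, 1, 1, 6)); // contiguous: safe
        System.out.println(isSafe(true, 4, 1, 6));  // striped, 4 of 6 data units: not safe
        System.out.println(isSafe(true, 6, 1, 6));  // striped: safe
      }
    }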

[13/50] [abbrv] hadoop git commit: HDFS-7976. Update NFS user guide for mount option 'sync' to minimize or avoid reordered writes. Contributed by Brandon Li

2015-03-24 Thread zhz
HDFS-7976. Update NFS user guide for mount option 'sync' to minimize or avoid 
reordered writes. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51f1f493
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51f1f493
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51f1f493

Branch: refs/heads/HDFS-7285
Commit: 51f1f4937402c63392854443ccde18c1579e3348
Parents: 6413d34
Author: Brandon Li brando...@apache.org
Authored: Tue Mar 24 10:28:38 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Tue Mar 24 10:28:38 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md   | 10 +++---
 2 files changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f1f493/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 70be18a..4f3937a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -784,6 +784,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7956. Improve logging for DatanodeRegistration.
 (Plamen Jeliazkov via shv)
 
+HDFS-7976. Update NFS user guide for mount option sync to minimize or
+avoid reordered writes. (brandonli)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f1f493/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
index b7e1733..9c95287 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
@@ -126,7 +126,8 @@ It's strongly recommended for the users to update a few 
configuration properties
 /property
 
 *   Users are expected to update the file dump directory. NFS client often
-reorders writes. Sequential writes can arrive at the NFS gateway at random
+reorders writes, especially when the export is not mounted with "sync" 
option.
+Sequential writes can arrive at the NFS gateway at random
 order. This directory is used to temporarily save out-of-order writes
 before writing to HDFS. For each file, the out-of-order writes are dumped 
after
 they are accumulated to exceed certain threshold (e.g., 1MB) in memory.
@@ -254,13 +255,16 @@ Verify validity of NFS related services
Mount the export "/"
 
 
-Currently NFS v3 only uses TCP as the transportation protocol. NLM is not 
supported so mount option "nolock" is needed. It's recommended to use hard 
mount. This is because, even after the client sends all data to NFS gateway, it 
may take NFS gateway some extra time to transfer data to HDFS when writes were 
reorderd by NFS client Kernel.
+Currently NFS v3 only uses TCP as the transportation protocol. NLM is not 
supported so mount option "nolock" is needed. 
+Mount option "sync" is strongly recommended since it can minimize or avoid 
reordered writes, which results in more predictable throughput.
+ Not specifying the "sync" option may cause unreliable behavior when uploading 
large files.
+ It's recommended to use hard mount. This is because, even after the client 
sends all data to NFS gateway, it may take NFS gateway some extra time to 
transfer data to HDFS when writes were reorderd by NFS client Kernel.
 
 If soft mount has to be used, the user should give it a relatively long 
timeout (at least no less than the default timeout on the host) .
 
 The users can mount the HDFS namespace as shown below:
 
- [root]>mount -t nfs -o vers=3,proto=tcp,nolock,noacl $server:/  
$mount_point
+ [root]>mount -t nfs -o vers=3,proto=tcp,nolock,noacl,sync $server:/  
$mount_point
 
 Then the users can access HDFS as part of the local file system except that, 
hard link and random write are not supported yet. To optimize the performance 
of large file I/O, one can increase the NFS transfer size(rsize and wsize) 
during mount. By default, NFS gateway supports 1MB as the maximum transfer 
size. For larger data transfer size, one needs to update nfs.rtmax and 
nfs.rtmax in hdfs-site.xml.
 



[39/50] [abbrv] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt accordingly

2015-03-24 Thread zhz
Updated CHANGES-HDFS-EC-7285.txt accordingly


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b31c21c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b31c21c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b31c21c

Branch: refs/heads/HDFS-7285
Commit: 1b31c21caf1ecebd188c3c69c723b41f087c67b9
Parents: cec3346
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Mar 18 19:24:24 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 24 11:16:34 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b31c21c/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index a97dc34..e27ff5c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -19,6 +19,9 @@
 ( Kai Zheng via vinayakumarb )
 
 HADOOP-11705. Make erasure coder configurable. Contributed by Kai Zheng
-( Kai Zheng )
+( Kai Zheng )
+
+HADOOP-11706. Refine a little bit erasure coder API. Contributed by Kai 
Zheng
+( Kai Zheng )
 
 



hadoop git commit: HDFS-7976. Update NFS user guide for mount option 'sync' to minimize or avoid reordered writes. Contributed by Brandon Li

2015-03-24 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 a7f447d46 -> cc5922607


HDFS-7976. Update NFS user guide for mount option 'sync' to minimize or avoid 
reordered writes. Contributed by Brandon Li

(cherry picked from commit 51f1f4937402c63392854443ccde18c1579e3348)
(cherry picked from commit 95bde8898636156a442949535fd776ccd638e337)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc592260
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc592260
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc592260

Branch: refs/heads/branch-2.7
Commit: cc5922607a168ca7c7c3786095126d9aa30f791c
Parents: a7f447d
Author: Brandon Li brando...@apache.org
Authored: Tue Mar 24 10:28:38 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Tue Mar 24 10:30:19 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md   | 10 +++---
 2 files changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc592260/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7092f70..e4d737f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -450,6 +450,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7956. Improve logging for DatanodeRegistration.
 (Plamen Jeliazkov via shv)
 
+HDFS-7976. Update NFS user guide for mount option sync to minimize or
+avoid reordered writes. (brandonli)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc592260/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
index f1bd696..f6e4a69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
@@ -126,7 +126,8 @@ It's strongly recommended for the users to update a few 
configuration properties
 /property
 
 *   Users are expected to update the file dump directory. NFS client often
-reorders writes. Sequential writes can arrive at the NFS gateway at random
+reorders writes, especially when the export is not mounted with "sync" 
option.
+Sequential writes can arrive at the NFS gateway at random
 order. This directory is used to temporarily save out-of-order writes
 before writing to HDFS. For each file, the out-of-order writes are dumped 
after
 they are accumulated to exceed certain threshold (e.g., 1MB) in memory.
@@ -254,13 +255,16 @@ Verify validity of NFS related services
Mount the export "/"
 
 
-Currently NFS v3 only uses TCP as the transportation protocol. NLM is not 
supported so mount option "nolock" is needed. It's recommended to use hard 
mount. This is because, even after the client sends all data to NFS gateway, it 
may take NFS gateway some extra time to transfer data to HDFS when writes were 
reorderd by NFS client Kernel.
+Currently NFS v3 only uses TCP as the transportation protocol. NLM is not 
supported so mount option "nolock" is needed. 
+Mount option "sync" is strongly recommended since it can minimize or avoid 
reordered writes, which results in more predictable throughput.
+ Not specifying the "sync" option may cause unreliable behavior when uploading 
large files.
+ It's recommended to use hard mount. This is because, even after the client 
sends all data to NFS gateway, it may take NFS gateway some extra time to 
transfer data to HDFS when writes were reorderd by NFS client Kernel.
 
 If soft mount has to be used, the user should give it a relatively long 
timeout (at least no less than the default timeout on the host) .
 
 The users can mount the HDFS namespace as shown below:
 
- [root]>mount -t nfs -o vers=3,proto=tcp,nolock,noacl $server:/  
$mount_point
+ [root]>mount -t nfs -o vers=3,proto=tcp,nolock,noacl,sync $server:/  
$mount_point
 
 Then the users can access HDFS as part of the local file system except that, 
hard link and random write are not supported yet. To optimize the performance 
of large file I/O, one can increase the NFS transfer size(rsize and wsize) 
during mount. By default, NFS gateway supports 1MB as the maximum transfer 
size. For larger data transfer size, one needs to update nfs.rtmax and 
nfs.rtmax in hdfs-site.xml.
 



hadoop git commit: HDFS-7976. Update NFS user guide for mount option 'sync' to minimize or avoid reordered writes. Contributed by Brandon Li

2015-03-24 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e39c58fd9 -> 95bde8898


HDFS-7976. Update NFS user guide for mount option 'sync' to minimize or avoid 
reordered writes. Contributed by Brandon Li

(cherry picked from commit 51f1f4937402c63392854443ccde18c1579e3348)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95bde889
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95bde889
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95bde889

Branch: refs/heads/branch-2
Commit: 95bde8898636156a442949535fd776ccd638e337
Parents: e39c58f
Author: Brandon Li brando...@apache.org
Authored: Tue Mar 24 10:28:38 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Tue Mar 24 10:29:27 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md   | 10 +++---
 2 files changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bde889/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dda75f9..5f289dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -479,6 +479,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7956. Improve logging for DatanodeRegistration.
 (Plamen Jeliazkov via shv)
 
+HDFS-7976. Update NFS user guide for mount option sync to minimize or
+avoid reordered writes. (brandonli)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bde889/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
index f1bd696..f6e4a69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
@@ -126,7 +126,8 @@ It's strongly recommended for the users to update a few 
configuration properties
 /property
 
 *   Users are expected to update the file dump directory. NFS client often
-reorders writes. Sequential writes can arrive at the NFS gateway at random
+reorders writes, especially when the export is not mounted with "sync" 
option.
+Sequential writes can arrive at the NFS gateway at random
 order. This directory is used to temporarily save out-of-order writes
 before writing to HDFS. For each file, the out-of-order writes are dumped 
after
 they are accumulated to exceed certain threshold (e.g., 1MB) in memory.
@@ -254,13 +255,16 @@ Verify validity of NFS related services
Mount the export "/"
 
 
-Currently NFS v3 only uses TCP as the transportation protocol. NLM is not 
supported so mount option "nolock" is needed. It's recommended to use hard 
mount. This is because, even after the client sends all data to NFS gateway, it 
may take NFS gateway some extra time to transfer data to HDFS when writes were 
reorderd by NFS client Kernel.
+Currently NFS v3 only uses TCP as the transportation protocol. NLM is not 
supported so mount option "nolock" is needed. 
+Mount option "sync" is strongly recommended since it can minimize or avoid 
reordered writes, which results in more predictable throughput.
+ Not specifying the "sync" option may cause unreliable behavior when uploading 
large files.
+ It's recommended to use hard mount. This is because, even after the client 
sends all data to NFS gateway, it may take NFS gateway some extra time to 
transfer data to HDFS when writes were reorderd by NFS client Kernel.
 
 If soft mount has to be used, the user should give it a relatively long 
timeout (at least no less than the default timeout on the host) .
 
 The users can mount the HDFS namespace as shown below:
 
- [root]>mount -t nfs -o vers=3,proto=tcp,nolock,noacl $server:/  
$mount_point
+ [root]>mount -t nfs -o vers=3,proto=tcp,nolock,noacl,sync $server:/  
$mount_point
 
 Then the users can access HDFS as part of the local file system except that, 
hard link and random write are not supported yet. To optimize the performance 
of large file I/O, one can increase the NFS transfer size(rsize and wsize) 
during mount. By default, NFS gateway supports 1MB as the maximum transfer 
size. For larger data transfer size, one needs to update nfs.rtmax and 
nfs.rtmax in hdfs-site.xml.
 



hadoop git commit: HDFS-7976. Update NFS user guide for mount option 'sync' to minimize or avoid reordered writes. Contributed by Brandon Li

2015-03-24 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6413d3498 -> 51f1f4937


HDFS-7976. Update NFS user guide for mount option 'sync' to minimize or avoid 
reordered writes. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51f1f493
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51f1f493
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51f1f493

Branch: refs/heads/trunk
Commit: 51f1f4937402c63392854443ccde18c1579e3348
Parents: 6413d34
Author: Brandon Li brando...@apache.org
Authored: Tue Mar 24 10:28:38 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Tue Mar 24 10:28:38 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md   | 10 +++---
 2 files changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f1f493/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 70be18a..4f3937a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -784,6 +784,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7956. Improve logging for DatanodeRegistration.
 (Plamen Jeliazkov via shv)
 
+HDFS-7976. Update NFS user guide for mount option sync to minimize or
+avoid reordered writes. (brandonli)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f1f493/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
index b7e1733..9c95287 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
@@ -126,7 +126,8 @@ It's strongly recommended for the users to update a few 
configuration properties
 /property
 
 *   Users are expected to update the file dump directory. NFS client often
-reorders writes. Sequential writes can arrive at the NFS gateway at random
+reorders writes, especially when the export is not mounted with "sync" 
option.
+Sequential writes can arrive at the NFS gateway at random
 order. This directory is used to temporarily save out-of-order writes
 before writing to HDFS. For each file, the out-of-order writes are dumped 
after
 they are accumulated to exceed certain threshold (e.g., 1MB) in memory.
@@ -254,13 +255,16 @@ Verify validity of NFS related services
Mount the export "/"
 
 
-Currently NFS v3 only uses TCP as the transportation protocol. NLM is not 
supported so mount option "nolock" is needed. It's recommended to use hard 
mount. This is because, even after the client sends all data to NFS gateway, it 
may take NFS gateway some extra time to transfer data to HDFS when writes were 
reorderd by NFS client Kernel.
+Currently NFS v3 only uses TCP as the transportation protocol. NLM is not 
supported so mount option "nolock" is needed. 
+Mount option "sync" is strongly recommended since it can minimize or avoid 
reordered writes, which results in more predictable throughput.
+ Not specifying the "sync" option may cause unreliable behavior when uploading 
large files.
+ It's recommended to use hard mount. This is because, even after the client 
sends all data to NFS gateway, it may take NFS gateway some extra time to 
transfer data to HDFS when writes were reorderd by NFS client Kernel.
 
 If soft mount has to be used, the user should give it a relatively long 
timeout (at least no less than the default timeout on the host) .
 
 The users can mount the HDFS namespace as shown below:
 
- [root]>mount -t nfs -o vers=3,proto=tcp,nolock,noacl $server:/  
$mount_point
+ [root]>mount -t nfs -o vers=3,proto=tcp,nolock,noacl,sync $server:/  
$mount_point
 
 Then the users can access HDFS as part of the local file system, except that 
hard link and random write are not supported yet. To optimize the performance 
of large file I/O, one can increase the NFS transfer size (rsize and wsize) 
during mount. By default, the NFS gateway supports 1MB as the maximum transfer 
size. For larger data transfer sizes, one needs to update nfs.rtmax and 
nfs.wtmax in hdfs-site.xml.
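
A minimal sketch of that tuning follows (illustrative only; it assumes the
write-side key is nfs.wtmax, mirroring the read-side nfs.rtmax, and in practice
the same values would simply be set in hdfs-site.xml rather than in code):

    import org.apache.hadoop.conf.Configuration;

    // Sketch: raising the NFS gateway's maximum read/write transfer sizes
    // from the 1MB default to 4MB.
    public class TransferSizeSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("nfs.rtmax", 4 * 1024 * 1024); // max read transfer size
        conf.setInt("nfs.wtmax", 4 * 1024 * 1024); // max write transfer size
        System.out.println("rtmax=" + conf.getInt("nfs.rtmax", 1024 * 1024));
      }
    }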
 



hadoop git commit: HDFS-7956. Improve logging for DatanodeRegistration. Contributed by Plamen Jeliazkov.

2015-03-24 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 50ee8f4e6 - 970ee3fc5


HDFS-7956. Improve logging for DatanodeRegistration. Contributed by Plamen 
Jeliazkov.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/970ee3fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/970ee3fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/970ee3fc

Branch: refs/heads/trunk
Commit: 970ee3fc56a68afade98017296cf9d057f225a46
Parents: 50ee8f4
Author: Plamen Jeliazkov plamenj2...@gmail.com
Authored: Mon Mar 23 23:04:04 2015 -0700
Committer: Konstantin V Shvachko s...@apache.org
Committed: Mon Mar 23 23:05:21 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/970ee3fc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3dd5fb3..3ea1346 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -777,6 +777,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7917. Use file to replace data dirs in test to simulate a disk 
failure.
 (Lei (Eddy) Xu via cnauroth)
 
+HDFS-7956. Improve logging for DatanodeRegistration.
+(Plamen Jeliazkov via shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/970ee3fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
index e788137..7119738 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
@@ -100,7 +100,7 @@ public class DatanodeRegistration extends DatanodeID
   @Override
   public String toString() {
 return getClass().getSimpleName()
-  + "(" + getIpAddr()
+  + "(" + super.toString()
   + ", datanodeUuid=" + getDatanodeUuid()
   + ", infoPort=" + getInfoPort()
   + ", infoSecurePort=" + getInfoSecurePort()
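
To see what the one-line change buys, here is a simplified, illustrative model
(toy classes and fields, not the real DatanodeID/DatanodeRegistration) of how
delegating to super.toString() enriches the log line with every identity field
instead of just the IP address:

    // Toy stand-in for DatanodeID.
    class IdSketch {
      String ip = "10.0.0.1";
      String host = "dn1.example.com";
      @Override public String toString() { return host + ":" + ip; }
    }

    // Toy stand-in for DatanodeRegistration.
    public class RegistrationSketch extends IdSketch {
      String datanodeUuid = "uuid-1234";
      @Override public String toString() {
        // before: getClass().getSimpleName() + "(" + ip + ...
        // after:  getClass().getSimpleName() + "(" + super.toString() + ...
        return getClass().getSimpleName() + "(" + super.toString()
            + ", datanodeUuid=" + datanodeUuid + ")";
      }
      public static void main(String[] args) {
        System.out.println(new RegistrationSketch());
        // RegistrationSketch(dn1.example.com:10.0.0.1, datanodeUuid=uuid-1234)
      }
    }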



hadoop git commit: HDFS-7956. Improve logging for DatanodeRegistration. Contributed by Plamen Jeliazkov.

2015-03-24 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2f46ee50b - 75d6d9e02


HDFS-7956. Improve logging for DatanodeRegistration. Contributed by Plamen 
Jeliazkov.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75d6d9e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75d6d9e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75d6d9e0

Branch: refs/heads/branch-2
Commit: 75d6d9e02279d69b20cb40a1d1eef3396b9c31b9
Parents: 2f46ee5
Author: Plamen Jeliazkov plamenj2...@gmail.com
Authored: Mon Mar 23 23:04:04 2015 -0700
Committer: Konstantin V Shvachko s...@apache.org
Committed: Mon Mar 23 23:07:01 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75d6d9e0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 15729ef..17adae8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -472,6 +472,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7917. Use file to replace data dirs in test to simulate a disk 
failure.
 (Lei (Eddy) Xu via cnauroth)
 
+HDFS-7956. Improve logging for DatanodeRegistration.
+(Plamen Jeliazkov via shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75d6d9e0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
index e788137..7119738 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
@@ -100,7 +100,7 @@ public class DatanodeRegistration extends DatanodeID
   @Override
   public String toString() {
 return getClass().getSimpleName()
-  + "(" + getIpAddr()
+  + "(" + super.toString()
   + ", datanodeUuid=" + getDatanodeUuid()
   + ", infoPort=" + getInfoPort()
   + ", infoSecurePort=" + getInfoSecurePort()



hadoop git commit: HDFS-7956. Improve logging for DatanodeRegistration. Contributed by Plamen Jeliazkov.

2015-03-24 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 af0af28af - da318cbbd


HDFS-7956. Improve logging for DatanodeRegistration. Contributed by Plamen 
Jeliazkov.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da318cbb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da318cbb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da318cbb

Branch: refs/heads/branch-2.7
Commit: da318cbbde9aadb5131cc1a462d84bd9501f4179
Parents: af0af28
Author: Plamen Jeliazkov plamenj2...@gmail.com
Authored: Mon Mar 23 23:04:04 2015 -0700
Committer: Konstantin V Shvachko s...@apache.org
Committed: Mon Mar 23 23:09:52 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da318cbb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7e62d63..e2729a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -447,6 +447,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7917. Use file to replace data dirs in test to simulate a disk 
failure.
 (Lei (Eddy) Xu via cnauroth)
 
+HDFS-7956. Improve logging for DatanodeRegistration.
+(Plamen Jeliazkov via shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da318cbb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
index e788137..7119738 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
@@ -100,7 +100,7 @@ public class DatanodeRegistration extends DatanodeID
   @Override
   public String toString() {
 return getClass().getSimpleName()
-  + "(" + getIpAddr()
+  + "(" + super.toString()
   + ", datanodeUuid=" + getDatanodeUuid()
   + ", infoPort=" + getInfoPort()
   + ", infoSecurePort=" + getInfoSecurePort()



hadoop git commit: YARN-1880. Cleanup TestApplicationClientProtocolOnHA. Contributed by ozawa.

2015-03-24 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 970ee3fc5 - fbceb3b41


YARN-1880. Cleanup TestApplicationClientProtocolOnHA. Contributed by ozawa.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbceb3b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbceb3b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbceb3b4

Branch: refs/heads/trunk
Commit: fbceb3b41834d6899c4353fb24f12ba3ecf67faf
Parents: 970ee3f
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 24 11:57:28 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 24 11:57:58 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../client/TestApplicationClientProtocolOnHA.java   | 16 ++--
 2 files changed, 13 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbceb3b4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3d9f271..8a5e142 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -58,6 +58,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+YARN-1880. Cleanup TestApplicationClientProtocolOnHA
+(ozawa via harsh)
+
 YARN-3243. CapacityScheduler should pass headroom from parent to children
 to make sure ParentQueue obey its capacity limits. (Wangda Tan via jianhe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbceb3b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
index bfc6656..8e00554 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
@@ -93,7 +93,8 @@ public class TestApplicationClientProtocolOnHA extends 
ProtocolHATestBase {
   public void testGetApplicationsOnHA() throws Exception {
 List<ApplicationReport> reports =
 client.getApplications();
-Assert.assertTrue(reports != null && !reports.isEmpty());
+Assert.assertTrue(reports != null);
+Assert.assertFalse(reports.isEmpty());
 Assert.assertEquals(cluster.createFakeAppReports(),
 reports);
   }
@@ -101,7 +102,8 @@ public class TestApplicationClientProtocolOnHA extends 
ProtocolHATestBase {
   @Test(timeout = 15000)
   public void testGetClusterNodesOnHA() throws Exception {
 List<NodeReport> reports = client.getNodeReports(NodeState.RUNNING);
-Assert.assertTrue(reports != null && !reports.isEmpty());
+Assert.assertTrue(reports != null);
+Assert.assertFalse(reports.isEmpty());
 Assert.assertEquals(cluster.createFakeNodeReports(),
 reports);
   }
@@ -117,8 +119,8 @@ public class TestApplicationClientProtocolOnHA extends 
ProtocolHATestBase {
   @Test(timeout = 15000)
   public void testGetQueueUserAclsOnHA() throws Exception {
 List<QueueUserACLInfo> queueUserAclsList = client.getQueueAclsInfo();
-Assert.assertTrue(queueUserAclsList != null
-    && !queueUserAclsList.isEmpty());
+Assert.assertTrue(queueUserAclsList != null);
+Assert.assertFalse(queueUserAclsList.isEmpty());
 Assert.assertEquals(cluster.createFakeQueueUserACLInfoList(),
 queueUserAclsList);
   }
@@ -136,7 +138,8 @@ public class TestApplicationClientProtocolOnHA extends 
ProtocolHATestBase {
   public void testGetApplicationAttemptsOnHA() throws Exception {
 List<ApplicationAttemptReport> reports =
 client.getApplicationAttempts(cluster.createFakeAppId());
-Assert.assertTrue(reports != null && !reports.isEmpty());
+Assert.assertTrue(reports != null);
+Assert.assertFalse(reports.isEmpty());
 Assert.assertEquals(cluster.createFakeApplicationAttemptReports(),
 reports);
   }
@@ -153,7 +156,8 @@ public class TestApplicationClientProtocolOnHA extends 
ProtocolHATestBase {
   public void testGetContainersOnHA() throws Exception {
 List<ContainerReport> reports =
 client.getContainers(cluster.createFakeApplicationAttemptId());
-Assert.assertTrue(reports != null && !reports.isEmpty());
+Assert.assertTrue(reports != null);
+Assert.assertFalse(reports.isEmpty());
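
The pattern the cleanup applies throughout this test, sketched below with toy
names (JUnit 4, as the patch itself uses): splitting a compound assertTrue into
two assertions, so a failure report pinpoints whether the list was null or
merely empty instead of raising one opaque AssertionError for both cases.

    import java.util.Collections;
    import java.util.List;
    import org.junit.Assert;

    public class SplitAssertSketch {
      static void check(List<String> reports) {
        Assert.assertTrue(reports != null);    // fails here if reports is null
        Assert.assertFalse(reports.isEmpty()); // fails here if reports is empty
      }
      public static void main(String[] args) {
        check(Collections.singletonList("report"));
      }
    }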
 

hadoop git commit: HDFS-3325. When configuring 'dfs.namenode.safemode.threshold-pct' to a value greater or equal to 1 there is mismatch in the UI report (Contributed by J.Andreina)

2015-03-24 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e7702ac6a - c3e2c20c9


HDFS-3325. When configuring 'dfs.namenode.safemode.threshold-pct' to a value 
greater or equal to 1 there is mismatch in the UI report (Contributed by 
J.Andreina)

(cherry picked from commit c6c396fcd69514ba93583268b2633557c3d74a47)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3e2c20c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3e2c20c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3e2c20c

Branch: refs/heads/branch-2
Commit: c3e2c20c9907d106e1cb9da0562c06de4975bc09
Parents: e7702ac
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Mar 24 12:12:01 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Mar 24 12:13:21 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 4 
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java   | 2 +-
 .../apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java| 2 +-
 4 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3e2c20c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 17adae8..8977904 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -25,6 +25,10 @@ Release 2.8.0 - UNRELEASED
 HDFS-7867. Update action param from start to prepare in rolling upgrade
 javadoc (J.Andreina via vinayakumarb)
 
+HDFS-3325. When configuring dfs.namenode.safemode.threshold-pct to a 
value
+greater or equal to 1 there is mismatch in the UI report
+(J.Andreina via vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3e2c20c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ad0c344..697b975 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5417,7 +5417,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 msg += String.format(
  "The reported blocks %d needs additional %d"
  + " blocks to reach the threshold %.4f of total blocks %d.%n",
-  blockSafe, (blockThreshold - blockSafe) + 1, threshold, blockTotal);
+blockSafe, (blockThreshold - blockSafe), threshold, blockTotal);
 thresholdsMet = false;
   } else {
 msg += String.format("The reported blocks %d has reached the threshold"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3e2c20c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index 2d5bef2..80fe9ee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -198,7 +198,7 @@ public class TestSafeMode {
 
 String status = nn.getNamesystem().getSafemode();
 assertEquals("Safe mode is ON. The reported blocks 0 needs additional " +
-"15 blocks to reach the threshold 0.9990 of total blocks 15." + NEWLINE +
+"14 blocks to reach the threshold 0.9990 of total blocks 15." + NEWLINE +
 "The number of live datanodes 0 has reached the minimum number 0. " +
 "Safe mode will be turned off automatically once the thresholds " +
 "have been reached.", status);
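
A quick worked check of the off-by-one (assuming blockThreshold is computed as
(int) (blockTotal * threshold), as elsewhere in FSNamesystem) shows why the
expected count in the test drops from 15 to 14:

    public class SafeModeMath {
      public static void main(String[] args) {
        int blockTotal = 15, blockSafe = 0;     // values from the test above
        double threshold = 0.9990;
        int blockThreshold = (int) (blockTotal * threshold); // 14.985 floored to 14
        System.out.println("old message: needs "
            + ((blockThreshold - blockSafe) + 1)); // 15 -- the stray "+ 1"
        System.out.println("new message: needs "
            + (blockThreshold - blockSafe));       // 14 -- matches the UI
      }
    }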

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3e2c20c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
 

hadoop git commit: HDFS-3325. When configuring 'dfs.namenode.safemode.threshold-pct' to a value greater or equal to 1 there is mismatch in the UI report (Contributed by J.Andreina)

2015-03-24 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk fbceb3b41 - c6c396fcd


HDFS-3325. When configuring 'dfs.namenode.safemode.threshold-pct' to a value 
greater or equal to 1 there is mismatch in the UI report (Contributed by 
J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6c396fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6c396fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6c396fc

Branch: refs/heads/trunk
Commit: c6c396fcd69514ba93583268b2633557c3d74a47
Parents: fbceb3b
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Mar 24 12:12:01 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Mar 24 12:12:01 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 4 
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java   | 2 +-
 .../apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java| 2 +-
 4 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6c396fc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3ea1346..ee9a5db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -338,6 +338,10 @@ Release 2.8.0 - UNRELEASED
 HDFS-7867. Update action param from start to prepare in rolling upgrade
 javadoc (J.Andreina via vinayakumarb)
 
+HDFS-3325. When configuring dfs.namenode.safemode.threshold-pct to a 
value
+greater or equal to 1 there is mismatch in the UI report
+(J.Andreina via vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6c396fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 71c84b1..34b5e95 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5417,7 +5417,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 msg += String.format(
  "The reported blocks %d needs additional %d"
  + " blocks to reach the threshold %.4f of total blocks %d.%n",
-  blockSafe, (blockThreshold - blockSafe) + 1, threshold, blockTotal);
+blockSafe, (blockThreshold - blockSafe), threshold, blockTotal);
 thresholdsMet = false;
   } else {
 msg += String.format("The reported blocks %d has reached the threshold"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6c396fc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index 2d5bef2..80fe9ee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -198,7 +198,7 @@ public class TestSafeMode {
 
 String status = nn.getNamesystem().getSafemode();
 assertEquals("Safe mode is ON. The reported blocks 0 needs additional " +
-"15 blocks to reach the threshold 0.9990 of total blocks 15." + NEWLINE +
+"14 blocks to reach the threshold 0.9990 of total blocks 15." + NEWLINE +
 "The number of live datanodes 0 has reached the minimum number 0. " +
 "Safe mode will be turned off automatically once the thresholds " +
 "have been reached.", status);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6c396fc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
index c5aad9c..86f3e7b 

hadoop git commit: YARN-3034. Implement RM starting its timeline collector. Contributed by Naganarasimha G R

2015-03-24 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 04de2cecc - dc12cad2b


YARN-3034. Implement RM starting its timeline collector. Contributed by 
Naganarasimha G R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc12cad2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc12cad2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc12cad2

Branch: refs/heads/YARN-2928
Commit: dc12cad2b89f643dafa0def863325cb374c7670c
Parents: 04de2ce
Author: Junping Du junping...@apache.org
Authored: Tue Mar 24 13:42:14 2015 -0700
Committer: Junping Du junping...@apache.org
Committed: Tue Mar 24 13:42:14 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  14 ++-
 .../src/main/resources/yarn-default.xml |  11 +-
 .../hadoop-yarn-server-resourcemanager/pom.xml  |   4 +
 .../resourcemanager/RMActiveServiceContext.java |  15 +++
 .../yarn/server/resourcemanager/RMContext.java  |   5 +
 .../server/resourcemanager/RMContextImpl.java   |  14 ++-
 .../server/resourcemanager/ResourceManager.java |  35 +--
 .../metrics/SystemMetricsPublisher.java |  29 +++---
 .../timelineservice/RMTimelineCollector.java| 104 +++
 10 files changed, 205 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc12cad2/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 259cf64..111de71 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -38,6 +38,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3377. Fixed test failure in TestTimelineServiceClientIntegration.
 (Sangjin Lee via zjshen)
 
+YARN-3034. Implement RM starting its timeline collector. (Naganarasimha G R
+via junping_du)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc12cad2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 73f11b8..13cdcbe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -351,12 +351,20 @@ public class YarnConfiguration extends Configuration {
 
   /**
*  The setting that controls whether yarn system metrics is published on the
-   *  timeline server or not by RM.
+   *  timeline server or not by RM. This configuration setting is for ATS V1
*/
-  public static final String RM_SYSTEM_METRICS_PUBLISHER_ENABLED =
-  RM_PREFIX + "system-metrics-publisher.enabled";
+  public static final String RM_SYSTEM_METRICS_PUBLISHER_ENABLED = RM_PREFIX
+  + "system-metrics-publisher.enabled";
   public static final boolean DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_ENABLED = false;
 
+  /**
+   *  The setting that controls whether yarn system metrics is published on the
+   *  timeline server or not by RM and NM. This configuration setting is for ATS V2
+   */
+  public static final String SYSTEM_METRICS_PUBLISHER_ENABLED = YARN_PREFIX
+  + "system-metrics-publisher.enabled";
+  public static final boolean DEFAULT_SYSTEM_METRICS_PUBLISHER_ENABLED = false;
+
   public static final String RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE =
   RM_PREFIX + "system-metrics-publisher.dispatcher.pool-size";
   public static final int 
DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE =
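
A hedged sketch of how the two switches introduced here might be consumed (key
strings assembled from the RM_PREFIX "yarn.resourcemanager." and YARN_PREFIX
"yarn." values in YarnConfiguration; defaults of false as in the diff): the
RM-scoped key gates the ATS v1 publisher, the YARN-scoped key the ATS v2 path.

    import org.apache.hadoop.conf.Configuration;

    public class PublisherFlagsSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        boolean atsV1 = conf.getBoolean(
            "yarn.resourcemanager.system-metrics-publisher.enabled", false);
        boolean atsV2 = conf.getBoolean(
            "yarn.system-metrics-publisher.enabled", false);
        System.out.println("ATS v1=" + atsV1 + ", ATS v2=" + atsV2);
      }
    }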

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc12cad2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 9ac54ce..c4887b7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -654,12 +654,21 @@
 
   <property>
 <description>The setting that controls whether yarn system metrics is
-published on the timeline server or not by RM.</description>
+published to the 

[2/2] hadoop git commit: HDFS-7854. Separate class DataStreamer out of DFSOutputStream. Contributed by Li Bo.

2015-03-24 Thread jing9
HDFS-7854. Separate class DataStreamer out of DFSOutputStream. Contributed by 
Li Bo.

(cherry picked from commit a16bfff71bd7f00e06e1f59bfe5445a154bb8c66)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/483f77b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/483f77b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/483f77b7

Branch: refs/heads/branch-2
Commit: 483f77b75b081a96f5e5c8f824f60f5a1b43b16f
Parents: 260dbe9
Author: Jing Zhao ji...@apache.org
Authored: Tue Mar 24 11:06:13 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Tue Mar 24 11:21:10 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |3 +
 .../dev-support/findbugsExcludeFile.xml |2 +-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 1693 ++---
 .../org/apache/hadoop/hdfs/DataStreamer.java| 1754 ++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |2 +-
 .../apache/hadoop/hdfs/TestDFSOutputStream.java |5 +-
 .../apache/hadoop/hdfs/TestFileCreation.java|   18 +-
 7 files changed, 1893 insertions(+), 1584 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/483f77b7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 56a4f3d..07c9c62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -19,6 +19,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-7829. Code clean up for LocatedBlock. (Takanobu Asanuma via jing9)
 
+HDFS-7854. Separate class DataStreamer out of DFSOutputStream. (Li Bo via
+jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/483f77b7/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index dedeece..224d2fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -77,7 +77,7 @@
   ResponseProccessor is a thread that is designed to catch RuntimeException.
  -->
  <Match>
-    <Class name="org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer$ResponseProcessor" />
+    <Class name="org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor" />
    <Method name="run" />
    <Bug pattern="REC_CATCH_EXCEPTION" />
  </Match>
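
The exclusion rename is forced by how nested classes get their binary names; a
toy illustration (class names invented here, not Hadoop code):

    class OuterSketch {
      static class InnerSketch {
        static class DeepSketch { }
      }
    }

    public class BinaryNameSketch {
      public static void main(String[] args) {
        // Prints OuterSketch$InnerSketch$DeepSketch, mirroring how
        // DFSOutputStream$DataStreamer$ResponseProcessor collapsed to
        // DataStreamer$ResponseProcessor once DataStreamer became top-level.
        System.out.println(OuterSketch.InnerSketch.DeepSketch.class.getName());
      }
    }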

http://git-wip-us.apache.org/repos/asf/hadoop/blob/483f77b7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 1b79a69..1c1df91 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -17,29 +17,12 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
-
-import java.io.BufferedOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.nio.channels.ClosedChannelException;
-import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -52,64 +35,37 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import 

hadoop git commit: YARN-3383. AdminService should use warn instead of info to log exception when operation fails. (Li Lu via wangda)

2015-03-24 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk eda02540c - 97a7277a2


YARN-3383. AdminService should use warn instead of info to log exception when 
operation fails. (Li Lu via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97a7277a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97a7277a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97a7277a

Branch: refs/heads/trunk
Commit: 97a7277a2d696474b5c8e2d814c8291d4bde246e
Parents: eda0254
Author: Wangda Tan wan...@apache.org
Authored: Tue Mar 24 10:33:09 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Tue Mar 24 10:38:54 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/yarn/server/resourcemanager/AdminService.java   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97a7277a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8a5e142..6a495d9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -103,6 +103,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3241. FairScheduler handles invalid queue names inconsistently. 
 (Zhihai Xu via kasha)
 
+YARN-3383. AdminService should use warn instead of info to log 
exception 
+when operation fails. (Li Lu via wangda)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97a7277a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 22b92c2..12714de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -699,7 +699,7 @@ public class AdminService extends CompositeService 
implements
 
   private YarnException logAndWrapException(Exception exception, String user,
   String argName, String msg) throws YarnException {
-LOG.info("Exception " + msg, exception);
+LOG.warn("Exception " + msg, exception);
 RMAuditLogger.logFailure(user, argName, "", 
 "AdminService", "Exception " + msg);
 return RPCUtil.getRemoteException(exception);
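
A small sketch of the convention the patch enforces (commons-logging, as the
RM code of this era uses; the helper name below is illustrative): a failure
that is wrapped and returned to the caller is logged at WARN, not INFO, so it
stands out at default log levels.

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class LogLevelSketch {
      private static final Log LOG = LogFactory.getLog(LogLevelSketch.class);

      static RuntimeException logAndWrap(Exception e, String msg) {
        LOG.warn("Exception " + msg, e); // was LOG.info(...) before the patch
        return new RuntimeException(e);
      }

      public static void main(String[] args) {
        logAndWrap(new IllegalStateException("refresh failed"), "refreshQueues");
      }
    }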



hadoop git commit: HDFS-7977. NFS couldn't take percentile intervals. Contributed by Brandon Li

2015-03-24 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/trunk 97a7277a2 - 570a83ae8


HDFS-7977. NFS couldn't take percentile intervals. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/570a83ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/570a83ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/570a83ae

Branch: refs/heads/trunk
Commit: 570a83ae80faf2076966acf30588733803327844
Parents: 97a7277
Author: Brandon Li brando...@apache.org
Authored: Tue Mar 24 10:49:16 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Tue Mar 24 10:49:16 2015 -0700

--
 .../org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java |  1 -
 .../org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java   | 13 ++---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  2 ++
 .../hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md| 12 
 4 files changed, 20 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/570a83ae/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
index 09ee579..05cc0b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
@@ -72,7 +72,6 @@ public class NfsConfigKeys {
  public static final String NFS_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + 
NFS_HTTPS_PORT_DEFAULT;
  
  public static final String  NFS_METRICS_PERCENTILES_INTERVALS_KEY = 
"nfs.metrics.percentiles.intervals";
-  public static final String  NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT = "";
   
   /*
* HDFS super-user is the user with the same identity as NameNode process

http://git-wip-us.apache.org/repos/asf/hadoop/blob/570a83ae/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
index d36ea73..880a8a6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
@@ -90,9 +90,9 @@ public class Nfs3Metrics {
   readNanosQuantiles[i] = registry.newQuantiles("readProcessNanos"
   + interval + "s", "Read process in ns", "ops", "latency", interval);
   writeNanosQuantiles[i] = registry.newQuantiles("writeProcessNanos"
-  + interval + "s", " process in ns", "ops", "latency", interval);
+  + interval + "s", "Write process in ns", "ops", "latency", interval);
   commitNanosQuantiles[i] = registry.newQuantiles("commitProcessNanos"
-  + interval + "s", "Read process in ns", "ops", "latency", interval);
+  + interval + "s", "Commit process in ns", "ops", "latency", interval);
 }
   }
 
@@ -101,10 +101,9 @@ public class Nfs3Metrics {
 MetricsSystem ms = DefaultMetricsSystem.instance();
 JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);
 
-// Percentile measurement is [,,,] by default 
-int[] intervals = conf.getInts(conf.get(
-NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY,
-NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT));
+// Percentile measurement is [50th,75th,90th,95th,99th] currently 
+int[] intervals = conf
+.getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
 return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
   }
   
@@ -217,4 +216,4 @@ public class Nfs3Metrics {
 }
   }
 
-}
\ No newline at end of file
+}
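
A minimal sketch of the bug fixed above: the old code passed the configured
value back into getInts() as if it were a key, so user-set intervals were
silently ignored; getInts(KEY) parses the comma-separated value directly.

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;

    public class GetIntsSketch {
      static final String KEY = "nfs.metrics.percentiles.intervals";
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(KEY, "60,600");
        int[] broken = conf.getInts(conf.get(KEY, "")); // looks up key "60,600"
        int[] fixed = conf.getInts(KEY);                // parses the value
        System.out.println(Arrays.toString(broken)); // []
        System.out.println(Arrays.toString(fixed));  // [60, 600]
      }
    }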

http://git-wip-us.apache.org/repos/asf/hadoop/blob/570a83ae/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3725a03..5dae029 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1260,6 +1260,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7961. Trigger full block report after hot swapping disk. (Eddy Xu via 
wang)
 
+HDFS-7977. NFS couldn't take percentile intervals (brandonli)
+
 BREAKDOWN OF HDFS-7584 

hadoop git commit: HDFS-7977. NFS couldn't take percentile intervals. Contributed by Brandon Li

2015-03-24 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 29629020c - 260dbe96c


HDFS-7977. NFS couldn't take percentile intervals. Contributed by Brandon Li

(cherry picked from commit 570a83ae80faf2076966acf30588733803327844)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/260dbe96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/260dbe96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/260dbe96

Branch: refs/heads/branch-2
Commit: 260dbe96c3d21ea372c728b2cd0f4a6aeb970010
Parents: 2962902
Author: Brandon Li brando...@apache.org
Authored: Tue Mar 24 10:49:16 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Tue Mar 24 10:50:50 2015 -0700

--
 .../org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java |  1 -
 .../org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java   | 13 ++---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  2 ++
 .../hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md| 12 
 4 files changed, 20 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/260dbe96/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
index 09ee579..05cc0b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
@@ -72,7 +72,6 @@ public class NfsConfigKeys {
  public static final String NFS_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + 
NFS_HTTPS_PORT_DEFAULT;
  
  public static final String  NFS_METRICS_PERCENTILES_INTERVALS_KEY = 
"nfs.metrics.percentiles.intervals";
-  public static final String  NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT = "";
   
   /*
* HDFS super-user is the user with the same identity as NameNode process

http://git-wip-us.apache.org/repos/asf/hadoop/blob/260dbe96/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
index d36ea73..880a8a6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
@@ -90,9 +90,9 @@ public class Nfs3Metrics {
   readNanosQuantiles[i] = registry.newQuantiles("readProcessNanos"
   + interval + "s", "Read process in ns", "ops", "latency", interval);
   writeNanosQuantiles[i] = registry.newQuantiles("writeProcessNanos"
-  + interval + "s", " process in ns", "ops", "latency", interval);
+  + interval + "s", "Write process in ns", "ops", "latency", interval);
   commitNanosQuantiles[i] = registry.newQuantiles("commitProcessNanos"
-  + interval + "s", "Read process in ns", "ops", "latency", interval);
+  + interval + "s", "Commit process in ns", "ops", "latency", interval);
 }
   }
 
@@ -101,10 +101,9 @@ public class Nfs3Metrics {
 MetricsSystem ms = DefaultMetricsSystem.instance();
 JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);
 
-// Percentile measurement is [,,,] by default 
-int[] intervals = conf.getInts(conf.get(
-NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY,
-NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT));
+// Percentile measurement is [50th,75th,90th,95th,99th] currently 
+int[] intervals = conf
+.getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
 return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
   }
   
@@ -217,4 +216,4 @@ public class Nfs3Metrics {
 }
   }
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/260dbe96/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 19c5529..56a4f3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -960,6 +960,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7961. Trigger full block report after hot swapping disk. (Eddy Xu via 
wang)
 
+HDFS-7977. NFS 

[2/2] hadoop git commit: HDFS-7854. Separate class DataStreamer out of DFSOutputStream. Contributed by Li Bo.

2015-03-24 Thread jing9
HDFS-7854. Separate class DataStreamer out of DFSOutputStream. Contributed by 
Li Bo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a16bfff7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a16bfff7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a16bfff7

Branch: refs/heads/trunk
Commit: a16bfff71bd7f00e06e1f59bfe5445a154bb8c66
Parents: 570a83a
Author: Jing Zhao ji...@apache.org
Authored: Tue Mar 24 11:06:13 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Tue Mar 24 11:06:13 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |3 +
 .../dev-support/findbugsExcludeFile.xml |2 +-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 1694 ++---
 .../org/apache/hadoop/hdfs/DataStreamer.java| 1754 ++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |2 +-
 .../apache/hadoop/hdfs/TestDFSOutputStream.java |5 +-
 .../apache/hadoop/hdfs/TestFileCreation.java|   18 +-
 7 files changed, 1893 insertions(+), 1585 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16bfff7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5dae029..4ec0891 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -332,6 +332,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-7829. Code clean up for LocatedBlock. (Takanobu Asanuma via jing9)
 
+HDFS-7854. Separate class DataStreamer out of DFSOutputStream. (Li Bo via
+jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16bfff7/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index dedeece..224d2fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -77,7 +77,7 @@
   ResponseProccessor is a thread that is designed to catch RuntimeException.
  -->
  <Match>
-    <Class name="org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer$ResponseProcessor" />
+    <Class name="org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor" />
    <Method name="run" />
    <Bug pattern="REC_CATCH_EXCEPTION" />
  </Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16bfff7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index d7d59af..ee3e6f6 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -17,29 +17,12 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
-
-import java.io.BufferedOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.nio.channels.ClosedChannelException;
-import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -52,64 +35,37 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.QuotaExceededException;