hadoop git commit: HADOOP-10387. Misspelling of threshold in log4j.properties for tests in hadoop-common-project. Contributed by Brahma Reddy Battula.

2015-05-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ab7dab55e -> 3b36f3657


HADOOP-10387. Misspelling of threshold in log4j.properties for tests in 
hadoop-common-project. Contributed by Brahma Reddy Battula.

(cherry picked from commit 305e4733d639830cb1801efaa2dba8e84b86cc29)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b36f365
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b36f365
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b36f365

Branch: refs/heads/branch-2
Commit: 3b36f36576076ad5a4613db44d5f7a020d97cedd
Parents: ab7dab5
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 7 16:53:57 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 7 16:54:30 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/test/resources/log4j.properties | 2 +-
 .../hadoop-nfs/src/test/resources/log4j.properties| 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b36f365/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4895c15..5b6c7b3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -165,6 +165,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11912. Extra configuration key used in TraceUtils should respect
 prefix (Masatake Iwasaki via Colin P. McCabe)
 
+HADOOP-10387. Misspelling of threshold in log4j.properties for tests in
+hadoop-common-project. (Brahma Reddy Battula via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b36f365/hadoop-common-project/hadoop-common/src/test/resources/log4j.properties
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/resources/log4j.properties 
b/hadoop-common-project/hadoop-common/src/test/resources/log4j.properties
index 1a6baae..ced0687 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/test/resources/log4j.properties
@@ -12,7 +12,7 @@
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=info,stdout
-log4j.threshhold=ALL
+log4j.threshold=ALL
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b36f365/hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties 
b/hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties
index 1a6baae..ced0687 100644
--- a/hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties
+++ b/hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties
@@ -12,7 +12,7 @@
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=info,stdout
-log4j.threshhold=ALL
+log4j.threshold=ALL
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n



hadoop git commit: HADOOP-10387. Misspelling of threshold in log4j.properties for tests in hadoop-common-project. Contributed by Brahma Reddy Battula.

2015-05-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 449e4426a -> 305e4733d


HADOOP-10387. Misspelling of threshold in log4j.properties for tests in 
hadoop-common-project. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/305e4733
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/305e4733
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/305e4733

Branch: refs/heads/trunk
Commit: 305e4733d639830cb1801efaa2dba8e84b86cc29
Parents: 449e442
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 7 16:53:57 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 7 16:53:57 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/test/resources/log4j.properties | 2 +-
 .../hadoop-nfs/src/test/resources/log4j.properties| 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/305e4733/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b89543d..8e90a99 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -618,6 +618,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11912. Extra configuration key used in TraceUtils should respect
 prefix (Masatake Iwasaki via Colin P. McCabe)
 
+HADOOP-10387. Misspelling of threshold in log4j.properties for tests in
+hadoop-common-project. (Brahma Reddy Battula via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/305e4733/hadoop-common-project/hadoop-common/src/test/resources/log4j.properties
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/resources/log4j.properties 
b/hadoop-common-project/hadoop-common/src/test/resources/log4j.properties
index 1a6baae..ced0687 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/test/resources/log4j.properties
@@ -12,7 +12,7 @@
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=info,stdout
-log4j.threshhold=ALL
+log4j.threshold=ALL
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n

http://git-wip-us.apache.org/repos/asf/hadoop/blob/305e4733/hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties 
b/hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties
index 1a6baae..ced0687 100644
--- a/hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties
+++ b/hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties
@@ -12,7 +12,7 @@
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=info,stdout
-log4j.threshhold=ALL
+log4j.threshold=ALL
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n



hadoop git commit: HDFS-8325. Misspelling of threshold in log4j.properties for tests. Contributed by Brahma Reddy Battula.

2015-05-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 918af8eff -> 449e4426a


HDFS-8325. Misspelling of threshold in log4j.properties for tests. Contributed 
by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/449e4426
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/449e4426
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/449e4426

Branch: refs/heads/trunk
Commit: 449e4426a5cc1382eef0cbaa9bd4eb2221c89da1
Parents: 918af8e
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 7 15:14:20 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 7 15:14:20 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/test/resources/log4j.properties   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/449e4426/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2141e4a..17faebf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -642,6 +642,9 @@ Release 2.8.0 - UNRELEASED
 required, based on configured failed volumes tolerated.
 (Lei (Eddy) Xu via cnauroth)
 
+HDFS-8325. Misspelling of threshold in log4j.properties for tests.
+(Brahma Reddy Battula via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/449e4426/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
index c29bd1d..ef3e249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
@@ -17,7 +17,7 @@
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=info,stdout
-log4j.threshhold=ALL
+log4j.threshold=ALL
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n



hadoop git commit: HDFS-8325. Misspelling of threshold in log4j.properties for tests. Contributed by Brahma Reddy Battula.

2015-05-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 67bebabb2 -> ab7dab55e


HDFS-8325. Misspelling of threshold in log4j.properties for tests. Contributed 
by Brahma Reddy Battula.

(cherry picked from commit 449e4426a5cc1382eef0cbaa9bd4eb2221c89da1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab7dab55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab7dab55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab7dab55

Branch: refs/heads/branch-2
Commit: ab7dab55ea1faaaee76fea0de4576d2b63aca566
Parents: 67bebab
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 7 15:14:20 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 7 15:15:12 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/test/resources/log4j.properties   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab7dab55/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 055814d..5b358d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -318,6 +318,9 @@ Release 2.8.0 - UNRELEASED
 required, based on configured failed volumes tolerated.
 (Lei (Eddy) Xu via cnauroth)
 
+HDFS-8325. Misspelling of threshold in log4j.properties for tests.
+(Brahma Reddy Battula via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab7dab55/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
index c29bd1d..ef3e249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
@@ -17,7 +17,7 @@
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=info,stdout
-log4j.threshhold=ALL
+log4j.threshold=ALL
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n



hadoop git commit: HDFS-7980. Incremental BlockReport will dramatically slow down namenode startup. Contributed by Walter Su

2015-05-07 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/trunk daf3e4ef8 -> f9427f176


HDFS-7980. Incremental BlockReport will dramatically slow down namenode 
startup.  Contributed by Walter Su


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9427f17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9427f17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9427f17

Branch: refs/heads/trunk
Commit: f9427f1760cce7e0befc3e066cebd0912652a411
Parents: daf3e4e
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu May 7 11:36:35 2015 -0700
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Thu May 7 11:36:35 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../server/blockmanagement/BlockManager.java|   8 +-
 .../blockmanagement/TestBlockManager.java   | 108 +++
 3 files changed, 115 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9427f17/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 17faebf..74456db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -710,6 +710,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8305: HDFS INotify: the destination field of RenameOp should always
 end with the file name (cmccabe)
 
+HDFS-7980. Incremental BlockReport will dramatically slow down namenode
+startup.  (Walter Su via szetszwo)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9427f17/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 53ffe0b..87cb63c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1810,7 +1810,7 @@ public class BlockManager {
 return !node.hasStaleStorages();
   }
 
-  if (storageInfo.numBlocks() == 0) {
+  if (storageInfo.getBlockReportCount() == 0) {
 // The first block report can be processed a lot more efficiently than
 // ordinary block reports.  This shortens restart times.
 processFirstBlockReport(storageInfo, newReport);
@@ -2064,7 +2064,7 @@ public class BlockManager {
   final BlockListAsLongs report) throws IOException {
 if (report == null) return;
 assert (namesystem.hasWriteLock());
-assert (storageInfo.numBlocks() == 0);
+assert (storageInfo.getBlockReportCount() == 0);
 
 for (BlockReportReplica iblk : report) {
   ReplicaState reportedState = iblk.getState();
@@ -2476,14 +2476,14 @@ public class BlockManager {
 }
 
 // just add it
-storageInfo.addBlock(storedBlock);
+AddBlockResult result = storageInfo.addBlock(storedBlock);
 
 // Now check for completion of blocks and safe block count
 int numCurrentReplica = countLiveNodes(storedBlock);
 if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
     && numCurrentReplica >= minReplication) {
   completeBlock(storedBlock.getBlockCollection(), storedBlock, false);
-} else if (storedBlock.isComplete()) {
+} else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
   // check whether safe replication is reached for the block
   // only complete blocks are counted towards that.
   // In the case that the block just became complete above, completeBlock()
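
For readers skimming the hunk above: incremental block reports that arrive before the first full report already leave blocks in the DatanodeStorageInfo, so a numBlocks() == 0 guard misclassifies the first full report and falls into the slow per-block path during startup; counting full reports (getBlockReportCount()) is immune to that, and the AddBlockResult returned by addBlock() lets the safe-block check run only when the replica is genuinely new. A minimal, hypothetical sketch of that guard (simplified stand-in types, not the Hadoop classes):

// Hypothetical, simplified model of the processReport() decision; the names
// mirror the HDFS-7980 patch but this is not the Hadoop implementation.
public class FirstReportSketch {
  static class StorageInfo {
    int blockCount;        // grows when incremental block reports (IBRs) arrive
    int blockReportCount;  // only full block reports increment this

    void incrementalReport(int newBlocks) { blockCount += newBlocks; }

    void fullReport() {
      // Guarding on blockCount == 0 would skip the fast path whenever an IBR
      // arrived before the first full report -- the HDFS-7980 startup slowdown.
      if (blockReportCount == 0) {
        System.out.println("first report: bulk-load path");
      } else {
        System.out.println("ordinary report: per-block reconciliation");
      }
      blockReportCount++;
    }
  }

  public static void main(String[] args) {
    StorageInfo storage = new StorageInfo();
    storage.incrementalReport(5); // IBR lands first during startup
    storage.fullReport();         // still taken as the cheap first report
    storage.fullReport();         // subsequent reports use the normal path
  }
}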

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9427f17/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 9ce16f2..1e09e19 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -47,10 

hadoop git commit: HADOOP-11936. Dockerfile references a removed image (aw)

2015-05-07 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4e1f2eb39 -> 1886bab0f


HADOOP-11936. Dockerfile references a removed image (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1886bab0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1886bab0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1886bab0

Branch: refs/heads/branch-2
Commit: 1886bab0f159ca77248a9ee90da85385f85e3deb
Parents: 4e1f2eb
Author: Allen Wittenauer a...@apache.org
Authored: Thu May 7 11:46:32 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Thu May 7 11:47:20 2015 -0700

--
 dev-support/docker/Dockerfile   | 12 +++-
 dev-support/docker/hadoop_env_checks.sh |  0
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 start-build-env.sh  |  0
 4 files changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1886bab0/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 81296dc..f761f8b 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -19,10 +19,20 @@
 # See BUILDING.txt.
 
 # FROM dockerfile/java:openjdk-7-jdk
-FROM dockerfile/java:oracle-java7
+# FROM dockerfile/java:oracle-java7
+FROM ubuntu:trusty
 
 WORKDIR /root
 
+RUN apt-get install -y software-properties-common
+RUN add-apt-repository -y ppa:webupd8team/java
+RUN apt-get update
+
+# Auto-accept the Oracle JDK license
+RUN echo oracle-java7-installer shared/accepted-oracle-license-v1-1 select 
true | sudo /usr/bin/debconf-set-selections
+
+RUN apt-get install -y oracle-java7-installer
+
 # Install dependencies from packages
 RUN apt-get update && apt-get install --no-install-recommends -y \
 git curl ant make maven \

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1886bab0/dev-support/docker/hadoop_env_checks.sh
--
diff --git a/dev-support/docker/hadoop_env_checks.sh 
b/dev-support/docker/hadoop_env_checks.sh
old mode 100644
new mode 100755

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1886bab0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5b6c7b3..dc6705e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -168,6 +168,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-10387. Misspelling of threshold in log4j.properties for tests in
 hadoop-common-project. (Brahma Reddy Battula via aajisaka)
 
+HADOOP-11936. Dockerfile references a removed image (aw)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1886bab0/start-build-env.sh
--
diff --git a/start-build-env.sh b/start-build-env.sh
old mode 100644
new mode 100755



hadoop git commit: HDFS-7980. Incremental BlockReport will dramatically slow down namenode startup. Contributed by Walter Su

2015-05-07 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 3d1d551c3 -> 74bd9666d


HDFS-7980. Incremental BlockReport will dramatically slow down namenode 
startup.  Contributed by Walter Su


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74bd9666
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74bd9666
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74bd9666

Branch: refs/heads/branch-2.7
Commit: 74bd9666d1860067940a7c8b8ff673acc01e78e1
Parents: 3d1d551
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu May 7 11:36:35 2015 -0700
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Thu May 7 11:38:15 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../server/blockmanagement/BlockManager.java|   8 +-
 .../blockmanagement/TestBlockManager.java   | 108 +++
 3 files changed, 115 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74bd9666/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index eb02759..eaade89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -65,6 +65,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8305: HDFS INotify: the destination field of RenameOp should always
 end with the file name (cmccabe)
 
+HDFS-7980. Incremental BlockReport will dramatically slow down namenode
+startup.  (Walter Su via szetszwo)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74bd9666/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e0f87c7..d1c8d76 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1805,7 +1805,7 @@ public class BlockManager {
 return !node.hasStaleStorages();
   }
 
-  if (storageInfo.numBlocks() == 0) {
+  if (storageInfo.getBlockReportCount() == 0) {
 // The first block report can be processed a lot more efficiently than
 // ordinary block reports.  This shortens restart times.
 processFirstBlockReport(storageInfo, newReport);
@@ -2059,7 +2059,7 @@ public class BlockManager {
   final BlockListAsLongs report) throws IOException {
 if (report == null) return;
 assert (namesystem.hasWriteLock());
-assert (storageInfo.numBlocks() == 0);
+assert (storageInfo.getBlockReportCount() == 0);
 
 for (BlockReportReplica iblk : report) {
   ReplicaState reportedState = iblk.getState();
@@ -2471,14 +2471,14 @@ public class BlockManager {
 }
 
 // just add it
-storageInfo.addBlock(storedBlock);
+AddBlockResult result = storageInfo.addBlock(storedBlock);
 
 // Now check for completion of blocks and safe block count
 int numCurrentReplica = countLiveNodes(storedBlock);
 if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
     && numCurrentReplica >= minReplication) {
   completeBlock(storedBlock.getBlockCollection(), storedBlock, false);
-} else if (storedBlock.isComplete()) {
+} else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
   // check whether safe replication is reached for the block
   // only complete blocks are counted towards that.
   // In the case that the block just became complete above, completeBlock()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74bd9666/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 91abb2a..fba840e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ 

hadoop git commit: HDFS-8203. Erasure Coding: Seek and other Ops in DFSStripedInputStream. Contributed by Yi Liu.

2015-05-07 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 c61c9c855 -> 2d571bfea


HDFS-8203. Erasure Coding: Seek and other Ops in DFSStripedInputStream. 
Contributed by Yi Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d571bfe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d571bfe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d571bfe

Branch: refs/heads/HDFS-7285
Commit: 2d571bfea7fece9c4f00d6e5c29e01e8ad31e7ed
Parents: c61c9c8
Author: Jing Zhao ji...@apache.org
Authored: Thu May 7 11:06:40 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Thu May 7 11:06:40 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 +
 .../hadoop/hdfs/DFSStripedInputStream.java  | 88 +---
 .../hadoop/hdfs/TestWriteReadStripedFile.java   | 83 +++---
 3 files changed, 151 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d571bfe/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 11e8376..fed08e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -186,3 +186,6 @@
 
 HDFS-8129. Erasure Coding: Maintain consistent naming for Erasure Coding 
related classes - EC/ErasureCoding
 (umamahesh)
+
+HDFS-8203. Erasure Coding: Seek and other Ops in DFSStripedInputStream.
+(Yi Liu via jing9)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d571bfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 7cb7b6d..9011192 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -19,10 +19,13 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.*;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.io.ByteBufferPool;
+
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.ReadPortion;
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.planReadPortions;
 
@@ -31,9 +34,11 @@ import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
+import java.util.EnumSet;
 import java.util.Set;
 import java.util.Map;
 import java.util.HashMap;
@@ -263,6 +268,10 @@ public class DFSStripedInputStream extends DFSInputStream {
   }
 
   private long getOffsetInBlockGroup() {
+return getOffsetInBlockGroup(pos);
+  }
+
+  private long getOffsetInBlockGroup(long pos) {
 return pos - currentLocatedBlock.getStartOffset();
   }
 
@@ -278,18 +287,22 @@ public class DFSStripedInputStream extends DFSInputStream 
{
 // compute stripe range based on pos
 final long offsetInBlockGroup = getOffsetInBlockGroup();
 final long stripeLen = cellSize * dataBlkNum;
-int stripeIndex = (int) (offsetInBlockGroup / stripeLen);
-curStripeRange = new StripeRange(stripeIndex * stripeLen,
-Math.min(currentLocatedBlock.getBlockSize() - (stripeIndex * 
stripeLen),
-stripeLen));
-final int numCell = (int) ((curStripeRange.length - 1) / cellSize + 1);
+final int stripeIndex = (int) (offsetInBlockGroup / stripeLen);
+final int stripeBufOffset = (int) (offsetInBlockGroup % stripeLen);
+final int stripeLimit = (int) Math.min(currentLocatedBlock.getBlockSize()
+- (stripeIndex * stripeLen), stripeLen);
+curStripeRange = new StripeRange(offsetInBlockGroup,
+stripeLimit - stripeBufOffset);
+
+final int startCell = stripeBufOffset / cellSize;
+final int numCell = (stripeLimit - 1) / cellSize + 1;
 
 // read the whole stripe in parallel
 Map<Future<Integer>, Integer> futures = new HashMap<>();
-for (int i = 0; i < numCell; i++) {
-  curStripeBuf.position(cellSize * i);
-  
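
To make the arithmetic in the new hunk concrete, here is a small self-contained example (the cell size, data-block count, and offset are illustrative values, not defaults taken from the patch) showing how a mid-stripe position maps to stripeIndex, startCell, numCell and the stripe range length, so a seek reads from startCell rather than always from cell 0:

public class StripeMathExample {
  public static void main(String[] args) {
    // Illustrative values only.
    final int cellSize = 64 * 1024;          // 64 KB cells
    final int dataBlkNum = 6;                // 6 data blocks per group
    final long blockGroupSize = 3L * cellSize * dataBlkNum;           // 3 full stripes
    final long offsetInBlockGroup = cellSize * dataBlkNum + 3 * cellSize + 100;

    final long stripeLen = (long) cellSize * dataBlkNum;
    final int stripeIndex = (int) (offsetInBlockGroup / stripeLen);   // 1: second stripe
    final int stripeBufOffset = (int) (offsetInBlockGroup % stripeLen); // 3 cells + 100 bytes
    final int stripeLimit = (int) Math.min(
        blockGroupSize - stripeIndex * stripeLen, stripeLen);

    final int startCell = stripeBufOffset / cellSize;     // 3: cells 0-2 are skipped
    final int numCell = (stripeLimit - 1) / cellSize + 1; // 6: cells in this stripe

    System.out.printf("stripeIndex=%d startCell=%d numCell=%d rangeLen=%d%n",
        stripeIndex, startCell, numCell, stripeLimit - stripeBufOffset);
  }
}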

hadoop git commit: HDFS-7980. Incremental BlockReport will dramatically slow down namenode startup. Contributed by Walter Su

2015-05-07 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bb035ff08 -> 4e1f2eb39


HDFS-7980. Incremental BlockReport will dramatically slow down namenode 
startup.  Contributed by Walter Su


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e1f2eb3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e1f2eb3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e1f2eb3

Branch: refs/heads/branch-2
Commit: 4e1f2eb3955a97a70cf127dc97ae49201a90f5e0
Parents: bb035ff
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu May 7 11:36:35 2015 -0700
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Thu May 7 11:37:33 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../server/blockmanagement/BlockManager.java|   8 +-
 .../blockmanagement/TestBlockManager.java   | 108 +++
 3 files changed, 115 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e1f2eb3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5b358d5..7fd0472 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -386,6 +386,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8305: HDFS INotify: the destination field of RenameOp should always
 end with the file name (cmccabe)
 
+HDFS-7980. Incremental BlockReport will dramatically slow down namenode
+startup.  (Walter Su via szetszwo)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e1f2eb3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 842faac..1b45b0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1812,7 +1812,7 @@ public class BlockManager {
 return !node.hasStaleStorages();
   }
 
-  if (storageInfo.numBlocks() == 0) {
+  if (storageInfo.getBlockReportCount() == 0) {
 // The first block report can be processed a lot more efficiently than
 // ordinary block reports.  This shortens restart times.
 processFirstBlockReport(storageInfo, newReport);
@@ -2066,7 +2066,7 @@ public class BlockManager {
   final BlockListAsLongs report) throws IOException {
 if (report == null) return;
 assert (namesystem.hasWriteLock());
-assert (storageInfo.numBlocks() == 0);
+assert (storageInfo.getBlockReportCount() == 0);
 
 for (BlockReportReplica iblk : report) {
   ReplicaState reportedState = iblk.getState();
@@ -2478,14 +2478,14 @@ public class BlockManager {
 }
 
 // just add it
-storageInfo.addBlock(storedBlock);
+AddBlockResult result = storageInfo.addBlock(storedBlock);
 
 // Now check for completion of blocks and safe block count
 int numCurrentReplica = countLiveNodes(storedBlock);
 if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
     && numCurrentReplica >= minReplication) {
   completeBlock(storedBlock.getBlockCollection(), storedBlock, false);
-} else if (storedBlock.isComplete()) {
+} else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
   // check whether safe replication is reached for the block
   // only complete blocks are counted towards that.
   // In the case that the block just became complete above, completeBlock()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e1f2eb3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 9ce16f2..1e09e19 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -47,10 

hadoop git commit: HADOOP-11936. Dockerfile references a removed image (aw)

2015-05-07 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk f9427f176 -> ab5058de8


HADOOP-11936. Dockerfile references a removed image (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab5058de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab5058de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab5058de

Branch: refs/heads/trunk
Commit: ab5058de8e8b9b66b24edb238f25e5315b98c496
Parents: f9427f1
Author: Allen Wittenauer a...@apache.org
Authored: Thu May 7 11:46:32 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Thu May 7 11:46:32 2015 -0700

--
 dev-support/docker/Dockerfile   | 12 +++-
 dev-support/docker/hadoop_env_checks.sh |  0
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 start-build-env.sh  |  0
 4 files changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab5058de/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 81296dc..f761f8b 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -19,10 +19,20 @@
 # See BUILDING.txt.
 
 # FROM dockerfile/java:openjdk-7-jdk
-FROM dockerfile/java:oracle-java7
+# FROM dockerfile/java:oracle-java7
+FROM ubuntu:trusty
 
 WORKDIR /root
 
+RUN apt-get install -y software-properties-common
+RUN add-apt-repository -y ppa:webupd8team/java
+RUN apt-get update
+
+# Auto-accept the Oracle JDK license
+RUN echo oracle-java7-installer shared/accepted-oracle-license-v1-1 select 
true | sudo /usr/bin/debconf-set-selections
+
+RUN apt-get install -y oracle-java7-installer
+
 # Install dependencies from packages
 RUN apt-get update && apt-get install --no-install-recommends -y \
 git curl ant make maven \

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab5058de/dev-support/docker/hadoop_env_checks.sh
--
diff --git a/dev-support/docker/hadoop_env_checks.sh 
b/dev-support/docker/hadoop_env_checks.sh
old mode 100644
new mode 100755

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab5058de/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8e90a99..ddcd0ba 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -621,6 +621,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-10387. Misspelling of threshold in log4j.properties for tests in
 hadoop-common-project. (Brahma Reddy Battula via aajisaka)
 
+HADOOP-11936. Dockerfile references a removed image (aw)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab5058de/start-build-env.sh
--
diff --git a/start-build-env.sh b/start-build-env.sh
old mode 100644
new mode 100755



hadoop git commit: YARN-2918. RM should not fail on startup if queue's configured labels do not exist in cluster-node-labels. Contributed by Wangda Tan

2015-05-07 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4d9f9e546 -> f489a4ec9


YARN-2918. RM should not fail on startup if queue's configured labels do not 
exist in cluster-node-labels. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f489a4ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f489a4ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f489a4ec

Branch: refs/heads/trunk
Commit: f489a4ec969f3727d03c8e85d51af1018fc0b2a1
Parents: 4d9f9e5
Author: Jian He jia...@apache.org
Authored: Thu May 7 17:35:41 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Thu May 7 17:35:41 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../ApplicationMasterService.java   |   2 +-
 .../server/resourcemanager/RMAppManager.java|   2 +-
 .../server/resourcemanager/RMServerUtils.java   |   5 +-
 .../scheduler/SchedulerUtils.java   |  67 +
 .../scheduler/capacity/AbstractCSQueue.java |   6 +-
 .../scheduler/capacity/CSQueueUtils.java|  28 +---
 .../CapacitySchedulerConfiguration.java |  40 +-
 .../scheduler/capacity/LeafQueue.java   |   5 +-
 .../scheduler/TestSchedulerUtils.java   | 139 ---
 .../scheduler/capacity/TestQueueParsing.java| 104 +-
 11 files changed, 304 insertions(+), 97 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f489a4ec/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8d0daa3..eed28d8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -333,6 +333,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3584. Fixed attempt diagnostics format shown on the UI. (nijel via
 jianhe)
 
+YARN-2918. RM should not fail on startup if queue's configured labels do
+not exist in cluster-node-labels. (Wangda Tan via jianhe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f489a4ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index 7244b17..cd1dacf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -504,7 +504,7 @@ public class ApplicationMasterService extends 
AbstractService implements
   try {
 RMServerUtils.normalizeAndValidateRequests(ask,
 rScheduler.getMaximumResourceCapability(), app.getQueue(),
-rScheduler);
+rScheduler, rmContext);
   } catch (InvalidResourceRequestException e) {
 LOG.warn("Invalid resource ask by application " + appAttemptId, e);
 throw e;
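
As a hedged illustration of the behaviour the summary describes (a hypothetical helper, not the patched SchedulerUtils/CSQueueUtils code): unknown labels in a queue's configuration are tolerated at startup, while labels on an individual resource request are still validated against the cluster's node labels, which is why rmContext is threaded into the request-validation path above. IllegalArgumentException stands in here for the InvalidResourceRequestException caught in the hunk:

import java.util.Set;

// Hypothetical sketch of the two validation policies; not Hadoop code.
final class NodeLabelChecks {
  private NodeLabelChecks() {}

  /** Queue configuration: tolerate unknown labels so the RM can still start. */
  static void checkQueueLabels(String queue, Set<String> configured, Set<String> clusterLabels) {
    for (String label : configured) {
      if (!label.isEmpty() && !clusterLabels.contains(label)) {
        System.err.println("WARN: queue " + queue + " references unknown node label '"
            + label + "'; continuing startup");
      }
    }
  }

  /** Resource request: an unknown label is rejected back to the application. */
  static void checkRequestLabel(String labelExpression, Set<String> clusterLabels) {
    if (labelExpression != null && !labelExpression.isEmpty()
        && !clusterLabels.contains(labelExpression)) {
      throw new IllegalArgumentException(
          "Invalid label expression '" + labelExpression + "': not in cluster node labels");
    }
  }
}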

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f489a4ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index ca21f11..7990421 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -383,7 +383,7 @@ public class RMAppManager implements 
EventHandler<RMAppManagerEvent>,
   try {
 SchedulerUtils.normalizeAndValidateRequest(amReq,
 

hadoop git commit: YARN-2918. RM should not fail on startup if queue's configured labels do not exist in cluster-node-labels. Contributed by Wangda Tan (cherry picked from commit f489a4ec969f3727d03c8

2015-05-07 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a3abe8d7e -> d817fbb34


YARN-2918. RM should not fail on startup if queue's configured labels do not 
exist in cluster-node-labels. Contributed by Wangda Tan
(cherry picked from commit f489a4ec969f3727d03c8e85d51af1018fc0b2a1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d817fbb3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d817fbb3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d817fbb3

Branch: refs/heads/branch-2
Commit: d817fbb34d6e34991c6e512c20d71387750a98f4
Parents: a3abe8d
Author: Jian He jia...@apache.org
Authored: Thu May 7 17:35:41 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Thu May 7 17:36:24 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../ApplicationMasterService.java   |   2 +-
 .../server/resourcemanager/RMAppManager.java|   2 +-
 .../server/resourcemanager/RMServerUtils.java   |   5 +-
 .../scheduler/SchedulerUtils.java   |  67 +
 .../scheduler/capacity/AbstractCSQueue.java |   6 +-
 .../scheduler/capacity/CSQueueUtils.java|  28 +---
 .../CapacitySchedulerConfiguration.java |  40 +-
 .../scheduler/capacity/LeafQueue.java   |   5 +-
 .../scheduler/TestSchedulerUtils.java   | 139 ---
 .../scheduler/capacity/TestQueueParsing.java| 104 +-
 11 files changed, 304 insertions(+), 97 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d817fbb3/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index bc19862..6c6dbca 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -288,6 +288,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3584. Fixed attempt diagnostics format shown on the UI. (nijel via
 jianhe)
 
+YARN-2918. RM should not fail on startup if queue's configured labels do
+not exist in cluster-node-labels. (Wangda Tan via jianhe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d817fbb3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index 7244b17..cd1dacf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -504,7 +504,7 @@ public class ApplicationMasterService extends 
AbstractService implements
   try {
 RMServerUtils.normalizeAndValidateRequests(ask,
 rScheduler.getMaximumResourceCapability(), app.getQueue(),
-rScheduler);
+rScheduler, rmContext);
   } catch (InvalidResourceRequestException e) {
 LOG.warn("Invalid resource ask by application " + appAttemptId, e);
 throw e;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d817fbb3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index ca21f11..7990421 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -383,7 +383,7 @@ public class RMAppManager implements 
EventHandler<RMAppManagerEvent>,
   try {
 

[1/2] hadoop git commit: HDFS-8129. Erasure Coding: Maintain consistent naming for Erasure Coding related classes - EC/ErasureCoding. Contributed by Uma Maheswara Rao G

2015-05-07 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 2a89e1d33 -> c61c9c855


HDFS-8129. Erasure Coding: Maintain consistent naming for Erasure Coding 
related classes - EC/ErasureCoding. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10bfa925
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10bfa925
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10bfa925

Branch: refs/heads/HDFS-7285
Commit: 10bfa9251ac769beb0392288ca541a8c4c94f258
Parents: 0f7eb46
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Thu May 7 16:26:01 2015 +0530
Committer: Uma Maheswara Rao G umamah...@apache.org
Committed: Thu May 7 16:26:01 2015 +0530

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  10 +-
 .../hadoop/hdfs/DFSStripedInputStream.java  |   2 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  10 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|   4 +-
 .../org/apache/hadoop/hdfs/protocol/ECInfo.java |  41 --
 .../apache/hadoop/hdfs/protocol/ECZoneInfo.java |  56 
 .../hadoop/hdfs/protocol/ErasureCodingInfo.java |  41 ++
 .../hdfs/protocol/ErasureCodingZoneInfo.java|  56 
 ...tNamenodeProtocolServerSideTranslatorPB.java |  18 +--
 .../ClientNamenodeProtocolTranslatorPB.java |  16 +--
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  24 ++--
 .../hdfs/server/namenode/ECSchemaManager.java   | 127 ---
 .../namenode/ErasureCodingSchemaManager.java| 127 +++
 .../namenode/ErasureCodingZoneManager.java  |  12 +-
 .../hdfs/server/namenode/FSDirectory.java   |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  24 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |   8 +-
 .../hdfs/tools/erasurecode/ECCommand.java   |   4 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |   4 +-
 .../src/main/proto/erasurecoding.proto  |  16 +--
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |   8 +-
 .../org/apache/hadoop/hdfs/TestECSchemas.java   |   2 +-
 .../hadoop/hdfs/TestErasureCodingZones.java |  10 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|  10 +-
 .../server/namenode/TestStripedINodeFile.java   |  16 +--
 26 files changed, 328 insertions(+), 325 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10bfa925/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 8729f8a..11e8376 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -183,3 +183,6 @@
 
 HDFS-8334. Erasure coding: rename DFSStripedInputStream related test 
 classes. (Zhe Zhang)
+
+HDFS-8129. Erasure Coding: Maintain consistent naming for Erasure Coding 
related classes - EC/ErasureCoding
+(umamahesh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10bfa925/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 729ebf8..dc22e1f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -118,8 +118,8 @@ import 
org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ECInfo;
-import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -1181,7 +1181,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 //Get block info from namenode
 TraceScope scope = getPathTraceScope(newDFSInputStream, src);
 try {
-  ECInfo info = getErasureCodingInfo(src);
+  ErasureCodingInfo info = getErasureCodingInfo(src);
   if (info != null) {
 return new DFSStripedInputStream(this, src, verifyChecksum, info);
   } else {
@@ -3096,7 +3096,7 @@ public 

[2/2] hadoop git commit: Merge branch 'HDFS-7285' of https://git-wip-us.apache.org/repos/asf/hadoop into HDFS-7285

2015-05-07 Thread umamahesh
Merge branch 'HDFS-7285' of https://git-wip-us.apache.org/repos/asf/hadoop into 
HDFS-7285


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c61c9c85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c61c9c85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c61c9c85

Branch: refs/heads/HDFS-7285
Commit: c61c9c855e7cd1d20f654c061ff16341ce2d9936
Parents: 10bfa92 2a89e1d
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Thu May 7 16:29:38 2015 +0530
Committer: Uma Maheswara Rao G umamah...@apache.org
Committed: Thu May 7 16:29:38 2015 +0530

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  2 +
 .../hadoop/fs/CommonConfigurationKeys.java  |  4 --
 .../apache/hadoop/io/erasurecode/ECChunk.java   |  2 +-
 .../erasurecode/coder/AbstractErasureCoder.java |  6 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  | 40 +
 .../rawcoder/AbstractRawErasureCoder.java   | 62 +++-
 .../rawcoder/AbstractRawErasureDecoder.java | 54 ++---
 .../rawcoder/AbstractRawErasureEncoder.java | 52 
 .../erasurecode/rawcoder/RawErasureCoder.java   |  8 +--
 .../erasurecode/rawcoder/RawErasureDecoder.java | 24 +---
 .../io/erasurecode/rawcoder/XORRawDecoder.java  | 24 ++--
 .../io/erasurecode/rawcoder/XORRawEncoder.java  |  6 +-
 .../hadoop/io/erasurecode/TestCoderBase.java|  4 +-
 .../erasurecode/coder/TestRSErasureCoder.java   |  6 +-
 14 files changed, 155 insertions(+), 139 deletions(-)
--




hadoop git commit: MAPREDUCE-6356. Misspelling of threshold in log4j.properties for tests. Contributed by Brahma Reddy Battula.

2015-05-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3b36f3657 -> f7c011cba


MAPREDUCE-6356. Misspelling of threshold in log4j.properties for tests. 
Contributed by Brahma Reddy Battula.

(cherry picked from commit d335071398fdb14153c854b740814b4fd385b658)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7c011cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7c011cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7c011cb

Branch: refs/heads/branch-2
Commit: f7c011cbaa09d80f5a9cf1e2a977e023bc9fe5a8
Parents: 3b36f36
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 7 19:39:24 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 7 19:41:44 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../src/test/resources/log4j.properties   | 2 +-
 .../src/test/resources/log4j.properties   | 2 +-
 .../src/test/resources/log4j.properties   | 2 +-
 .../src/test/resources/log4j.properties   | 2 +-
 .../src/test/resources/log4j.properties   | 2 +-
 .../src/test/resources/log4j.properties   | 2 +-
 7 files changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7c011cb/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 9c1dfc4..35e0da4 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -122,6 +122,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6165. [JDK8] TestCombineFileInputFormat failed on JDK8.
 (Akira AJISAKA via ozawa)
 
+MAPREDUCE-6356. Misspelling of threshold in log4j.properties for tests.
+(Brahma Reddy Battula via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7c011cb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/resources/log4j.properties
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/resources/log4j.properties
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/resources/log4j.properties
index 531b68b..81a3f6a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/resources/log4j.properties
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/resources/log4j.properties
@@ -13,7 +13,7 @@
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=info,stdout
-log4j.threshhold=ALL
+log4j.threshold=ALL
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} 
(%F:%M(%L)) - %m%n

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7c011cb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/resources/log4j.properties
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/resources/log4j.properties
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/resources/log4j.properties
index 531b68b..81a3f6a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/resources/log4j.properties
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/resources/log4j.properties
@@ -13,7 +13,7 @@
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=info,stdout
-log4j.threshhold=ALL
+log4j.threshold=ALL
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} 
(%F:%M(%L)) - %m%n

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7c011cb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/resources/log4j.properties
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/resources/log4j.properties
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/resources/log4j.properties
index 531b68b..81a3f6a 100644
--- 

hadoop git commit: YARN-3523. Cleanup ResourceManagerAdministrationProtocol interface audience. Contributed by Naganarasimha G R

2015-05-07 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/trunk d33507139 - 8e991f4b1


YARN-3523. Cleanup ResourceManagerAdministrationProtocol interface audience. 
Contributed by Naganarasimha G R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e991f4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e991f4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e991f4b

Branch: refs/heads/trunk
Commit: 8e991f4b1d7226fdcd75c5dc9fe6e5ce721679b9
Parents: d335071
Author: Junping Du junping...@apache.org
Authored: Thu May 7 05:39:04 2015 -0700
Committer: Junping Du junping...@apache.org
Committed: Thu May 7 05:39:04 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  4 +++
 .../ResourceManagerAdministrationProtocol.java  | 37 ++--
 2 files changed, 15 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e991f4b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8f5d724..55c65f5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -177,11 +177,15 @@ Release 2.8.0 - UNRELEASED
 YARN-3363. add localization and container launch time to ContainerMetrics
 at NM to show these timing information for each active container.
 (zxu via rkanter)
+
 YARN-3396. Handle URISyntaxException in ResourceLocalizationService. 
 (Brahma Reddy Battula via junping_du)
 
 YARN-3491. PublicLocalizer#addResource is too slow. (zxu via rkanter)
 
+YARN-3523. Cleanup ResourceManagerAdministrationProtocol interface 
audience.
+(Naganarasimha G R via junping_du)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e991f4b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
index 0cfa8ce..36dfbc0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
@@ -21,9 +21,6 @@ package org.apache.hadoop.yarn.server.api;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
-import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
@@ -54,45 +51,38 @@ import 
org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceReque
 import 
org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
 
 @Private
-@Stable
 public interface ResourceManagerAdministrationProtocol extends 
GetUserMappingsProtocol {
 
-  @Public
-  @Stable
+  @Private
   @Idempotent
   public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) 
   throws StandbyException, YarnException, IOException;
 
-  @Public
-  @Stable
+  @Private
   @Idempotent
   public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
   throws StandbyException, YarnException, IOException;
 
-  @Public
-  @Stable
+  @Private
   @Idempotent
   public RefreshSuperUserGroupsConfigurationResponse 
   refreshSuperUserGroupsConfiguration(
   RefreshSuperUserGroupsConfigurationRequest request)
   throws StandbyException, YarnException, IOException;
 
-  @Public
-  @Stable
+  @Private
   @Idempotent
   public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
   RefreshUserToGroupsMappingsRequest request)
   throws StandbyException, YarnException, IOException;
 
-  @Public
-  @Stable
+  @Private
   @Idempotent
   public RefreshAdminAclsResponse refreshAdminAcls(
   RefreshAdminAclsRequest request)
   throws YarnException, IOException;
 
-  @Public
-  @Stable
+  @Private
   @Idempotent
   public RefreshServiceAclsResponse refreshServiceAcls(
   RefreshServiceAclsRequest request)
@@ -112,33 +102,28 @@ public interface 

hadoop git commit: YARN-3523. Cleanup ResourceManagerAdministrationProtocol interface audience. Contributed by Naganarasimha G R (cherry picked from commit 8e991f4b1d7226fdcd75c5dc9fe6e5ce721679b9)

2015-05-07 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f7c011cba - 684a5a6ae


YARN-3523. Cleanup ResourceManagerAdministrationProtocol interface audience. 
Contributed by Naganarasimha G R
(cherry picked from commit 8e991f4b1d7226fdcd75c5dc9fe6e5ce721679b9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/684a5a6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/684a5a6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/684a5a6a

Branch: refs/heads/branch-2
Commit: 684a5a6aeb7fbd74c438852acb6526d81a66b305
Parents: f7c011c
Author: Junping Du junping...@apache.org
Authored: Thu May 7 05:39:04 2015 -0700
Committer: Junping Du junping...@apache.org
Committed: Thu May 7 05:40:13 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  4 +++
 .../ResourceManagerAdministrationProtocol.java  | 37 ++--
 2 files changed, 15 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/684a5a6a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c9e4889..f9f125e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -129,6 +129,7 @@ Release 2.8.0 - UNRELEASED
 YARN-3363. add localization and container launch time to ContainerMetrics
 at NM to show these timing information for each active container.
 (zxu via rkanter)
+
 YARN-3396. Handle URISyntaxException in ResourceLocalizationService. 
 (Brahma Reddy Battula via junping_du)
 
@@ -137,6 +138,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3491. PublicLocalizer#addResource is too slow. (zxu via rkanter)
 
+YARN-3523. Cleanup ResourceManagerAdministrationProtocol interface 
audience.
+(Naganarasimha G R via junping_du)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/684a5a6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
index 0cfa8ce..36dfbc0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
@@ -21,9 +21,6 @@ package org.apache.hadoop.yarn.server.api;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
-import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
@@ -54,45 +51,38 @@ import 
org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceReque
 import 
org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
 
 @Private
-@Stable
 public interface ResourceManagerAdministrationProtocol extends 
GetUserMappingsProtocol {
 
-  @Public
-  @Stable
+  @Private
   @Idempotent
   public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) 
   throws StandbyException, YarnException, IOException;
 
-  @Public
-  @Stable
+  @Private
   @Idempotent
   public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
   throws StandbyException, YarnException, IOException;
 
-  @Public
-  @Stable
+  @Private
   @Idempotent
   public RefreshSuperUserGroupsConfigurationResponse 
   refreshSuperUserGroupsConfiguration(
   RefreshSuperUserGroupsConfigurationRequest request)
   throws StandbyException, YarnException, IOException;
 
-  @Public
-  @Stable
+  @Private
   @Idempotent
   public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
   RefreshUserToGroupsMappingsRequest request)
   throws StandbyException, YarnException, IOException;
 
-  @Public
-  @Stable
+  @Private
   @Idempotent
   public RefreshAdminAclsResponse refreshAdminAcls(
   RefreshAdminAclsRequest request)
   throws YarnException, IOException;
 
-  @Public
-  @Stable
+  @Private
   @Idempotent
   public RefreshServiceAclsResponse 
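
The patch above narrows the audience annotations from @Public/@Stable to @Private on the admin protocol and its methods. For readers who have not used Hadoop's classification annotations, a minimal, hypothetical sketch of how they are attached to an interface follows; ExampleAdminProtocol and refreshSomething are made-up names, not part of this change:

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;

    // Hypothetical interface, shown only to illustrate audience/stability markers.
    @InterfaceAudience.Private    // internal to Hadoop, not a user-facing API
    @InterfaceStability.Unstable  // no compatibility promise across releases
    public interface ExampleAdminProtocol {

      // Method-level annotations can further qualify individual operations.
      @InterfaceAudience.Private
      void refreshSomething() throws java.io.IOException;
    }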

hadoop git commit: HDFS-8067. haadmin prints out stale help messages (Contributed by Ajith S)

2015-05-07 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 00954714a - d0fff5d32


HDFS-8067. haadmin prints out stale help messages (Contributed by Ajith S)

(cherry picked from commit 66988476d09a6d04c0b81a663db1e6e5a28c37fb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0fff5d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0fff5d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0fff5d3

Branch: refs/heads/branch-2
Commit: d0fff5d3282a6a823a29b2ed651adbe6cec3caff
Parents: 0095471
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri May 8 10:45:21 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri May 8 10:47:23 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0fff5d3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fd2684c..17f9144 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -330,6 +330,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8257. Namenode rollingUpgrade option is incorrect in document
 (J.Andreina via vinayakumarb)
 
+HDFS-8067. haadmin prints out stale help messages (Ajith S via 
vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0fff5d3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
index aa69dca..4c0ddb2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
@@ -90,7 +90,7 @@ public class DFSHAAdmin extends HAAdmin {
 
   @Override
   protected String getUsageString() {
-return "Usage: haadmin";
+return "Usage: haadmin [-ns <nameserviceId>]";
   }
 
   @Override



hadoop git commit: HDFS-6291. FSImage may be left unclosed in BootstrapStandby#doRun() ( Contributed by Sanghyun Yun)

2015-05-07 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7fe1d7400 - 089d420a8


HDFS-6291. FSImage may be left unclosed in BootstrapStandby#doRun() ( 
Contributed by Sanghyun Yun)

(cherry picked from commit e5e492a9631ff78302fccedcb64d7b64b9407991)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/089d420a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/089d420a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/089d420a

Branch: refs/heads/branch-2
Commit: 089d420a824ced46b806d0e5156999f0abad66aa
Parents: 7fe1d74
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri May 8 11:25:24 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri May 8 11:26:56 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java   | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/089d420a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a51968d..ab00727 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -334,6 +334,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8174. Update replication count to live rep count in fsck report. 
(J.Andreina)
 
+HDFS-6291. FSImage may be left unclosed in BootstrapStandby#doRun()
+(Sanghyun Yun via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/089d420a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index 5981b4f..7038dc0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -320,8 +320,9 @@ public class BootstrapStandby implements Tool, Configurable 
{
   image.saveDigestAndRenameCheckpointImage(NameNodeFile.IMAGE, imageTxId,
   hash);
 } catch (IOException ioe) {
-  image.close();
   throw ioe;
+} finally {
+  image.close();
 }
 return 0;
   }
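
The change above moves image.close() from the catch block into a finally block, so the FSImage is released whether the checkpoint save succeeds or throws. A minimal sketch of the same try/finally pattern, using a generic Closeable as a stand-in for the FSImage (method names here are illustrative):

    import java.io.Closeable;
    import java.io.IOException;

    public class CloseInFinallyExample {

      // Mirrors the shape of the fix: the resource is closed on every path.
      static int run(Closeable image) throws IOException {
        try {
          doCheckpointWork();            // may throw IOException
        } catch (IOException ioe) {
          throw ioe;                     // rethrow; cleanup is left to finally
        } finally {
          image.close();                 // runs on success and on failure
        }
        return 0;
      }

      private static void doCheckpointWork() throws IOException {
        // placeholder for the real checkpoint/save logic
      }
    }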



hadoop git commit: HADOOP-11922. Misspelling of threshold in log4j.properties for tests in hadoop-tools (Contributed by Gabor Liptak)

2015-05-07 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c7f3dd1eb - 14d88a85e


HADOOP-11922. Misspelling of threshold in log4j.properties for tests in 
hadoop-tools (Contributed by Gabor Liptak)

(cherry picked from commit f33efbc82709e75d9b4c4ed9835629668dbade7d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14d88a85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14d88a85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14d88a85

Branch: refs/heads/branch-2
Commit: 14d88a85e490cd844995b9d8daeff78752dca9c1
Parents: c7f3dd1
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri May 8 10:22:11 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri May 8 10:23:27 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 hadoop-tools/hadoop-aws/src/test/resources/log4j.properties   | 2 +-
 hadoop-tools/hadoop-azure/src/test/resources/log4j.properties | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14d88a85/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index dc6705e..ba7e3be 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -170,6 +170,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-11936. Dockerfile references a removed image (aw)
 
+HADOOP-11922. Misspelling of threshold in log4j.properties for tests
+in hadoop-tools (Gabor Liptak via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14d88a85/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties
--
diff --git a/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties 
b/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties
index 1a6baae..ced0687 100644
--- a/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties
+++ b/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties
@@ -12,7 +12,7 @@
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=info,stdout
-log4j.threshhold=ALL
+log4j.threshold=ALL
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14d88a85/hadoop-tools/hadoop-azure/src/test/resources/log4j.properties
--
diff --git a/hadoop-tools/hadoop-azure/src/test/resources/log4j.properties 
b/hadoop-tools/hadoop-azure/src/test/resources/log4j.properties
index 81b935b..bb5cbe5 100644
--- a/hadoop-tools/hadoop-azure/src/test/resources/log4j.properties
+++ b/hadoop-tools/hadoop-azure/src/test/resources/log4j.properties
@@ -17,7 +17,7 @@
 # log4j configuration used during build and unit tests
 
 log4j.rootLogger=INFO,stdout
-log4j.threshhold=ALL
+log4j.threshold=ALL
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n
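
For context, log4j.threshold is log4j 1.x's repository-wide filter; with the misspelled key it was silently ignored, although the effective behaviour should be unchanged since the default is already ALL. A small sketch of how the threshold and the logger levels interact, assuming log4j 1.2 on the classpath (class name and messages are made up):

    import org.apache.log4j.Level;
    import org.apache.log4j.LogManager;
    import org.apache.log4j.Logger;

    public class ThresholdExample {
      private static final Logger LOG = Logger.getLogger(ThresholdExample.class);

      public static void main(String[] args) {
        // Programmatic equivalent of log4j.threshold=ALL: the repository-wide
        // filter passes everything, so the logger levels decide what is printed.
        LogManager.getLoggerRepository().setThreshold(Level.ALL);

        LOG.info("printed when the root logger is at INFO");
        LOG.debug("dropped by the INFO root logger, not by the threshold");
      }
    }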



hadoop git commit: HDFS-8257. Namenode rollingUpgrade option is incorrect in document (Contributed by J.Andreina)

2015-05-07 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk f33efbc82 - c7c26a1e4


HDFS-8257. Namenode rollingUpgrade option is incorrect in document (Contributed 
by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7c26a1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7c26a1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7c26a1e

Branch: refs/heads/trunk
Commit: c7c26a1e4aff0b89016ec838d06ba2b628a6808e
Parents: f33efbc
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri May 8 10:35:25 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri May 8 10:35:25 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7c26a1e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b4f2042..4fcf524 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -651,6 +651,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8037. CheckAccess in WebHDFS silently accepts malformed FsActions
 parameters. (wheat9)
 
+HDFS-8257. Namenode rollingUpgrade option is incorrect in document
+(J.Andreina via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7c26a1e/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index bdb051b..254a77c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -419,7 +419,7 @@ Usage:
  [-upgrade [-clusterid cid] [-renameReserved<k-v pairs>] ] |
  [-upgradeOnly [-clusterid cid] [-renameReserved<k-v pairs>] ] |
  [-rollback] |
-  [-rollingUpgrade <downgrade |rollback> ] |
+  [-rollingUpgrade <rollback |started> ] |
   [-finalize] |
   [-importCheckpoint] |
   [-initializeSharedEdits] |



hadoop git commit: YARN-3572. Correct typos in WritingYarnApplications.md. Contributed by Gabor Liptak.

2015-05-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 170f1b0af - c7f3dd1eb


YARN-3572. Correct typos in WritingYarnApplications.md. Contributed by Gabor 
Liptak.

(cherry picked from commit a521b509551e092dfeb38cdf29bb96556d3e0266)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7f3dd1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7f3dd1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7f3dd1e

Branch: refs/heads/branch-2
Commit: c7f3dd1eb3b19a70dee137a4aac4ed3a2115f459
Parents: 170f1b0
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri May 8 13:24:29 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Fri May 8 13:25:07 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../src/site/markdown/WritingYarnApplications.md | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7f3dd1e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e871ef0..b49940d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -294,6 +294,9 @@ Release 2.8.0 - UNRELEASED
 YARN-1832. Fix wrong MockLocalizerStatus#equals implementation.
 (Hong Zhiguo via aajisaka)
 
+YARN-3572. Correct typos in WritingYarnApplications.md.
+(Gabor Liptak via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7f3dd1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
index 03b2964..d5a7a17 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
@@ -535,8 +535,8 @@ You can use the LocalResource to add resources to your 
application request. This
 
 ```java
 File packageFile = new File(packagePath);
-Url packageUrl = ConverterUtils.getYarnUrlFromPath(
-FileContext.getFileContext.makeQualified(new Path(packagePath)));
+URL packageUrl = ConverterUtils.getYarnUrlFromPath(
+FileContext.getFileContext().makeQualified(new Path(packagePath)));
 
 packageResource.setResource(packageUrl);
 packageResource.setSize(packageFile.length());
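
As a companion to the corrected snippet, here is a hedged sketch of registering such a package as a YARN LocalResource with the same 2.x-era API; buildResources, the "package" key and the ARCHIVE/APPLICATION choices are illustrative assumptions, not part of the documentation change:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.api.records.LocalResource;
    import org.apache.hadoop.yarn.api.records.LocalResourceType;
    import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
    import org.apache.hadoop.yarn.api.records.URL;
    import org.apache.hadoop.yarn.util.ConverterUtils;
    import org.apache.hadoop.yarn.util.Records;

    public class LocalResourceExample {
      // packagePath is assumed to point at an archive already on a shared filesystem.
      static Map<String, LocalResource> buildResources(String packagePath) throws Exception {
        FileContext fc = FileContext.getFileContext();
        Path qualified = fc.makeQualified(new Path(packagePath));
        FileStatus status = fc.getFileStatus(qualified);

        URL packageUrl = ConverterUtils.getYarnUrlFromPath(qualified);

        LocalResource packageResource = Records.newRecord(LocalResource.class);
        packageResource.setResource(packageUrl);
        packageResource.setSize(status.getLen());
        packageResource.setTimestamp(status.getModificationTime());
        packageResource.setType(LocalResourceType.ARCHIVE);
        packageResource.setVisibility(LocalResourceVisibility.APPLICATION);

        Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
        localResources.put("package", packageResource);
        return localResources;
      }
    }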



hadoop git commit: HDFS-6291. FSImage may be left unclosed in BootstrapStandby#doRun() ( Contributed by Sanghyun Yun)

2015-05-07 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8bafafadf - e5e492a96


HDFS-6291. FSImage may be left unclosed in BootstrapStandby#doRun() ( 
Contributed by Sanghyun Yun)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5e492a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5e492a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5e492a9

Branch: refs/heads/trunk
Commit: e5e492a9631ff78302fccedcb64d7b64b9407991
Parents: 8bafafa
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri May 8 11:25:24 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri May 8 11:25:24 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java   | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e492a9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 916e327..76cd03b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -658,6 +658,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8174. Update replication count to live rep count in fsck report. 
(J.Andreina)
 
+HDFS-6291. FSImage may be left unclosed in BootstrapStandby#doRun()
+(Sanghyun Yun via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e492a9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index 0accf53..88d9a6a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -320,8 +320,9 @@ public class BootstrapStandby implements Tool, Configurable 
{
   image.saveDigestAndRenameCheckpointImage(NameNodeFile.IMAGE, imageTxId,
   hash);
 } catch (IOException ioe) {
-  image.close();
   throw ioe;
+} finally {
+  image.close();
 }
 return 0;
   }



hadoop git commit: YARN-3572. Correct typos in WritingYarnApplications.md. Contributed by Gabor Liptak.

2015-05-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk b167fe760 - a521b5095


YARN-3572. Correct typos in WritingYarnApplications.md. Contributed by Gabor 
Liptak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a521b509
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a521b509
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a521b509

Branch: refs/heads/trunk
Commit: a521b509551e092dfeb38cdf29bb96556d3e0266
Parents: b167fe7
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri May 8 13:24:29 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Fri May 8 13:24:29 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../src/site/markdown/WritingYarnApplications.md | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a521b509/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index fac3fa5..f8a2cc0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -339,6 +339,9 @@ Release 2.8.0 - UNRELEASED
 YARN-1832. Fix wrong MockLocalizerStatus#equals implementation.
 (Hong Zhiguo via aajisaka)
 
+YARN-3572. Correct typos in WritingYarnApplications.md.
+(Gabor Liptak via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a521b509/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
index 03b2964..d5a7a17 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
@@ -535,8 +535,8 @@ You can use the LocalResource to add resources to your 
application request. This
 
 ```java
 File packageFile = new File(packagePath);
-Url packageUrl = ConverterUtils.getYarnUrlFromPath(
-FileContext.getFileContext.makeQualified(new Path(packagePath)));
+URL packageUrl = ConverterUtils.getYarnUrlFromPath(
+FileContext.getFileContext().makeQualified(new Path(packagePath)));
 
 packageResource.setResource(packageUrl);
 packageResource.setSize(packageFile.length());



hadoop git commit: HDFS-8067. haadmin prints out stale help messages (Contributed by Ajith S)

2015-05-07 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk c7c26a1e4 - 66988476d


HDFS-8067. haadmin prints out stale help messages (Contributed by Ajith S)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66988476
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66988476
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66988476

Branch: refs/heads/trunk
Commit: 66988476d09a6d04c0b81a663db1e6e5a28c37fb
Parents: c7c26a1
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri May 8 10:45:21 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri May 8 10:45:21 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66988476/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4fcf524..b9443b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -654,6 +654,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8257. Namenode rollingUpgrade option is incorrect in document
 (J.Andreina via vinayakumarb)
 
+HDFS-8067. haadmin prints out stale help messages (Ajith S via 
vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66988476/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
index aa69dca..4c0ddb2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
@@ -90,7 +90,7 @@ public class DFSHAAdmin extends HAAdmin {
 
   @Override
   protected String getUsageString() {
-return "Usage: haadmin";
+return "Usage: haadmin [-ns <nameserviceId>]";
   }
 
   @Override



hadoop git commit: YARN-3592. Fix typos in RMNodeLabelsManager. Contributed by Sunil G.

2015-05-07 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/trunk 66988476d - a28cd02ba


YARN-3592. Fix typos in RMNodeLabelsManager. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a28cd02b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a28cd02b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a28cd02b

Branch: refs/heads/trunk
Commit: a28cd02ba026733e409807f3ea1b36f7d57bc273
Parents: 6698847
Author: Devaraj K deva...@apache.org
Authored: Fri May 8 10:54:36 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Fri May 8 10:54:36 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt   |  2 ++
 .../resourcemanager/nodelabels/RMNodeLabelsManager.java   | 10 +-
 2 files changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a28cd02b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f8a2cc0..a698a7d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -342,6 +342,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3572. Correct typos in WritingYarnApplications.md.
 (Gabor Liptak via aajisaka)
 
+YARN-3592. Fix typos in RMNodeLabelsManager. (Sunil G via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a28cd02b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
index 25e5bc09..696b99b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
@@ -46,11 +46,11 @@ import com.google.common.collect.ImmutableSet;
 
 public class RMNodeLabelsManager extends CommonNodeLabelsManager {
   protected static class Queue {
-protected Set<String> acccessibleNodeLabels;
+protected Set<String> accessibleNodeLabels;
 protected Resource resource;
 
 protected Queue() {
-  acccessibleNodeLabels =
+  accessibleNodeLabels =
   Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
   resource = Resource.newInstance(0, 0);
 }
@@ -98,7 +98,7 @@ public class RMNodeLabelsManager extends 
CommonNodeLabelsManager {
   // check if any queue contains this label
   for (Entry<String, Queue> entry : queueCollections.entrySet()) {
 String queueName = entry.getKey();
-Set<String> queueLabels = entry.getValue().acccessibleNodeLabels;
+Set<String> queueLabels = entry.getValue().accessibleNodeLabels;
 if (queueLabels.contains(label)) {
   throw new IOException("Cannot remove label=" + label
   + ", because queue=" + queueName + " is using this label. "
@@ -275,7 +275,7 @@ public class RMNodeLabelsManager extends 
CommonNodeLabelsManager {
   continue;
 }
 
-q.acccessibleNodeLabels.addAll(labels);
+q.accessibleNodeLabels.addAll(labels);
 for (Host host : nodeCollections.values()) {
   for (Entry<NodeId, Node> nentry : host.nms.entrySet()) {
 NodeId nodeId = nentry.getKey();
@@ -468,7 +468,7 @@ public class RMNodeLabelsManager extends 
CommonNodeLabelsManager {
 }
 
 for (String label : nodeLabels) {
-  if (q.acccessibleNodeLabels.contains(label)) {
+  if (q.accessibleNodeLabels.contains(label)) {
 return true;
   }
 }



hadoop git commit: YARN-3592. Fix typos in RMNodeLabelsManager. Contributed by Sunil G.

2015-05-07 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d0fff5d32 - dce2381dc


YARN-3592. Fix typos in RMNodeLabelsManager. Contributed by Sunil G.

(cherry picked from commit a28cd02ba026733e409807f3ea1b36f7d57bc273)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dce2381d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dce2381d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dce2381d

Branch: refs/heads/branch-2
Commit: dce2381dc4a877fcbfb869f115152ecd44a92173
Parents: d0fff5d
Author: Devaraj K deva...@apache.org
Authored: Fri May 8 10:54:36 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Fri May 8 10:55:53 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt   |  2 ++
 .../resourcemanager/nodelabels/RMNodeLabelsManager.java   | 10 +-
 2 files changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce2381d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b49940d..0523f4f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -297,6 +297,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3572. Correct typos in WritingYarnApplications.md.
 (Gabor Liptak via aajisaka)
 
+YARN-3592. Fix typos in RMNodeLabelsManager. (Sunil G via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce2381d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
index 25e5bc09..696b99b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
@@ -46,11 +46,11 @@ import com.google.common.collect.ImmutableSet;
 
 public class RMNodeLabelsManager extends CommonNodeLabelsManager {
   protected static class Queue {
-protected Set<String> acccessibleNodeLabels;
+protected Set<String> accessibleNodeLabels;
 protected Resource resource;
 
 protected Queue() {
-  acccessibleNodeLabels =
+  accessibleNodeLabels =
   Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
   resource = Resource.newInstance(0, 0);
 }
@@ -98,7 +98,7 @@ public class RMNodeLabelsManager extends 
CommonNodeLabelsManager {
   // check if any queue contains this label
   for (Entry<String, Queue> entry : queueCollections.entrySet()) {
 String queueName = entry.getKey();
-Set<String> queueLabels = entry.getValue().acccessibleNodeLabels;
+Set<String> queueLabels = entry.getValue().accessibleNodeLabels;
 if (queueLabels.contains(label)) {
   throw new IOException("Cannot remove label=" + label
   + ", because queue=" + queueName + " is using this label. "
@@ -275,7 +275,7 @@ public class RMNodeLabelsManager extends 
CommonNodeLabelsManager {
   continue;
 }
 
-q.acccessibleNodeLabels.addAll(labels);
+q.accessibleNodeLabels.addAll(labels);
 for (Host host : nodeCollections.values()) {
   for (Entry<NodeId, Node> nentry : host.nms.entrySet()) {
 NodeId nodeId = nentry.getKey();
@@ -468,7 +468,7 @@ public class RMNodeLabelsManager extends 
CommonNodeLabelsManager {
 }
 
 for (String label : nodeLabels) {
-  if (q.acccessibleNodeLabels.contains(label)) {
+  if (q.accessibleNodeLabels.contains(label)) {
 return true;
   }
 }
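
Beyond the rename, the field touched above is a concurrent Set<String> built with Collections.newSetFromMap over a ConcurrentHashMap. A self-contained sketch of that idiom (the label names are invented):

    import java.util.Collections;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class ConcurrentSetExample {
      public static void main(String[] args) {
        // Thread-safe Set<String> with no extra locking in the caller.
        Set<String> accessibleNodeLabels =
            Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());

        accessibleNodeLabels.add("gpu");
        accessibleNodeLabels.add("large-mem");

        // Membership checks are safe to run concurrently with the adds above.
        System.out.println(accessibleNodeLabels.contains("gpu"));   // true
        System.out.println(accessibleNodeLabels.contains("ssd"));   // false
      }
    }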



[2/2] hadoop git commit: Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk

2015-05-07 Thread umamahesh
Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into 
trunk


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bafafad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bafafad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bafafad

Branch: refs/heads/trunk
Commit: 8bafafadfd784a6fa4a5290c6e852b1c696642e9
Parents: 2ea0f2f a28cd02
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Fri May 8 11:02:37 2015 +0530
Committer: Uma Maheswara Rao G umamah...@apache.org
Committed: Fri May 8 11:02:37 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt   |  2 ++
 .../resourcemanager/nodelabels/RMNodeLabelsManager.java   | 10 +-
 2 files changed, 7 insertions(+), 5 deletions(-)
--




[1/2] hadoop git commit: HDFS-8174. Update replication count to live rep count in fsck report. Contributed by J.Andreina

2015-05-07 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/trunk a28cd02ba - 8bafafadf


HDFS-8174. Update replication count to live rep count in fsck report. 
Contributed by  J.Andreina


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ea0f2fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ea0f2fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ea0f2fc

Branch: refs/heads/trunk
Commit: 2ea0f2fc938febd7fbbe03656a91ae3db1409c50
Parents: 6698847
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Fri May 8 11:01:51 2015 +0530
Committer: Uma Maheswara Rao G umamah...@apache.org
Committed: Fri May 8 11:01:51 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 2 ++
 .../java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java  | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ea0f2fc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b9443b4..916e327 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -656,6 +656,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8067. haadmin prints out stale help messages (Ajith S via 
vinayakumarb)
 
+HDFS-8174. Update replication count to live rep count in fsck report. 
(J.Andreina)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ea0f2fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index ac77394..11e89c9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -631,7 +631,7 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 missing++;
 missize += block.getNumBytes();
   } else {
-report.append(" repl=" + liveReplicas);
+report.append(" Live_repl=" + liveReplicas);
 if (showLocations || showRacks || showReplicaDetails) {
   StringBuilder sb = new StringBuilder("[");
   Iterable<DatanodeStorageInfo> storages =
bm.getStorages(block.getLocalBlock());



hadoop git commit: YARN-1832. Fix wrong MockLocalizerStatus#equals implementation. Contributed by Hong Zhiguo.

2015-05-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d817fbb34 - 170f1b0af


YARN-1832. Fix wrong MockLocalizerStatus#equals implementation. Contributed by 
Hong Zhiguo.

(cherry picked from commit b167fe7605deb29ec533047d79d036eb65328853)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/170f1b0a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/170f1b0a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/170f1b0a

Branch: refs/heads/branch-2
Commit: 170f1b0afdac88e84fdf188fc90545ae8f21ddd7
Parents: d817fbb
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri May 8 11:14:45 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Fri May 8 11:15:28 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../containermanager/localizer/MockLocalizerStatus.java   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/170f1b0a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6c6dbca..e871ef0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -291,6 +291,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2918. RM should not fail on startup if queue's configured labels do
 not exist in cluster-node-labels. (Wangda Tan via jianhe)
 
+YARN-1832. Fix wrong MockLocalizerStatus#equals implementation.
+(Hong Zhiguo via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/170f1b0a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerStatus.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerStatus.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerStatus.java
index f4e5d23..95acfe34 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerStatus.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerStatus.java
@@ -67,7 +67,7 @@ public class MockLocalizerStatus implements LocalizerStatus {
   return false;
 }
 MockLocalizerStatus other = (MockLocalizerStatus) o;
-    return getLocalizerId().equals(other)
+    return getLocalizerId().equals(other.getLocalizerId())
         && getResources().containsAll(other.getResources())
         && other.getResources().containsAll(getResources());
   }
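
The bug fixed above is a common equals() slip: a field was compared against the whole other object, so the method could never return true. A short, hypothetical sketch of the corrected shape (LocalizerId is an invented class, not the test stub itself):

    import java.util.Objects;

    // Hypothetical value class, only to illustrate field-by-field comparison.
    public class LocalizerId {
      private final String id;

      public LocalizerId(String id) {
        this.id = id;
      }

      @Override
      public boolean equals(Object o) {
        if (this == o) {
          return true;
        }
        if (!(o instanceof LocalizerId)) {
          return false;
        }
        LocalizerId other = (LocalizerId) o;
        // Compare like fields, not "this field vs. the whole other object".
        return Objects.equals(id, other.id);
      }

      @Override
      public int hashCode() {
        return Objects.hash(id);
      }
    }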



hadoop git commit: HDFS-8174. Update replication count to live rep count in fsck report. Contributed by J.Andreina

2015-05-07 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 dce2381dc - 7fe1d7400


HDFS-8174. Update replication count to live rep count in fsck report. 
Contributed by  J.Andreina

(cherry picked from commit 2ea0f2fc938febd7fbbe03656a91ae3db1409c50)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fe1d740
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fe1d740
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fe1d740

Branch: refs/heads/branch-2
Commit: 7fe1d74005d8dd90e5d87c47b5b750230dc6201d
Parents: dce2381
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Fri May 8 11:01:51 2015 +0530
Committer: Uma Maheswara Rao G umamah...@apache.org
Committed: Fri May 8 11:07:58 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 2 ++
 .../java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java  | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe1d740/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 17f9144..a51968d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -332,6 +332,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8067. haadmin prints out stale help messages (Ajith S via 
vinayakumarb)
 
+HDFS-8174. Update replication count to live rep count in fsck report. 
(J.Andreina)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe1d740/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 5ea8b8b..7c24d44 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -620,7 +620,7 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 missing++;
 missize += block.getNumBytes();
   } else {
-report.append(" repl=" + liveReplicas);
+report.append(" Live_repl=" + liveReplicas);
 if (showLocations || showRacks || showReplicaDetails) {
   StringBuilder sb = new StringBuilder("[");
   Iterable<DatanodeStorageInfo> storages =
bm.getStorages(block.getLocalBlock());



hadoop git commit: YARN-3584. Fixed attempt diagnostics format shown on the UI. Contributed by nijel

2015-05-07 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk ab5058de8 - b88700dcd


YARN-3584. Fixed attempt diagnostics format shown on the UI. Contributed by 
nijel


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b88700dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b88700dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b88700dc

Branch: refs/heads/trunk
Commit: b88700dcd0b9aa47662009241dfb83bc4446548d
Parents: ab5058d
Author: Jian He jia...@apache.org
Authored: Thu May 7 13:54:07 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Thu May 7 13:54:07 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../resourcemanager/rmapp/attempt/RMAppAttemptImpl.java  | 8 
 2 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b88700dc/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ff7921c..8d0daa3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -330,6 +330,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3577. Misspelling of threshold in log4j.properties for tests.
 (Brahma Reddy Battula via aajisaka)
 
+YARN-3584. Fixed attempt diagnostics format shown on the UI. (nijel via
+jianhe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b88700dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 8abc65a..684dde8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1455,14 +1455,14 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
   finishEvent.getApplicationAttemptId()).append(
" exited with ").append(" exitCode: ").append(status.getExitStatus()).
   append("\n");
+diagnosticsBuilder.append("Failing this attempt.").append("Diagnostics: ")
+.append(status.getDiagnostics());
 if (this.getTrackingUrl() != null) {
   diagnosticsBuilder.append("For more detailed output,").append(
-" check application tracking page:").append(
+" check application tracking page: ").append(
 this.getTrackingUrl()).append(
-"Then, click on links to logs of each attempt.\n");
+" Then, click on links to logs of each attempt.\n");
 }
-diagnosticsBuilder.append("Diagnostics: ").append(status.getDiagnostics())
-.append("Failing this attempt");
 return diagnosticsBuilder.toString();
   }
 



hadoop git commit: MAPREDUCE-6279. AM should explicitly exit JVM after all services have stopped. Contributed by Eric Payne (cherry picked from commit f30065c8b6099372f57015b505434120fe83c2b0)

2015-05-07 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c35114a1e - 976a3c1f9


MAPREDUCE-6279. AM should explicitly exit JVM after all services have stopped. 
Contributed by Eric Payne
(cherry picked from commit f30065c8b6099372f57015b505434120fe83c2b0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/976a3c1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/976a3c1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/976a3c1f

Branch: refs/heads/branch-2
Commit: 976a3c1f9d9241ae45c8403d46e3738cb3a01d2a
Parents: c35114a
Author: Jason Lowe jl...@apache.org
Authored: Thu May 7 22:05:12 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Thu May 7 22:06:54 2015 +

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../hadoop/mapreduce/v2/app/MRAppMaster.java| 32 -
 .../mapreduce/v2/app/TestMRAppMaster.java   | 49 +++-
 3 files changed, 81 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/976a3c1f/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 35e0da4..e903cb8 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -55,6 +55,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6192. Create unit test to automatically compare
 MR related classes and mapred-default.xml (rchiang via rkanter)
 
+MAPREDUCE-6279. AM should explicitly exit JVM after all services have
+stopped (Eric Payne via jlowe)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/976a3c1f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index 1868b98..8074e17 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -217,6 +217,7 @@ public class MRAppMaster extends CompositeService {
   private final ScheduledExecutorService logSyncer;
 
   private long recoveredJobStartTime = 0;
+  private static boolean mainStarted = false;
 
   @VisibleForTesting
   protected AtomicBoolean successfullyUnregistered =
@@ -587,11 +588,37 @@ public class MRAppMaster extends CompositeService {
   clientService.stop();
 } catch (Throwable t) {
   LOG.warn("Graceful stop failed. Exiting.. ", t);
-  ExitUtil.terminate(1, t);
+  exitMRAppMaster(1, t);
 }
+exitMRAppMaster(0, null);
+  }
 
+  /** MRAppMaster exit method which has been instrumented for both runtime and
+   *  unit testing.
+   * If the main thread has not been started, this method was called from a
+   * test. In that case, configure the ExitUtil object to not exit the JVM.
+   *
+   * @param status integer indicating exit status
+   * @param t throwable exception that could be null
+   */
+  private void exitMRAppMaster(int status, Throwable t) {
+if (!mainStarted) {
+  ExitUtil.disableSystemExit();
+}
+try {
+  if (t != null) {
+ExitUtil.terminate(status, t);
+  } else {
+ExitUtil.terminate(status);
+  }
+} catch (ExitUtil.ExitException ee) {
+  // ExitUtil.ExitException is only thrown from the ExitUtil test code when
+  // SystemExit has been disabled. It is always thrown in the test code,
+  // even when no error occurs. Ignore the exception so that tests don't
+  // need to handle it.
+}
   }
- 
+
   private class JobFinishEventHandler implements EventHandler<JobFinishEvent> {
 @Override
 public void handle(JobFinishEvent event) {
@@ -1388,6 +1415,7 @@ public class MRAppMaster extends CompositeService {
 
   public static void main(String[] args) {
 try {
+  mainStarted = true;
   Thread.setDefaultUncaughtExceptionHandler(new 
YarnUncaughtExceptionHandler());
   String containerIdStr =
   System.getenv(Environment.CONTAINER_ID.name());
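
The new exit path leans on org.apache.hadoop.util.ExitUtil: once System.exit is disabled, terminate() throws ExitUtil.ExitException instead of killing the JVM, which is what lets unit tests drive the shutdown code. A minimal sketch of that behaviour (class and method names here are made up):

    import org.apache.hadoop.util.ExitUtil;

    public class ExitUtilSketch {
      // Illustrative only: shows how a test can exercise an exit path safely.
      static int runExitPath() {
        ExitUtil.disableSystemExit();   // terminate() will now throw instead of exiting
        try {
          ExitUtil.terminate(0);        // would call System.exit(0) at runtime
        } catch (ExitUtil.ExitException ee) {
          // Thrown only because System.exit was disabled; carries the status code.
          return ee.status;
        }
        return -1;                      // not reached once exit is disabled
      }

      public static void main(String[] args) {
        System.out.println("captured exit status: " + runExitPath());
      }
    }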


hadoop git commit: MAPREDUCE-6279. AM should explicitly exit JVM after all services have stopped. Contributed by Eric Payne

2015-05-07 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk b88700dcd -> f30065c8b


MAPREDUCE-6279. AM should explicitly exit JVM after all services have stopped. 
Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f30065c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f30065c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f30065c8

Branch: refs/heads/trunk
Commit: f30065c8b6099372f57015b505434120fe83c2b0
Parents: b88700d
Author: Jason Lowe jl...@apache.org
Authored: Thu May 7 22:05:12 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Thu May 7 22:05:12 2015 +

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../hadoop/mapreduce/v2/app/MRAppMaster.java| 32 -
 .../mapreduce/v2/app/TestMRAppMaster.java   | 49 +++-
 3 files changed, 81 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f30065c8/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 9af50fa..8f3c960 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -307,6 +307,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6192. Create unit test to automatically compare
 MR related classes and mapred-default.xml (rchiang via rkanter)
 
+MAPREDUCE-6279. AM should explicitly exit JVM after all services have
+stopped (Eric Payne via jlowe)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f30065c8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index c41f679..a5c9a25 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -220,6 +220,7 @@ public class MRAppMaster extends CompositeService {
   private final ScheduledExecutorService logSyncer;
 
   private long recoveredJobStartTime = 0;
+  private static boolean mainStarted = false;
 
   @VisibleForTesting
   protected AtomicBoolean successfullyUnregistered =
@@ -605,11 +606,37 @@ public class MRAppMaster extends CompositeService {
   clientService.stop();
 } catch (Throwable t) {
  LOG.warn("Graceful stop failed. Exiting.. ", t);
-  ExitUtil.terminate(1, t);
+  exitMRAppMaster(1, t);
 }
+exitMRAppMaster(0, null);
+  }
 
+  /** MRAppMaster exit method which has been instrumented for both runtime and
+   *  unit testing.
+   * If the main thread has not been started, this method was called from a
+   * test. In that case, configure the ExitUtil object to not exit the JVM.
+   *
+   * @param status integer indicating exit status
+   * @param t throwable exception that could be null
+   */
+  private void exitMRAppMaster(int status, Throwable t) {
+if (!mainStarted) {
+  ExitUtil.disableSystemExit();
+}
+try {
+  if (t != null) {
+ExitUtil.terminate(status, t);
+  } else {
+ExitUtil.terminate(status);
+  }
+} catch (ExitUtil.ExitException ee) {
+  // ExitUtil.ExitException is only thrown from the ExitUtil test code when
+  // SystemExit has been disabled. It is always thrown in the test code,
+  // even when no error occurs. Ignore the exception so that tests don't
+  // need to handle it.
+}
   }
- 
+
  private class JobFinishEventHandler implements EventHandler<JobFinishEvent> {
 @Override
 public void handle(JobFinishEvent event) {
@@ -1407,6 +1434,7 @@ public class MRAppMaster extends CompositeService {
 
   public static void main(String[] args) {
 try {
+  mainStarted = true;
   Thread.setDefaultUncaughtExceptionHandler(new 
YarnUncaughtExceptionHandler());
   String containerIdStr =
   System.getenv(Environment.CONTAINER_ID.name());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f30065c8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java

hadoop git commit: HDFS-8321. CacheDirectives and CachePool operations should throw RetriableException in safemode. Contributed by Haohui Mai.

2015-05-07 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk f30065c8b -> 767b91cd8


HDFS-8321. CacheDirectives and CachePool operations should throw 
RetriableException in safemode. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/767b91cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/767b91cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/767b91cd

Branch: refs/heads/trunk
Commit: 767b91cd834dc235ce9d116ba745b90c24ebe290
Parents: f30065c
Author: Haohui Mai whe...@apache.org
Authored: Mon May 4 15:47:16 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Thu May 7 15:17:11 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 32 +---
 2 files changed, 11 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/767b91cd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 74456db..be72b43 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -645,6 +645,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8325. Misspelling of threshold in log4j.properties for tests.
 (Brahma Reddy Battula via aajisaka)
 
+HDFS-8321. CacheDirectives and CachePool operations should throw
+RetriableException in safemode. (wheat9)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/767b91cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 84cb905..120812b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7679,10 +7679,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 writeLock();
 try {
   checkOperation(OperationCategory.WRITE);
-  if (isInSafeMode()) {
-throw new SafeModeException(
-"Cannot add cache directive", safeMode);
-  }
+  checkNameNodeSafeMode("Cannot add cache directive");
   effectiveDirective = FSNDNCacheOp.addCacheDirective(this, cacheManager,
   directive, flags, logRetryCache);
 } finally {
@@ -7710,10 +7707,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 writeLock();
 try {
   checkOperation(OperationCategory.WRITE);
-  if (isInSafeMode()) {
-throw new SafeModeException(
-"Cannot add cache directive", safeMode);
-  }
+  checkNameNodeSafeMode("Cannot add cache directive");
   FSNDNCacheOp.modifyCacheDirective(this, cacheManager, directive, flags,
   logRetryCache);
   success = true;
@@ -7734,10 +7728,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 writeLock();
 try {
   checkOperation(OperationCategory.WRITE);
-  if (isInSafeMode()) {
-throw new SafeModeException(
-"Cannot remove cache directives", safeMode);
-  }
+  checkNameNodeSafeMode("Cannot remove cache directives");
   FSNDNCacheOp.removeCacheDirective(this, cacheManager, id, logRetryCache);
   success = true;
 } finally {
@@ -,10 +7768,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 String poolInfoStr = null;
 try {
   checkOperation(OperationCategory.WRITE);
-  if (isInSafeMode()) {
-throw new SafeModeException(
-"Cannot add cache pool " + req.getPoolName(), safeMode);
-  }
+  checkNameNodeSafeMode("Cannot add cache pool"
+  + (req == null ? null : req.getPoolName()));
   CachePoolInfo info = FSNDNCacheOp.addCachePool(this, cacheManager, req,
   logRetryCache);
   poolInfoStr = info.toString();
@@ -7800,10 +7789,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 boolean success = false;
 try {
   checkOperation(OperationCategory.WRITE);
-  if (isInSafeMode()) {
-throw new SafeModeException(
-"Cannot modify cache pool " + req.getPoolName(), safeMode);
-  }
+  checkNameNodeSafeMode("Cannot modify cache pool"
+  + (req == null ? null : req.getPoolName()));
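
As a rough, self-contained sketch of the guard pattern the cache operations now share (an illustration only, not the actual FSNamesystem code; the nested exception classes stand in for org.apache.hadoop.ipc.RetriableException and the HDFS SafeModeException, and the real helper also weighs the HA state before choosing which exception to throw):

import java.io.IOException;

class SafeModeGuardSketch {
  // Stand-in exception types; in HDFS these are separate, existing classes.
  static class SafeModeException extends IOException {
    SafeModeException(String msg) { super(msg); }
  }
  static class RetriableException extends IOException {
    RetriableException(IOException cause) { super(cause); }
  }

  private boolean inSafeMode = true;  // hypothetical state flag

  void checkNameNodeSafeMode(String errorMsg)
      throws RetriableException, SafeModeException {
    if (inSafeMode) {
      // Wrapping the failure in a RetriableException tells retrying clients
      // to try the operation again once safe mode ends, instead of failing.
      throw new RetriableException(new SafeModeException(errorMsg));
    }
  }
}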
   

hadoop git commit: YARN-3584. Fixed attempt diagnostics format shown on the UI. Contributed by nijel (cherry picked from commit b88700dcd0b9aa47662009241dfb83bc4446548d)

2015-05-07 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1886bab0f -> c35114a1e


YARN-3584. Fixed attempt diagnostics format shown on the UI. Contributed by 
nijel
(cherry picked from commit b88700dcd0b9aa47662009241dfb83bc4446548d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c35114a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c35114a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c35114a1

Branch: refs/heads/branch-2
Commit: c35114a1e6b8672767feb8eea3a876d2840c6910
Parents: 1886bab
Author: Jian He jia...@apache.org
Authored: Thu May 7 13:54:07 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Thu May 7 13:56:25 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../resourcemanager/rmapp/attempt/RMAppAttemptImpl.java  | 8 
 2 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c35114a1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6bf7292..bc19862 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -285,6 +285,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3577. Misspelling of threshold in log4j.properties for tests.
 (Brahma Reddy Battula via aajisaka)
 
+YARN-3584. Fixed attempt diagnostics format shown on the UI. (nijel via
+jianhe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c35114a1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 8abc65a..684dde8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1455,14 +1455,14 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
   finishEvent.getApplicationAttemptId()).append(
" exited with ").append(" exitCode: ").append(status.getExitStatus()).
  append("\n");
+diagnosticsBuilder.append("Failing this attempt.").append("Diagnostics: ")
+.append(status.getDiagnostics());
 if (this.getTrackingUrl() != null) {
   diagnosticsBuilder.append("For more detailed output,").append(
-" check application tracking page:").append(
+" check application tracking page: ").append(
 this.getTrackingUrl()).append(
-"Then, click on links to logs of each attempt.\n");
+" Then, click on links to logs of each attempt.\n");
 }
-diagnosticsBuilder.append("Diagnostics: ").append(status.getDiagnostics())
-.append("Failing this attempt");
 return diagnosticsBuilder.toString();
   }
 



hadoop git commit: HDFS-8037. CheckAccess in WebHDFS silently accepts malformed FsActions parameters. Contributed by Walter Su.

2015-05-07 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f6b908ab0 -> a3abe8d7e


HDFS-8037. CheckAccess in WebHDFS silently accepts malformed FsActions 
parameters. Contributed by Walter Su.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3abe8d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3abe8d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3abe8d7

Branch: refs/heads/branch-2
Commit: a3abe8d7e4c6d38c1949f2450ce32396c238a33f
Parents: f6b908a
Author: Haohui Mai whe...@apache.org
Authored: Thu May 7 15:25:26 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Thu May 7 15:25:36 2015 -0700

--
 .../hdfs/web/resources/FsActionParam.java   |  2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md|  4 +-
 .../hadoop/hdfs/web/resources/TestParam.java| 54 
 4 files changed, 60 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3abe8d7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
index c840196..726a229 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
@@ -30,7 +30,7 @@ public class FsActionParam extends StringParam {
   /** Default parameter value. */
   public static final String DEFAULT = NULL;
 
-  private static String FS_ACTION_PATTERN = "[rwx-]{3}";
+  private static String FS_ACTION_PATTERN = "[r-][w-][x-]";
 
   private static final Domain DOMAIN = new Domain(NAME,
   Pattern.compile(FS_ACTION_PATTERN));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3abe8d7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d6cfd59..ab7dee8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -324,6 +324,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8321. CacheDirectives and CachePool operations should throw
 RetriableException in safemode. (wheat9)
 
+HDFS-8037. CheckAccess in WebHDFS silently accepts malformed FsActions
+parameters. (wheat9)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3abe8d7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 6432c57a..e6e41d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -1690,8 +1690,8 @@ See also: [Proxy Users](#Proxy_Users)
 | Description | File system operation read/write/execute |
 | Type | String |
 | Default Value | null (an invalid value) |
-| Valid Values | Strings matching regex pattern "[rwx-]{3}" |
-| Syntax | "[rwx-]{3}" |
+| Valid Values | Strings matching regex pattern "[r-][w-][x-]" |
+| Syntax | "[r-][w-][x-]" |
 
 See also: [`CHECKACCESS`](#Check_access),
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3abe8d7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
index a84243e..3728df0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
@@ -399,4 +399,58 @@ public class TestParam {
 Assert.assertEquals(s1, s1.getValue());
 Assert.assertEquals(s2, s2.getValue());
   }
+
+  @Test
+  public void testFsActionParam() {
+new FsActionParam("rwx");
+new FsActionParam("rw-");
+new FsActionParam("r-x");
+new FsActionParam("-wx");
+new FsActionParam("r--");
+new FsActionParam("-w-");
+new FsActionParam("--x");
+new 
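
To see why the old pattern was too permissive (an illustrative aside, not part of the patch; the class name is hypothetical): [rwx-]{3} accepts any three characters drawn from the set, such as "www", while [r-][w-][x-] pins each position to its own permission letter or a dash.

import java.util.regex.Pattern;

public class FsActionPatternCheck {
  public static void main(String[] args) {
    Pattern old = Pattern.compile("[rwx-]{3}");
    Pattern fixed = Pattern.compile("[r-][w-][x-]");
    // "www" and "xr-" are malformed FsActions but match the old pattern.
    for (String s : new String[] {"rwx", "r-x", "www", "xr-"}) {
      System.out.println(s + " old=" + old.matcher(s).matches()
          + " fixed=" + fixed.matcher(s).matches());
    }
  }
}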

hadoop git commit: HDFS-8037. CheckAccess in WebHDFS silently accepts malformed FsActions parameters. Contributed by Walter Su.

2015-05-07 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 767b91cd8 -> 4d9f9e546


HDFS-8037. CheckAccess in WebHDFS silently accepts malformed FsActions 
parameters. Contributed by Walter Su.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d9f9e54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d9f9e54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d9f9e54

Branch: refs/heads/trunk
Commit: 4d9f9e546ff9d8de75d08bf17d038c7d1ed3bc11
Parents: 767b91c
Author: Haohui Mai whe...@apache.org
Authored: Thu May 7 15:25:26 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Thu May 7 15:25:26 2015 -0700

--
 .../hdfs/web/resources/FsActionParam.java   |  2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md|  4 +-
 .../hadoop/hdfs/web/resources/TestParam.java| 54 
 4 files changed, 60 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d9f9e54/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
index c840196..726a229 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
@@ -30,7 +30,7 @@ public class FsActionParam extends StringParam {
   /** Default parameter value. */
   public static final String DEFAULT = NULL;
 
-  private static String FS_ACTION_PATTERN = "[rwx-]{3}";
+  private static String FS_ACTION_PATTERN = "[r-][w-][x-]";
 
   private static final Domain DOMAIN = new Domain(NAME,
   Pattern.compile(FS_ACTION_PATTERN));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d9f9e54/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index be72b43..b4f2042 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -648,6 +648,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8321. CacheDirectives and CachePool operations should throw
 RetriableException in safemode. (wheat9)
 
+HDFS-8037. CheckAccess in WebHDFS silently accepts malformed FsActions
+parameters. (wheat9)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d9f9e54/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 6432c57a..e6e41d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -1690,8 +1690,8 @@ See also: [Proxy Users](#Proxy_Users)
 | Description | File system operation read/write/execute |
 | Type | String |
 | Default Value | null (an invalid value) |
-| Valid Values | Strings matching regex pattern "[rwx-]{3}" |
-| Syntax | "[rwx-]{3}" |
+| Valid Values | Strings matching regex pattern "[r-][w-][x-]" |
+| Syntax | "[r-][w-][x-]" |
 
 See also: [`CHECKACCESS`](#Check_access),
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d9f9e54/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
index a84243e..3728df0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
@@ -399,4 +399,58 @@ public class TestParam {
 Assert.assertEquals(s1, s1.getValue());
 Assert.assertEquals(s2, s2.getValue());
   }
+
+  @Test
+  public void testFsActionParam() {
+new FsActionParam("rwx");
+new FsActionParam("rw-");
+new FsActionParam("r-x");
+new FsActionParam("-wx");
+new FsActionParam("r--");
+new FsActionParam("-w-");
+new FsActionParam("--x");
+new 

[2/3] hadoop git commit: YARN-3448. Added a rolling time-to-live LevelDB timeline store implementation. Contributed by Jonathan Eagles.

2015-05-07 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb035ff0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
new file mode 100644
index 000..8b6a51b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -0,0 +1,1807 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timeline;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeMap;
+
+import org.apache.commons.collections.map.LRUMap;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.WritableComparator;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
+import 
org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import 
org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto;
+import org.apache.hadoop.yarn.server.records.Version;
+import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl;
+import org.apache.hadoop.yarn.server.timeline.RollingLevelDB.RollingWriteBatch;
+import org.apache.hadoop.yarn.server.timeline.TimelineDataManager.CheckAcl;
+import org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyBuilder;
+import org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyParser;
+
+import org.fusesource.leveldbjni.JniDBFactory;
+import org.iq80.leveldb.DB;
+import org.iq80.leveldb.DBException;
+import org.iq80.leveldb.DBIterator;
+import org.iq80.leveldb.Options;
+import org.iq80.leveldb.ReadOptions;
+import org.iq80.leveldb.WriteBatch;
+import org.nustaq.serialization.FSTConfiguration;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import static 
org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.readReverseOrderedLong;
+import static 
org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.writeReverseOrderedLong;
+import static 
org.apache.hadoop.yarn.server.timeline.TimelineDataManager.DEFAULT_DOMAIN_ID;
+import static 
org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.prefixMatches;
+import static 

[1/3] hadoop git commit: YARN-3448. Added a rolling time-to-live LevelDB timeline store implementation. Contributed by Jonathan Eagles.

2015-05-07 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 684a5a6ae -> bb035ff08


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb035ff0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
index c5c0f93..121e9f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
@@ -43,8 +43,6 @@ import 
org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import 
org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.records.Version;
-import org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore;
-import org.apache.hadoop.yarn.server.timeline.NameValuePair;
 import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
 import org.iq80.leveldb.DBException;
 import org.junit.After;
@@ -155,7 +153,7 @@ public class TestLeveldbTimelineStore extends 
TimelineStoreTestUtils {
   return ((LeveldbTimelineStore)store).deleteNextEntity(entityType, ts,
   iterator, pfIterator, false);
 } catch(DBException e) {
-  throw new IOException(e);
+  throw new IOException(e);
 } finally {
   IOUtils.cleanup(null, iterator, pfIterator);
 }
@@ -179,12 +177,12 @@ public class TestLeveldbTimelineStore extends 
TimelineStoreTestUtils {
 assertEquals(1, getEntities("type_2").size());
 
 assertEquals(false, deleteNextEntity(entityType1,
-writeReverseOrderedLong(60l)));
+writeReverseOrderedLong(60L)));
 assertEquals(3, getEntities("type_1").size());
 assertEquals(1, getEntities("type_2").size());
 
 assertEquals(true, deleteNextEntity(entityType1,
-writeReverseOrderedLong(123l)));
+writeReverseOrderedLong(123L)));
 List<TimelineEntity> entities = getEntities("type_2");
 assertEquals(1, entities.size());
 verifyEntityInfo(entityId2, entityType2, events2, Collections.singletonMap(
@@ -198,12 +196,12 @@ public class TestLeveldbTimelineStore extends 
TimelineStoreTestUtils {
 verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
 primaryFilters, otherInfo, entities.get(1), domainId2);
 
-((LeveldbTimelineStore)store).discardOldEntities(-123l);
+((LeveldbTimelineStore)store).discardOldEntities(0L);
 assertEquals(2, getEntities("type_1").size());
 assertEquals(0, getEntities("type_2").size());
 assertEquals(6, ((LeveldbTimelineStore)store).getEntityTypes().size());
 
-((LeveldbTimelineStore)store).discardOldEntities(123l);
+((LeveldbTimelineStore)store).discardOldEntities(123L);
 assertEquals(0, getEntities("type_1").size());
 assertEquals(0, getEntities("type_2").size());
 assertEquals(0, ((LeveldbTimelineStore)store).getEntityTypes().size());
@@ -240,11 +238,11 @@ public class TestLeveldbTimelineStore extends 
TimelineStoreTestUtils {
 verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
 primaryFilters, otherInfo, entities.get(2), domainId2);
 
-((LeveldbTimelineStore)store).discardOldEntities(-123l);
+((LeveldbTimelineStore)store).discardOldEntities(-123L);
 assertEquals(1, getEntitiesWithPrimaryFilter("type_1", pfPair).size());
 assertEquals(3, getEntitiesWithPrimaryFilter("type_1", userFilter).size());
 
-((LeveldbTimelineStore)store).discardOldEntities(123l);
+((LeveldbTimelineStore)store).discardOldEntities(123L);
 assertEquals(0, getEntities("type_1").size());
 assertEquals(0, getEntities("type_2").size());
 assertEquals(0, ((LeveldbTimelineStore)store).getEntityTypes().size());
@@ -261,7 +259,7 @@ public class TestLeveldbTimelineStore extends 
TimelineStoreTestUtils {
 assertEquals(1, getEntitiesFromTs("type_2", l).size());
 assertEquals(3, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
 l).size());
-((LeveldbTimelineStore)store).discardOldEntities(123l);
+((LeveldbTimelineStore)store).discardOldEntities(123L);
 assertEquals(0, getEntitiesFromTs("type_1", l).size());
 assertEquals(0, getEntitiesFromTs("type_2", l).size());
 assertEquals(0, 

[3/3] hadoop git commit: YARN-3448. Added a rolling time-to-live LevelDB timeline store implementation. Contributed by Jonathan Eagles.

2015-05-07 Thread zjshen
YARN-3448. Added a rolling time-to-live LevelDB timeline store implementation. 
Contributed by Jonathan Eagles.

(cherry picked from commit daf3e4ef8bf73cbe4a799d51b4765809cd81089f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb035ff0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb035ff0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb035ff0

Branch: refs/heads/branch-2
Commit: bb035ff08766037a3ff42ffe3c566a981f6f1f6e
Parents: 684a5a6
Author: Zhijie Shen zjs...@apache.org
Authored: Thu May 7 10:01:51 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu May 7 10:10:13 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |3 +
 .../records/timeline/TimelinePutResponse.java   |6 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   47 +-
 .../pom.xml |5 +
 .../yarn/server/timeline/RollingLevelDB.java|  420 
 .../timeline/RollingLevelDBTimelineStore.java   | 1807 ++
 .../server/timeline/TimelineDataManager.java|   44 +-
 .../yarn/server/timeline/util/LeveldbUtils.java |   73 +-
 .../timeline/TestLeveldbTimelineStore.java  |   29 +-
 .../server/timeline/TestRollingLevelDB.java |  100 +
 .../TestRollingLevelDBTimelineStore.java|  427 +
 .../server/timeline/TimelineStoreTestUtils.java |   12 +-
 12 files changed, 2907 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb035ff0/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f9f125e..6bf7292 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -60,6 +60,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2619. Added NodeManager support for disk io isolation through cgroups.
 (Varun Vasudev and Wei Yan via vinodkv)
 
+YARN-3448. Added a rolling time-to-live LevelDB timeline store 
implementation.
+(Jonathan Eagles via zjshen)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb035ff0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelinePutResponse.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelinePutResponse.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelinePutResponse.java
index a56d4d4..abe106f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelinePutResponse.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelinePutResponse.java
@@ -129,6 +129,12 @@ public class TimelinePutResponse {
  */
 public static final int FORBIDDEN_RELATION = 6;
 
+/**
+ * Error code returned if the entity start time is before the eviction
+ * period of old data.
+ */
+public static final int EXPIRED_ENTITY = 7;
+
 private String entityId;
 private String entityType;
 private int errorCode;
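
As a hedged usage sketch (the helper class is hypothetical; only the response accessors and the new constant come from the timeline API, assuming EXPIRED_ENTITY sits alongside FORBIDDEN_RELATION in TimelinePutResponse.TimelinePutError as the hunk suggests):

import java.util.List;

import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;

public class ExpiredEntityCheck {
  // Returns true if any entity in the put response was rejected because its
  // start time fell before the store's eviction window.
  public static boolean hasExpiredEntity(TimelinePutResponse response) {
    List<TimelinePutError> errors = response.getErrors();
    for (TimelinePutError error : errors) {
      if (error.getErrorCode() == TimelinePutError.EXPIRED_ENTITY) {
        return true;
      }
    }
    return false;
  }
}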

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb035ff0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 790a4dd..9780ae5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1431,6 +1431,18 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_TIMELINE_SERVICE_TTL_MS =
   1000 * 60 * 60 * 24 * 7;
 
+  /** Timeline service rolling period. Valid values are daily, half_daily,
+   * quarter_daily, and hourly. */
+  public static final String TIMELINE_SERVICE_ROLLING_PERIOD =
+  TIMELINE_SERVICE_PREFIX + "rolling-period";
+
+  /** Roll a new database each hour. */
+  public static final String DEFAULT_TIMELINE_SERVICE_ROLLING_PERIOD =
+  "hourly";
+
+  /** Implementation specific configuration prefix for Timeline Service
+   * leveldb.
+   */
   public static final String 
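
A hedged usage sketch of the new setting (the class name is hypothetical, and the full key is inferred from TIMELINE_SERVICE_PREFIX + "rolling-period", presumably yarn.timeline-service.rolling-period; this assumes a build that already contains YARN-3448):

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RollingPeriodConfigSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Valid values, per the Javadoc above: daily, half_daily, quarter_daily,
    // and hourly (the default added by this change).
    conf.set(YarnConfiguration.TIMELINE_SERVICE_ROLLING_PERIOD, "half_daily");
    System.out.println(conf.get(
        YarnConfiguration.TIMELINE_SERVICE_ROLLING_PERIOD,
        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ROLLING_PERIOD));
  }
}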

[2/3] hadoop git commit: YARN-3448. Added a rolling time-to-live LevelDB timeline store implementation. Contributed by Jonathan Eagles.

2015-05-07 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/daf3e4ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
new file mode 100644
index 000..8b6a51b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -0,0 +1,1807 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timeline;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeMap;
+
+import org.apache.commons.collections.map.LRUMap;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.WritableComparator;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
+import 
org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import 
org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto;
+import org.apache.hadoop.yarn.server.records.Version;
+import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl;
+import org.apache.hadoop.yarn.server.timeline.RollingLevelDB.RollingWriteBatch;
+import org.apache.hadoop.yarn.server.timeline.TimelineDataManager.CheckAcl;
+import org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyBuilder;
+import org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyParser;
+
+import org.fusesource.leveldbjni.JniDBFactory;
+import org.iq80.leveldb.DB;
+import org.iq80.leveldb.DBException;
+import org.iq80.leveldb.DBIterator;
+import org.iq80.leveldb.Options;
+import org.iq80.leveldb.ReadOptions;
+import org.iq80.leveldb.WriteBatch;
+import org.nustaq.serialization.FSTConfiguration;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import static 
org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.readReverseOrderedLong;
+import static 
org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.writeReverseOrderedLong;
+import static 
org.apache.hadoop.yarn.server.timeline.TimelineDataManager.DEFAULT_DOMAIN_ID;
+import static 
org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.prefixMatches;
+import static 

[1/3] hadoop git commit: YARN-3448. Added a rolling time-to-live LevelDB timeline store implementation. Contributed by Jonathan Eagles.

2015-05-07 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8e991f4b1 -> daf3e4ef8


http://git-wip-us.apache.org/repos/asf/hadoop/blob/daf3e4ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
index c5c0f93..121e9f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
@@ -43,8 +43,6 @@ import 
org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import 
org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.records.Version;
-import org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore;
-import org.apache.hadoop.yarn.server.timeline.NameValuePair;
 import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
 import org.iq80.leveldb.DBException;
 import org.junit.After;
@@ -155,7 +153,7 @@ public class TestLeveldbTimelineStore extends 
TimelineStoreTestUtils {
   return ((LeveldbTimelineStore)store).deleteNextEntity(entityType, ts,
   iterator, pfIterator, false);
 } catch(DBException e) {
-  throw new IOException(e);
+  throw new IOException(e);
 } finally {
   IOUtils.cleanup(null, iterator, pfIterator);
 }
@@ -179,12 +177,12 @@ public class TestLeveldbTimelineStore extends 
TimelineStoreTestUtils {
 assertEquals(1, getEntities("type_2").size());
 
 assertEquals(false, deleteNextEntity(entityType1,
-writeReverseOrderedLong(60l)));
+writeReverseOrderedLong(60L)));
 assertEquals(3, getEntities("type_1").size());
 assertEquals(1, getEntities("type_2").size());
 
 assertEquals(true, deleteNextEntity(entityType1,
-writeReverseOrderedLong(123l)));
+writeReverseOrderedLong(123L)));
 List<TimelineEntity> entities = getEntities("type_2");
 assertEquals(1, entities.size());
 verifyEntityInfo(entityId2, entityType2, events2, Collections.singletonMap(
@@ -198,12 +196,12 @@ public class TestLeveldbTimelineStore extends 
TimelineStoreTestUtils {
 verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
 primaryFilters, otherInfo, entities.get(1), domainId2);
 
-((LeveldbTimelineStore)store).discardOldEntities(-123l);
+((LeveldbTimelineStore)store).discardOldEntities(0L);
 assertEquals(2, getEntities("type_1").size());
 assertEquals(0, getEntities("type_2").size());
 assertEquals(6, ((LeveldbTimelineStore)store).getEntityTypes().size());
 
-((LeveldbTimelineStore)store).discardOldEntities(123l);
+((LeveldbTimelineStore)store).discardOldEntities(123L);
 assertEquals(0, getEntities("type_1").size());
 assertEquals(0, getEntities("type_2").size());
 assertEquals(0, ((LeveldbTimelineStore)store).getEntityTypes().size());
@@ -240,11 +238,11 @@ public class TestLeveldbTimelineStore extends 
TimelineStoreTestUtils {
 verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
 primaryFilters, otherInfo, entities.get(2), domainId2);
 
-((LeveldbTimelineStore)store).discardOldEntities(-123l);
+((LeveldbTimelineStore)store).discardOldEntities(-123L);
 assertEquals(1, getEntitiesWithPrimaryFilter("type_1", pfPair).size());
 assertEquals(3, getEntitiesWithPrimaryFilter("type_1", userFilter).size());
 
-((LeveldbTimelineStore)store).discardOldEntities(123l);
+((LeveldbTimelineStore)store).discardOldEntities(123L);
 assertEquals(0, getEntities("type_1").size());
 assertEquals(0, getEntities("type_2").size());
 assertEquals(0, ((LeveldbTimelineStore)store).getEntityTypes().size());
@@ -261,7 +259,7 @@ public class TestLeveldbTimelineStore extends 
TimelineStoreTestUtils {
 assertEquals(1, getEntitiesFromTs("type_2", l).size());
 assertEquals(3, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
 l).size());
-((LeveldbTimelineStore)store).discardOldEntities(123l);
+((LeveldbTimelineStore)store).discardOldEntities(123L);
 assertEquals(0, getEntitiesFromTs("type_1", l).size());
 assertEquals(0, getEntitiesFromTs("type_2", l).size());
 assertEquals(0, 

[3/3] hadoop git commit: YARN-3448. Added a rolling time-to-live LevelDB timeline store implementation. Contributed by Jonathan Eagles.

2015-05-07 Thread zjshen
YARN-3448. Added a rolling time-to-live LevelDB timeline store implementation. 
Contributed by Jonathan Eagles.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/daf3e4ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/daf3e4ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/daf3e4ef

Branch: refs/heads/trunk
Commit: daf3e4ef8bf73cbe4a799d51b4765809cd81089f
Parents: 8e991f4
Author: Zhijie Shen zjs...@apache.org
Authored: Thu May 7 10:01:51 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu May 7 10:01:51 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |3 +
 .../records/timeline/TimelinePutResponse.java   |6 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   47 +-
 .../pom.xml |5 +
 .../yarn/server/timeline/RollingLevelDB.java|  420 
 .../timeline/RollingLevelDBTimelineStore.java   | 1807 ++
 .../server/timeline/TimelineDataManager.java|   44 +-
 .../yarn/server/timeline/util/LeveldbUtils.java |   73 +-
 .../timeline/TestLeveldbTimelineStore.java  |   29 +-
 .../server/timeline/TestRollingLevelDB.java |  100 +
 .../TestRollingLevelDBTimelineStore.java|  427 +
 .../server/timeline/TimelineStoreTestUtils.java |   12 +-
 12 files changed, 2907 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/daf3e4ef/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 55c65f5..ff7921c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -108,6 +108,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2619. Added NodeManager support for disk io isolation through cgroups.
 (Varun Vasudev and Wei Yan via vinodkv)
 
+YARN-3448. Added a rolling time-to-live LevelDB timeline store 
implementation.
+(Jonathan Eagles via zjshen)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/daf3e4ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelinePutResponse.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelinePutResponse.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelinePutResponse.java
index a56d4d4..abe106f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelinePutResponse.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelinePutResponse.java
@@ -129,6 +129,12 @@ public class TimelinePutResponse {
  */
 public static final int FORBIDDEN_RELATION = 6;
 
+/**
+ * Error code returned if the entity start time is before the eviction
+ * period of old data.
+ */
+public static final int EXPIRED_ENTITY = 7;
+
 private String entityId;
 private String entityType;
 private int errorCode;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/daf3e4ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 70b87f3..3bf25ed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1431,6 +1431,18 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_TIMELINE_SERVICE_TTL_MS =
   1000 * 60 * 60 * 24 * 7;
 
+  /** Timeline service rolling period. Valid values are daily, half_daily,
+   * quarter_daily, and hourly. */
+  public static final String TIMELINE_SERVICE_ROLLING_PERIOD =
+  TIMELINE_SERVICE_PREFIX + "rolling-period";
+
+  /** Roll a new database each hour. */
+  public static final String DEFAULT_TIMELINE_SERVICE_ROLLING_PERIOD =
+  "hourly";
+
+  /** Implementation specific configuration prefix for Timeline Service
+   * leveldb.
+   */
   public static final String TIMELINE_SERVICE_LEVELDB_PREFIX =
   TIMELINE_SERVICE_PREFIX +