hadoop git commit: Revert "HADOOP-13852 hadoop build to allow hadoop version property to be explicitly set. Contributed by Steve Loughran"

2016-12-08 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 13d8e552d -> 7d8e440ee


Revert "HADOOP-13852 hadoop build to allow hadoop version property to be 
explicitly set. Contriibuted by Steve Loughran"

This reverts commit c2655157257079b8541d71bb1e5b6cbae75561ff.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d8e440e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d8e440e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d8e440e

Branch: refs/heads/trunk
Commit: 7d8e440eee51562d0769efe04eb97256fe6061d1
Parents: 13d8e55
Author: Akira Ajisaka 
Authored: Fri Dec 9 12:49:27 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Dec 9 12:49:27 2016 +0900

--
 BUILDING.txt | 11 +--
 hadoop-common-project/hadoop-common/pom.xml  |  3 ---
 .../src/main/resources/common-version-info.properties|  4 ++--
 .../src/main/resources/yarn-version-info.properties  |  2 +-
 4 files changed, 4 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d8e440e/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 7afc3f0..8b2bba6 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -390,7 +390,7 @@ http://www.zlib.net/
 
--
 Building distributions:
 
- * Build distribution with native code: mvn package 
[-Pdist][-Pdocs][-Psrc][-Dtar][-Dmaven.javadoc.skip=true]
+ * Build distribution with native code: mvn package 
[-Pdist][-Pdocs][-Psrc][-Dtar]
 
 
--
 Running compatibility checks with checkcompatibility.py
@@ -402,12 +402,3 @@ managers to compare the compatibility of a previous and 
current release.
 As an example, this invocation will check the compatibility of interfaces 
annotated as Public or LimitedPrivate:
 
 ./dev-support/bin/checkcompatibility.py --annotation 
org.apache.hadoop.classification.InterfaceAudience.Public --annotation 
org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate --include 
"hadoop.*" branch-2.7.2 trunk
-
---
-Changing the Hadoop version declared returned by VersionInfo
-
-If for compatibility reasons the version of Hadoop has to be declared as a 2.x 
release in the information returned by
-org.apache.hadoop.util.VersionInfo, set the property declared.hadoop.version 
to the desired version.
-For example: mvn package -Pdist -Ddeclared.hadoop.version=2.11
-
-If unset, the project version declared in the POM file is used.
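
For context on what the reverted property did: the value wired into
common-version-info.properties at build time is what
org.apache.hadoop.util.VersionInfo reports at runtime. A minimal sketch
(illustrative, not part of the patch), assuming a Hadoop client jar on the
classpath:

import org.apache.hadoop.util.VersionInfo;

public class ShowDeclaredVersion {
  public static void main(String[] args) {
    // Prints the "version" value baked in at build time; with HADOOP-13852
    // this could be overridden via -Ddeclared.hadoop.version, and after this
    // revert it is always ${pom.version}.
    System.out.println("Hadoop " + VersionInfo.getVersion()
        + " from r" + VersionInfo.getRevision()
        + " built by " + VersionInfo.getUser());
  }
}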

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d8e440e/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index aa20f79..c9b282f 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -36,9 +36,6 @@
     <is.hadoop.component>true</is.hadoop.component>
     <wsce.config.dir>../etc/hadoop</wsce.config.dir>
     <wsce.config.file>wsce-site.xml</wsce.config.file>
-
-    <declared.hadoop.version>${pom.version}</declared.hadoop.version>
   </properties>

 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d8e440e/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
 
b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
index 9b74960..ad9a24d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
+++ 
b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
@@ -16,11 +16,11 @@
 # limitations under the License.
 #
 
-version=${declared.hadoop.version}
+version=${pom.version}
 revision=${version-info.scm.commit}
 branch=${version-info.scm.branch}
 user=${user.name}
 date=${version-info.build.time}
 url=${version-info.scm.uri}
 srcChecksum=${version-info.source.md5}
-protocVersion=${protobuf.version}
+protocVersion=${protobuf.version}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d8e440e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
index ee6f13d..9a8575c 100644
--- 

[2/2] hadoop git commit: MAPREDUCE-6818. Remove direct reference to TimelineClientImpl. Contributed by Li Lu.

2016-12-08 Thread sjlee
MAPREDUCE-6818. Remove direct reference to TimelineClientImpl. Contributed by 
Li Lu.

(cherry picked from commit 6217b87f4a056cf704cef2e073b386b7803415de)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/385d8fae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/385d8fae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/385d8fae

Branch: refs/heads/YARN-5355-branch-2
Commit: 385d8fae88d5aaf439333f780189374116a5afee
Parents: d8e424d
Author: Sangjin Lee 
Authored: Thu Dec 8 18:14:09 2016 -0800
Committer: Sangjin Lee 
Committed: Thu Dec 8 18:14:46 2016 -0800

--
 .../org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java | 3 +--
 .../java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java| 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/385d8fae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
index 447ea4e..d553596 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 
 
@@ -54,7 +53,7 @@ class JobHistoryFileReplayMapperV1 extends
 
   public void map(IntWritable key, IntWritable val, Context context) throws 
IOException {
 // collect the apps it needs to process
-TimelineClient tlc = new TimelineClientImpl();
+TimelineClient tlc = TimelineClient.createTimelineClient();
 TimelineEntityConverterV1 converter = new TimelineEntityConverterV1();
 JobHistoryFileReplayHelper helper = new 
JobHistoryFileReplayHelper(context);
 int replayMode = helper.getReplayMode();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/385d8fae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
index 16d14a1..6d6151f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 
 /**
* Adds simple entities with random string payload, events, metrics, and
@@ -46,7 +45,7 @@ class SimpleEntityWriterV1
 
   public void map(IntWritable key, IntWritable val, Context context)
   throws IOException {
-TimelineClient tlc = new TimelineClientImpl();
+TimelineClient tlc = TimelineClient.createTimelineClient();
 Configuration conf = context.getConfiguration();
 
 final int kbs = conf.getInt(KBS_SENT, KBS_SENT_DEFAULT);
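
For context, a minimal sketch of the pattern the patch switches to: obtain
the client through the public factory method instead of instantiating the
implementation class. (Illustrative only; the init/start/stop lifecycle is
the standard YARN service idiom, not shown in the diff above.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.api.TimelineClient;

public class TimelineClientFactoryDemo {
  public static void main(String[] args) {
    // Factory keeps callers decoupled from TimelineClientImpl.
    TimelineClient tlc = TimelineClient.createTimelineClient();
    tlc.init(new Configuration());
    tlc.start();
    try {
      // ... tlc.putEntities(...) as in the mappers above ...
    } finally {
      tlc.stop();
    }
  }
}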


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/2] hadoop git commit: MAPREDUCE-6818. Remove direct reference to TimelineClientImpl. Contributed by Li Lu.

2016-12-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 17c4ab7eb -> 6217b87f4
  refs/heads/YARN-5355-branch-2 d8e424d85 -> 385d8fae8


MAPREDUCE-6818. Remove direct reference to TimelineClientImpl. Contributed by 
Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6217b87f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6217b87f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6217b87f

Branch: refs/heads/YARN-5355
Commit: 6217b87f4a056cf704cef2e073b386b7803415de
Parents: 17c4ab7
Author: Sangjin Lee 
Authored: Thu Dec 8 18:14:09 2016 -0800
Committer: Sangjin Lee 
Committed: Thu Dec 8 18:14:09 2016 -0800

--
 .../org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java | 3 +--
 .../java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java| 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6217b87f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
index 447ea4e..d553596 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 
 
@@ -54,7 +53,7 @@ class JobHistoryFileReplayMapperV1 extends
 
   public void map(IntWritable key, IntWritable val, Context context) throws 
IOException {
 // collect the apps it needs to process
-TimelineClient tlc = new TimelineClientImpl();
+TimelineClient tlc = TimelineClient.createTimelineClient();
 TimelineEntityConverterV1 converter = new TimelineEntityConverterV1();
 JobHistoryFileReplayHelper helper = new 
JobHistoryFileReplayHelper(context);
 int replayMode = helper.getReplayMode();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6217b87f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
index 16d14a1..6d6151f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 
 /**
* Adds simple entities with random string payload, events, metrics, and
@@ -46,7 +45,7 @@ class SimpleEntityWriterV1
 
   public void map(IntWritable key, IntWritable val, Context context)
   throws IOException {
-TimelineClient tlc = new TimelineClientImpl();
+TimelineClient tlc = TimelineClient.createTimelineClient();
 Configuration conf = context.getConfiguration();
 
 final int kbs = conf.getInt(KBS_SENT, KBS_SENT_DEFAULT);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[47/50] [abbrv] hadoop git commit: HDFS-11223. Fix typos in HttpFs documentations. Contributed by Yiqun Lin.

2016-12-08 Thread stevel
HDFS-11223. Fix typos in HttpFs documentations. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c2cf556
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c2cf556
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c2cf556

Branch: refs/heads/HADOOP-13345
Commit: 4c2cf5560f6d952cfa36ef656f0b04dc3150f8b3
Parents: 74d0066
Author: Akira Ajisaka 
Authored: Thu Dec 8 20:52:24 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Dec 8 20:52:24 2016 +0900

--
 .../hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm | 2 +-
 .../hadoop-hdfs-httpfs/src/site/markdown/index.md  | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c2cf556/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
index 0cb89de..4b66732 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
@@ -50,7 +50,7 @@ IMPORTANT: Replace `#HTTPFSUSER#` with the Unix user that 
will start the HttpFS
 Restart Hadoop
 --
 
-You need to restart Hadoop for the proxyuser configuration ot become active.
+You need to restart Hadoop for the proxyuser configuration to become active.
 
 Start/Stop HttpFS
 -

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c2cf556/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
index 750b7f4..145feb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
@@ -15,7 +15,7 @@
 Hadoop HDFS over HTTP - Documentation Sets
 ==
 
-HttpFS is a server that provides a REST HTTP gateway supporting all HDFS File 
System operations (read and write). And it is inteoperable with the **webhdfs** 
REST HTTP API.
+HttpFS is a server that provides a REST HTTP gateway supporting all HDFS File 
System operations (read and write). And it is interoperable with the 
**webhdfs** REST HTTP API.
 
 HttpFS can be used to transfer data between clusters running different 
versions of Hadoop (overcoming RPC versioning issues), for example using Hadoop 
DistCP.
 
@@ -23,9 +23,9 @@ HttpFS can be used to access data in HDFS on a cluster behind 
of a firewall (the
 
 HttpFS can be used to access data in HDFS using HTTP utilities (such as curl 
and wget) and HTTP libraries Perl from other languages than Java.
 
-The **webhdfs** client FileSytem implementation can be used to access HttpFS 
using the Hadoop filesystem command (`hadoop fs`) line tool as well as from 
Java applications using the Hadoop FileSystem Java API.
+The **webhdfs** client FileSystem implementation can be used to access HttpFS 
using the Hadoop filesystem command (`hadoop fs`) line tool as well as from 
Java applications using the Hadoop FileSystem Java API.
 
-HttpFS has built-in security supporting Hadoop pseudo authentication and HTTP 
SPNEGO Kerberos and other pluggable authentication mechanims. It also provides 
Hadoop proxy user support.
+HttpFS has built-in security supporting Hadoop pseudo authentication and HTTP 
SPNEGO Kerberos and other pluggable authentication mechanisms. It also provides 
Hadoop proxy user support.
 
 How Does HttpFS Works?
 --


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/50] [abbrv] hadoop git commit: HDFS-11178. TestAddStripedBlockInFBR#testAddBlockInFullBlockReport fails frequently in trunk. Contributed By Yiqun Lin.

2016-12-08 Thread stevel
HDFS-11178. TestAddStripedBlockInFBR#testAddBlockInFullBlockReport fails 
frequently in trunk. Contributed By Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed898567
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed898567
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed898567

Branch: refs/heads/HADOOP-13345
Commit: ed89856755fd20e814d3ba58e2c183a85a9389d3
Parents: 4dd4f3a
Author: Brahma Reddy Battula 
Authored: Tue Dec 6 22:30:43 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Dec 6 22:30:43 2016 +0530

--
 .../namenode/TestAddStripedBlockInFBR.java  | 34 ++--
 1 file changed, 24 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed898567/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
index ab24a25..6e02372 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
@@ -30,9 +30,8 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -40,6 +39,8 @@ import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 import org.mockito.internal.util.reflection.Whitebox;
 
+import com.google.common.base.Supplier;
+
 import java.io.IOException;
 
 public class TestAddStripedBlockInFBR {
@@ -98,14 +99,27 @@ public class TestAddStripedBlockInFBR {
 DFSTestUtil.createFile(dfs, ecFile,
 cellSize * dataBlocks, (short) 1, 0L);
 
-// trigger dn's FBR. The FBR will add block-dn mapping.
-DataNodeTestUtils.triggerBlockReport(dn);
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+
+  @Override
+  public Boolean get() {
+try {
+  // trigger dn's FBR. The FBR will add block-dn mapping.
+  cluster.triggerBlockReports();
+
+  // make sure NN has correct block-dn mapping
+  BlockInfoStriped blockInfo = (BlockInfoStriped) cluster
+  .getNamesystem().getFSDirectory().getINode(ecFile.toString())
+  .asFile().getLastBlock();
+  NumberReplicas nr = spy.countNodes(blockInfo);
+
+  return nr.excessReplicas() == 0 && nr.liveReplicas() == groupSize;
+} catch (Exception ignored) {
+  // Ignore the exception
+}
 
-// make sure NN has correct block-dn mapping
-BlockInfoStriped blockInfo = (BlockInfoStriped) cluster.getNamesystem()
-.getFSDirectory().getINode(ecFile.toString()).asFile().getLastBlock();
-NumberReplicas nr = spy.countNodes(blockInfo);
-Assert.assertEquals(groupSize, nr.liveReplicas());
-Assert.assertEquals(0, nr.excessReplicas());
+return false;
+  }
+}, 3000, 60000);
   }
 }
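
The core of the fix is the GenericTestUtils.waitFor polling idiom: retry the
assertion until it holds or a deadline passes, rather than asserting once
after triggering an asynchronous block report. A standalone sketch
(illustrative values, not from the patch):

import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForIdiom {
  public static void main(String[] args) throws Exception {
    final long readyAt = System.currentTimeMillis() + 500;
    // Check the condition every 100ms; time out after 5000ms.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return System.currentTimeMillis() >= readyAt;
      }
    }, 100, 5000);
  }
}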


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into s3guard/HADOOP-13345

2016-12-08 Thread stevel
Merge branch 'trunk' into s3guard/HADOOP-13345


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/881de1fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/881de1fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/881de1fb

Branch: refs/heads/HADOOP-13345
Commit: 881de1fba7299fb534d4cb3aae467ae2dd3dd9ee
Parents: 013a3c4 c265515
Author: Steve Loughran 
Authored: Thu Dec 8 17:58:10 2016 +
Committer: Steve Loughran 
Committed: Thu Dec 8 17:58:10 2016 +

--
 BUILDING.txt|11 +-
 LICENSE.txt | 2 +-
 dev-support/bin/qbt | 0
 .../util/ZKSignerSecretProvider.java| 2 +-
 hadoop-common-project/hadoop-common/pom.xml | 4 +
 .../apache/hadoop/crypto/key/KeyProvider.java   |28 +
 .../crypto/key/KeyProviderCryptoExtension.java  |94 +-
 .../crypto/key/kms/KMSClientProvider.java   |77 +-
 .../hadoop/crypto/key/kms/KMSRESTConstants.java | 1 +
 .../key/kms/LoadBalancingKMSClientProvider.java |18 +
 .../org/apache/hadoop/fs/FilterFileSystem.java  | 7 +
 .../org/apache/hadoop/fs/LocalDirAllocator.java | 2 +-
 .../apache/hadoop/fs/shell/CommandFormat.java   | 2 +-
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |18 +
 .../io/erasurecode/ErasureCodeConstants.java| 3 +
 .../io/erasurecode/rawcoder/util/GF256.java | 2 +-
 .../org/apache/hadoop/io/file/tfile/TFile.java  | 2 +-
 .../org/apache/hadoop/io/file/tfile/Utils.java  | 2 +-
 .../org/apache/hadoop/net/NetworkTopology.java  |   162 +-
 .../java/org/apache/hadoop/security/KDiag.java  | 2 +-
 .../alias/LocalJavaKeyStoreProvider.java| 2 +-
 .../security/ssl/FileBasedKeyStoresFactory.java | 6 +-
 .../security/ssl/ReloadingX509TrustManager.java | 2 +-
 .../src/main/native/gtest/gtest-all.cc  | 10403 
 .../src/main/native/gtest/include/gtest/gtest.h | 21192 +
 .../resources/common-version-info.properties| 4 +-
 .../conf/TestConfigurationFieldsBase.java   |20 +-
 .../key/TestKeyProviderCryptoExtension.java |   114 +
 .../fs/FileContextMainOperationsBaseTest.java   |21 +-
 .../hadoop/fs/FileSystemContractBaseTest.java   |17 +-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |12 +-
 .../fs/viewfs/ViewFileSystemBaseTest.java   |29 +
 .../ssl/TestReloadingX509TrustManager.java  |18 +
 ...rKeyGeneratorKeyProviderCryptoExtension.java | 7 +-
 .../hadoop/crypto/key/kms/server/KMS.java   |38 +-
 .../hadoop/crypto/key/kms/server/KMSAudit.java  | 4 +-
 .../hadoop/crypto/key/kms/server/KMSWebApp.java | 7 +-
 .../kms/server/KeyAuthorizationKeyProvider.java |13 +
 .../hadoop-kms/src/site/markdown/index.md.vm|39 +-
 .../hadoop/crypto/key/kms/server/TestKMS.java   |90 +-
 .../crypto/key/kms/server/TestKMSAudit.java |11 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java | 1 +
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |49 +
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |46 +
 .../hadoop/hdfs/web/resources/GetOpParam.java   | 3 +
 .../hadoop/hdfs/web/resources/PostOpParam.java  | 2 +
 .../hadoop/hdfs/web/resources/PutOpParam.java   | 1 +
 .../hdfs/web/resources/StoragePolicyParam.java  |43 +
 .../hadoop/fs/http/client/HttpFSFileSystem.java |92 +-
 .../hadoop/fs/http/server/FSOperations.java |   130 +
 .../http/server/HttpFSParametersProvider.java   |23 +
 .../hadoop/fs/http/server/HttpFSServer.java |35 +
 .../src/site/markdown/ServerSetup.md.vm | 2 +-
 .../src/site/markdown/index.md  | 6 +-
 .../fs/http/client/BaseTestHttpFSWith.java  |52 +-
 .../main/native/fuse-dfs/fuse_dfs_wrapper.sh| 2 +-
 .../dev-support/findbugsExcludeFile.xml |27 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 2 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |76 +-
 .../server/blockmanagement/BlockManager.java|79 +-
 .../server/blockmanagement/DatanodeManager.java |12 +-
 .../hdfs/server/datanode/BPOfferService.java|10 +-
 .../hdfs/server/datanode/BPServiceActor.java| 4 +-
 .../hdfs/server/datanode/BlockReceiver.java |66 +-
 .../hdfs/server/datanode/BlockSender.java   |   105 +-
 .../hadoop/hdfs/server/datanode/DNConf.java | 4 +
 .../hadoop/hdfs/server/datanode/DataNode.java   | 2 +-
 .../hdfs/server/datanode/DataStorage.java   | 5 +
 .../hdfs/server/datanode/DirectoryScanner.java  |12 +-
 .../hdfs/server/datanode/LocalReplica.java  |   179 +-
 .../server/datanode/LocalReplicaInPipeline.java |30 +-
 .../hdfs/server/datanode/ReplicaInPipeline.java | 4 +-
 

[36/50] [abbrv] hadoop git commit: YARN-5965. Retrospect ApplicationReport#getApplicationTimeouts. Contributed by Rohith Sharma K S

2016-12-08 Thread stevel
YARN-5965. Retrospect ApplicationReport#getApplicationTimeouts. Contributed by 
Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab923a53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab923a53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab923a53

Branch: refs/heads/HADOOP-13345
Commit: ab923a53fcf55d4d75aa027d46e3c4a659015325
Parents: c73e08a
Author: Sunil 
Authored: Thu Dec 8 00:27:25 2016 +0530
Committer: Sunil 
Committed: Thu Dec 8 00:27:25 2016 +0530

--
 .../yarn/api/records/ApplicationReport.java |   6 +-
 .../src/main/proto/yarn_protos.proto|   7 +-
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |   3 +-
 .../hadoop/yarn/client/cli/TestYarnCLI.java |   7 +-
 .../impl/pb/ApplicationReportPBImpl.java| 107 +++
 .../server/resourcemanager/rmapp/RMAppImpl.java |  13 ++-
 .../rmapp/TestApplicationLifetimeMonitor.java   |  15 ++-
 7 files changed, 91 insertions(+), 67 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab923a53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
index c039514..9e9ec3c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
@@ -25,7 +25,7 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.util.Records;
 
-import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 /**
@@ -451,10 +451,10 @@ public abstract class ApplicationReport {
 
   @Public
   @Unstable
-  public abstract List<ApplicationTimeout> getApplicationTimeouts();
+  public abstract Map<ApplicationTimeoutType, ApplicationTimeout>
+      getApplicationTimeouts();
 
   @Private
   @Unstable
   public abstract void setApplicationTimeouts(
-  List<ApplicationTimeout> timeouts);
+  Map<ApplicationTimeoutType, ApplicationTimeout> timeouts);
 }
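
A short consumer-side sketch of the new map-typed API (hypothetical caller,
not part of the patch); keyed lookup is the point of the change, replacing a
scan over the old List:

import java.util.Map;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationTimeout;
import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;

class TimeoutPrinter {
  static void printLifetime(ApplicationReport report) {
    Map<ApplicationTimeoutType, ApplicationTimeout> timeouts =
        report.getApplicationTimeouts();
    // Direct lookup by timeout type instead of iterating a list.
    ApplicationTimeout lifetime =
        timeouts.get(ApplicationTimeoutType.LIFETIME);
    if (lifetime != null) {
      System.out.println("expires at " + lifetime.getExpiryTime());
    }
  }
}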

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab923a53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 43a661f..5a70298 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -214,7 +214,12 @@ message ApplicationReportProto {
   optional PriorityProto priority = 23;
   optional string appNodeLabelExpression = 24;
   optional string amNodeLabelExpression = 25;
-  repeated ApplicationTimeoutProto application_timeouts = 26;
+  repeated AppTimeoutsMapProto appTimeouts = 26;
+}
+
+message AppTimeoutsMapProto {
+  optional ApplicationTimeoutTypeProto application_timeout_type = 1;
+  optional ApplicationTimeoutProto application_timeout = 2;
 }
 
 message ApplicationTimeoutProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab923a53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
index 20a65bf..efe5921 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
@@ -714,7 +714,8 @@ public class ApplicationCLI extends YarnCLI {
   appReportStr.println(appReport.getAppNodeLabelExpression());
   appReportStr.print("\tAM container Node Label Expression : ");
   appReportStr.println(appReport.getAmNodeLabelExpression());
-  for (ApplicationTimeout timeout : appReport.getApplicationTimeouts()) {
+  for (ApplicationTimeout timeout : 

[42/50] [abbrv] hadoop git commit: MAPREDUCE-4683. Create and distribute hadoop-mapreduce-client-core-tests.jar.

2016-12-08 Thread stevel
MAPREDUCE-4683. Create and distribute hadoop-mapreduce-client-core-tests.jar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f54afdb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f54afdb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f54afdb8

Branch: refs/heads/HADOOP-13345
Commit: f54afdb83b2567f63f0dc94f5f4e26ead2eefc5e
Parents: 3c774b8
Author: Akira Ajisaka 
Authored: Thu Dec 8 11:27:58 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Dec 8 11:27:58 2016 +0900

--
 .../hadoop-mapreduce-client-core/pom.xml| 16 
 1 file changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f54afdb8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index 38a7725..aefba7f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -61,6 +61,22 @@
       </plugin>

       <plugin>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+            <configuration>
+              <excludes>
+                <exclude>**/mapred-queues.xml</exclude>
+              </excludes>
+            </configuration>
+            <phase>test-compile</phase>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[29/50] [abbrv] hadoop git commit: HDFS-11172. Support an erasure coding policy using RS 10 + 4. Contributed by Wei Zhou.

2016-12-08 Thread stevel
HDFS-11172. Support an erasure coding policy using RS 10 + 4. Contributed by 
Wei Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f53b481f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f53b481f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f53b481f

Branch: refs/heads/HADOOP-13345
Commit: f53b481ffeec64831c1d9b54a555c7b570d116a3
Parents: 79d90b8
Author: Andrew Wang 
Authored: Tue Dec 6 12:49:38 2016 -0800
Committer: Andrew Wang 
Committed: Tue Dec 6 12:49:38 2016 -0800

--
 .../io/erasurecode/ErasureCodeConstants.java|  3 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |  1 +
 .../namenode/ErasureCodingPolicyManager.java|  5 ++-
 .../src/site/markdown/HDFSErasureCoding.md  |  2 +-
 .../TestDFSRSDefault10x4StripedInputStream.java | 35 +++
 ...TestDFSRSDefault10x4StripedOutputStream.java | 36 
 ...fault10x4StripedOutputStreamWithFailure.java | 36 
 7 files changed, 116 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f53b481f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index ffa0bce..e168909 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -41,4 +41,7 @@ public final class ErasureCodeConstants {
 
   public static final ECSchema XOR_2_1_SCHEMA = new ECSchema(
   XOR_CODEC_NAME, 2, 1);
+
+  public static final ECSchema RS_10_4_SCHEMA = new ECSchema(
+  RS_DEFAULT_CODEC_NAME, 10, 4);
 }
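
A tiny sketch of what the new constant describes (illustrative): an ECSchema
is the codec name plus the data/parity unit counts, so RS(10,4) stripes ten
data cells with four parity cells.

import org.apache.hadoop.io.erasurecode.ECSchema;

public class Rs104Schema {
  public static void main(String[] args) {
    // Mirrors RS_10_4_SCHEMA above: codec "rs-default", 10 data + 4 parity.
    ECSchema rs104 = new ECSchema("rs-default", 10, 4);
    System.out.println(rs104.getCodecName() + ": "
        + rs104.getNumDataUnits() + " data units, "
        + rs104.getNumParityUnits() + " parity units");
  }
}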

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f53b481f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index b55b4df..a9f1839 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -148,6 +148,7 @@ public final class HdfsConstants {
   public static final byte RS_3_2_POLICY_ID = 1;
   public static final byte RS_6_3_LEGACY_POLICY_ID = 2;
   public static final byte XOR_2_1_POLICY_ID = 3;
+  public static final byte RS_10_4_POLICY_ID = 4;
 
   /* Hidden constructor */
   protected HdfsConstants() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f53b481f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 8a85d23..a1b2270 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -51,11 +51,14 @@ public final class ErasureCodingPolicyManager {
   private static final ErasureCodingPolicy SYS_POLICY4 =
   new ErasureCodingPolicy(ErasureCodeConstants.XOR_2_1_SCHEMA,
   DEFAULT_CELLSIZE, HdfsConstants.XOR_2_1_POLICY_ID);
+  private static final ErasureCodingPolicy SYS_POLICY5 =
+  new ErasureCodingPolicy(ErasureCodeConstants.RS_10_4_SCHEMA,
+  DEFAULT_CELLSIZE, HdfsConstants.RS_10_4_POLICY_ID);
 
   //We may add more later.
   private static final ErasureCodingPolicy[] SYS_POLICIES =
   new ErasureCodingPolicy[]{SYS_POLICY1, SYS_POLICY2, SYS_POLICY3,
-  SYS_POLICY4};
+  SYS_POLICY4, SYS_POLICY5};
 
   // Supported storage policies for striped EC files
   private static final byte[] SUITABLE_STORAGE_POLICIES_FOR_EC_STRIPED_MODE = 
new byte[] {


[38/50] [abbrv] hadoop git commit: MAPREDUCE-6817. The format of job start time in JHS is different from those of submit and finish time. (Haibo Chen via kasha)

2016-12-08 Thread stevel
MAPREDUCE-6817. The format of job start time in JHS is different from those of 
submit and finish time. (Haibo Chen via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a793cec2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a793cec2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a793cec2

Branch: refs/heads/HADOOP-13345
Commit: a793cec2c9a7ad80aaf67be9dec2245e8ecce63c
Parents: 9f5d2c4
Author: Karthik Kambatla 
Authored: Wed Dec 7 13:36:49 2016 -0800
Committer: Karthik Kambatla 
Committed: Wed Dec 7 13:36:49 2016 -0800

--
 .../mapreduce/v2/hs/webapp/HsJobsBlock.java |  2 +-
 .../mapreduce/v2/hs/webapp/dao/JobInfo.java | 15 +-
 .../mapreduce/v2/hs/webapp/dao/TestJobInfo.java | 29 +++-
 3 files changed, 43 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a793cec2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
index 92c5e2b..b234ca3 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
@@ -79,7 +79,7 @@ public class HsJobsBlock extends HtmlBlock {
   JobInfo job = new JobInfo(j);
   jobsTableData.append("[\"")
   .append(dateFormat.format(new Date(job.getSubmitTime()))).append("\",\"")
-  .append(job.getStartTimeStr()).append("\",\"")
+  .append(job.getFormattedStartTimeStr(dateFormat)).append("\",\"")
   .append(dateFormat.format(new Date(job.getFinishTime()))).append("\",\"")
   .append("")
   .append(job.getId()).append("\",\"")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a793cec2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobInfo.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobInfo.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobInfo.java
index eddbccf..d12a729 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobInfo.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobInfo.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;
 
+import java.text.DateFormat;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.List;
@@ -27,6 +28,7 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlTransient;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
@@ -44,7 +46,8 @@ import org.apache.hadoop.security.authorize.AccessControlList;
 @XmlRootElement(name = "job")
 @XmlAccessorType(XmlAccessType.FIELD)
 public class JobInfo {
-  private static final String NA = "N/A";
+  @VisibleForTesting
+  static final String NA = "N/A";
 
   protected long submitTime;
   protected long startTime;
@@ -228,6 +231,16 @@ public class JobInfo {
 return this.startTime;
   }
 
+  public String getFormattedStartTimeStr(final DateFormat dateFormat) {
+String str = NA;
+
+if (startTime >= 0) {
+  str = dateFormat.format(new Date(startTime));
+}
+
+return str;
+  }
+
   public String getStartTimeStr() {
 String str = NA;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a793cec2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/TestJobInfo.java
--
diff --git 

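An illustrative sketch of the mismatch being fixed (not from the patch):
submit and finish times went through the table's DateFormat while the start
time column did not, so the three columns rendered in different styles:

import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;

public class JhsTimeColumns {
  public static void main(String[] args) {
    long t = 1481192209000L;
    DateFormat df = new SimpleDateFormat("yyyy.MM.dd HH:mm:ss z");
    System.out.println("submit/finish style: " + df.format(new Date(t)));
    // The unformatted fallback, assuming the old column printed a raw Date.
    System.out.println("old start style:     " + new Date(t));
  }
}
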
[41/50] [abbrv] hadoop git commit: HADOOP-13842. Update jackson from 1.9.13 to 2.x in hadoop-maven-plugins.

2016-12-08 Thread stevel
HADOOP-13842. Update jackson from 1.9.13 to 2.x in hadoop-maven-plugins.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c774b8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c774b8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c774b8c

Branch: refs/heads/HADOOP-13345
Commit: 3c774b8c98540642eeb706065cf0275f9891d3b2
Parents: ea2895f
Author: Akira Ajisaka 
Authored: Thu Dec 8 11:06:25 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Dec 8 11:06:25 2016 +0900

--
 hadoop-maven-plugins/pom.xml| 12 
 .../apache/hadoop/maven/plugin/protoc/ProtocRunner.java |  5 +++--
 2 files changed, 7 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c774b8c/hadoop-maven-plugins/pom.xml
--
diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml
index 51b8f91..eae03a1 100644
--- a/hadoop-maven-plugins/pom.xml
+++ b/hadoop-maven-plugins/pom.xml
@@ -47,17 +47,13 @@
       <scope>provided</scope>
     </dependency>
     <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-core-asl</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-mapper-asl</artifactId>
-    </dependency>
-    <dependency>
       <groupId>commons-io</groupId>
       <artifactId>commons-io</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
   </dependencies>
   <build>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c774b8c/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocRunner.java
--
diff --git 
a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocRunner.java
 
b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocRunner.java
index ef15884..e83a8cd 100644
--- 
a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocRunner.java
+++ 
b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocRunner.java
@@ -13,14 +13,15 @@
  */
 package org.apache.hadoop.maven.plugin.protoc;
 
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
 import org.apache.hadoop.maven.plugin.util.Exec;
 import org.apache.hadoop.maven.plugin.util.FileSetUtils;
 import org.apache.maven.model.FileSet;
 import org.apache.maven.plugin.AbstractMojo;
 import org.apache.maven.plugin.MojoExecutionException;
 import org.apache.maven.project.MavenProject;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.type.TypeReference;
 
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
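
The migration is mechanical: the ObjectMapper/TypeReference idiom is the
same, only the packages move from org.codehaus.jackson to com.fasterxml. A
sketch (the input file name here is hypothetical):

import java.io.File;
import java.util.Map;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public class Jackson2Idiom {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    // Same readValue/TypeReference shape as Jackson 1, new imports.
    Map<String, Long> cache = mapper.readValue(
        new File("protoc-cache.json"),  // hypothetical file, for illustration
        new TypeReference<Map<String, Long>>() { });
    System.out.println(cache);
  }
}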


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[48/50] [abbrv] hadoop git commit: HADOOP-13867. FilterFileSystem should override rename(.., options) to take effect of Rename options called via FilterFileSystem implementations. Contributed By Vinay

2016-12-08 Thread stevel
HADOOP-13867. FilterFileSystem should override rename(.., options) to take 
effect of Rename options called via FilterFileSystem implementations. 
Contributed By Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ef79617
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ef79617
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ef79617

Branch: refs/heads/HADOOP-13345
Commit: 0ef796174ecb5383f79cfecfcbfc4f309d093cd7
Parents: 4c2cf55
Author: Brahma Reddy Battula 
Authored: Thu Dec 8 18:57:43 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Dec 8 18:57:43 2016 +0530

--
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  7 +++
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java | 12 +++-
 2 files changed, 18 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ef79617/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 3f9aaa4..41429ac 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
 
@@ -235,6 +236,12 @@ public class FilterFileSystem extends FileSystem {
   }
 
   @Override
+  protected void rename(Path src, Path dst, Rename... options)
+  throws IOException {
+fs.rename(src, dst, options);
+  }
+
+  @Override
   public boolean truncate(Path f, final long newLength) throws IOException {
 return fs.truncate(f, newLength);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ef79617/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index 24f3dc8..4cbb8ab 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -64,7 +64,6 @@ public class TestFilterFileSystem {
 public FSDataOutputStream append(Path f, int bufferSize) throws
 IOException;
 public long getLength(Path f);
-public void rename(Path src, Path dst, Rename... options);
 public boolean exists(Path f);
 public boolean isDirectory(Path f);
 public boolean isFile(Path f);
@@ -264,6 +263,17 @@ public class TestFilterFileSystem {
 verify(mockFs).setWriteChecksum(eq(true));
   }
 
+  @Test
+  public void testRenameOptions() throws Exception {
+FileSystem mockFs = mock(FileSystem.class);
+FileSystem fs = new FilterFileSystem(mockFs);
+Path src = new Path("/src");
+Path dst = new Path("/dest");
+Rename opt = Rename.TO_TRASH;
+fs.rename(src, dst, opt);
+verify(mockFs).rename(eq(src), eq(dst), eq(opt));
+  }
+
   private void checkInit(FilterFileSystem fs, boolean expectInit)
   throws Exception {
 URI uri = URI.create("filter:/");
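
Why the override matters, in a sketch (hypothetical subclass, not from the
patch): without it, FileSystem's default rename(src, dst, options) executes
against the filter itself, so Rename options never reach the wrapped
filesystem; with it, every FilterFileSystem subclass delegates for free.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;

// Inherits the new delegating rename(Path, Path, Rename...), so options
// such as Rename.TO_TRASH pass through to fs, as testRenameOptions checks.
class PassThroughFileSystem extends FilterFileSystem {
  PassThroughFileSystem(FileSystem fs) {
    super(fs);
  }
}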


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[43/50] [abbrv] hadoop git commit: HDFS-11140. Directory Scanner should log startup message time correctly. Contributed by Yiqun Lin.

2016-12-08 Thread stevel
HDFS-11140. Directory Scanner should log startup message time correctly. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0857641f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0857641f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0857641f

Branch: refs/heads/HADOOP-13345
Commit: 0857641f62778fad64e8158d78320efb0c8b417c
Parents: f54afdb
Author: Akira Ajisaka 
Authored: Thu Dec 8 13:34:45 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Dec 8 13:34:45 2016 +0900

--
 .../java/org/apache/hadoop/fs/RawLocalFileSystem.java   |  9 +
 .../hadoop/hdfs/server/datanode/DirectoryScanner.java   | 12 
 2 files changed, 17 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0857641f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 7bf429e..84863a5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -791,6 +791,15 @@ public class RawLocalFileSystem extends FileSystem {
   pathToFile(p).toPath(), BasicFileAttributeView.class);
   FileTime fmtime = (mtime >= 0) ? FileTime.fromMillis(mtime) : null;
   FileTime fatime = (atime >= 0) ? FileTime.fromMillis(atime) : null;
+
+  // On some macOS environment, BasicFileAttributeView.setTimes
+  // does not set times correctly when the argument of accessTime is null.
+  // TODO: Remove this after the issue is fixed.
+  if (fatime == null && Shell.MAC) {
+FileStatus f = getFileStatus(p);
+fatime = FileTime.fromMillis(f.getAccessTime());
+  }
+
   view.setTimes(fmtime, fatime, null);
 } catch (NoSuchFileException e) {
   throw new FileNotFoundException("File " + p + " does not exist");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0857641f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index e2baf32..18188dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -37,6 +37,8 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.lang.time.FastDateFormat;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -61,7 +63,7 @@ public class DirectoryScanner implements Runnable {
   private static final int MILLIS_PER_SECOND = 1000;
   private static final String START_MESSAGE =
   "Periodic Directory Tree Verification scan"
-  + " starting at %dms with interval of %dms";
+  + " starting at %s with interval of %dms";
   private static final String START_MESSAGE_WITH_THROTTLE = START_MESSAGE
   + " and throttle limit of %dms/s";
 
@@ -268,10 +270,12 @@ public class DirectoryScanner implements Runnable {
 String logMsg;
 
 if (throttleLimitMsPerSec < MILLIS_PER_SECOND) {
-  logMsg = String.format(START_MESSAGE_WITH_THROTTLE, firstScanTime,
-  scanPeriodMsecs, throttleLimitMsPerSec);
+  logMsg = String.format(START_MESSAGE_WITH_THROTTLE,
+  FastDateFormat.getInstance().format(firstScanTime), scanPeriodMsecs,
+  throttleLimitMsPerSec);
 } else {
-  logMsg = String.format(START_MESSAGE, firstScanTime, scanPeriodMsecs);
+  logMsg = String.format(START_MESSAGE,
+  FastDateFormat.getInstance().format(firstScanTime), scanPeriodMsecs);
 }
 
 LOG.info(logMsg);
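
A standalone sketch of the formatting change (illustrative values): the old
%d placeholder printed the scheduled start as raw epoch milliseconds;
formatting it with FastDateFormat yields a readable local timestamp.

import org.apache.commons.lang.time.FastDateFormat;

public class ScanStartMessage {
  public static void main(String[] args) {
    long firstScanTime = System.currentTimeMillis() + 30000L;
    // Before: "... starting at 1481192209000ms ..."; after: a date-time.
    System.out.println(String.format(
        "Periodic Directory Tree Verification scan starting at %s"
            + " with interval of %dms",
        FastDateFormat.getInstance().format(firstScanTime), 21600000L));
  }
}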


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[39/50] [abbrv] hadoop git commit: YARN-5963. Spelling errors in logging and exceptions for node manager, client, web-proxy, common, and app history code (gsohn via rkanter)

2016-12-08 Thread stevel
YARN-5963. Spelling errors in logging and exceptions for node manager, client, 
web-proxy, common, and app history code (gsohn via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72fe5468
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72fe5468
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72fe5468

Branch: refs/heads/HADOOP-13345
Commit: 72fe54684198b7df5c5fb2114616dff6d17a4402
Parents: a793cec
Author: Robert Kanter 
Authored: Wed Dec 7 15:07:25 2016 -0800
Committer: Robert Kanter 
Committed: Wed Dec 7 15:07:25 2016 -0800

--
 .../src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java | 2 +-
 .../main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java  | 2 +-
 .../apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java   | 2 +-
 .../src/main/java/org/apache/hadoop/yarn/util/FSDownload.java| 2 +-
 .../apache/hadoop/registry/client/impl/zk/RegistrySecurity.java  | 2 +-
 .../ApplicationHistoryManagerOnTimelineStore.java| 2 +-
 .../yarn/server/nodemanager/WindowsSecureContainerExecutor.java  | 4 ++--
 .../yarn/server/nodemanager/containermanager/AuxServices.java| 4 ++--
 .../nodemanager/containermanager/ContainerManagerImpl.java   | 2 +-
 .../containermanager/localizer/ResourceLocalizationService.java  | 2 +-
 .../containermanager/logaggregation/AppLogAggregatorImpl.java| 2 +-
 .../org/apache/hadoop/yarn/server/webproxy/ProxyUriUtils.java| 2 +-
 .../java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java | 2 +-
 13 files changed, 15 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72fe5468/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index 25e3a46..9b21ff8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -240,7 +240,7 @@ public class LogsCLI extends Configured implements Tool {
   if (appState == YarnApplicationState.NEW
   || appState == YarnApplicationState.NEW_SAVING
   || appState == YarnApplicationState.SUBMITTED) {
-System.err.println("Logs are not avaiable right now.");
+System.err.println("Logs are not available right now.");
 return -1;
   }
 } catch (IOException | YarnException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72fe5468/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 7779ddf..2508c3e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -379,7 +379,7 @@ public class RMAdminCLI extends HAAdmin {
 }
 if (nodesDecommissioning) {
   System.out.println("Graceful decommissioning not completed in " + timeout
-  + " seconds, issueing forceful decommissioning command.");
+  + " seconds, issuing forceful decommissioning command.");
   RefreshNodesRequest forcefulRequest = RefreshNodesRequest
   .newInstance(DecommissionType.FORCEFUL);
   adminProtocol.refreshNodes(forcefulRequest);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72fe5468/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index 4103fd6..b275a94 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ 

[46/50] [abbrv] hadoop git commit: YARN-5970. Validate application update timeout request parameters. Contributed by Rohith Sharma K S.

2016-12-08 Thread stevel
YARN-5970. Validate application update timeout request parameters. Contributed 
by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74d0066d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74d0066d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74d0066d

Branch: refs/heads/HADOOP-13345
Commit: 74d0066d3392169bec872f438a0818e2f5323010
Parents: 9ef89ed
Author: Sunil G 
Authored: Thu Dec 8 15:53:56 2016 +0530
Committer: Sunil G 
Committed: Thu Dec 8 15:53:56 2016 +0530

--
 .../java/org/apache/hadoop/yarn/util/Times.java |  3 ++
 .../server/resourcemanager/RMServerUtils.java   |  3 +-
 .../resourcemanager/webapp/RMWebServices.java   | 10 ++---
 .../TestRMWebServicesAppsModification.java  | 39 ++--
 4 files changed, 38 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74d0066d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
index f113bd3..3c41558 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
@@ -105,6 +105,9 @@ public class Times {
*/
   public static long parseISO8601ToLocalTimeInMillis(String isoString)
   throws ParseException {
+if (isoString == null) {
+  throw new ParseException("Invalid input.", -1);
+}
 return isoFormat.get().parse(isoString).getTime();
   }
 }

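The Times.java hunk above adds a null guard because parse() on a null input would otherwise fail with a NullPointerException deep inside SimpleDateFormat; rejecting null up front yields a checked ParseException the web layer can report. A minimal, self-contained sketch of the same guard; the class and constant names below are illustrative stand-ins, not the real Times.java members, and the date pattern mirrors the one RMServerUtils expects:

    import java.text.ParseException;
    import java.text.SimpleDateFormat;

    public class Iso8601Guard {
      // Illustrative stand-in for Times.isoFormat: a thread-local keeps
      // SimpleDateFormat safe under concurrent REST requests.
      private static final ThreadLocal<SimpleDateFormat> FORMAT =
          ThreadLocal.withInitial(
              () -> new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ"));

      public static long parseIso8601(String isoString) throws ParseException {
        if (isoString == null) {
          // Fail fast with a checked exception instead of an NPE.
          throw new ParseException("Invalid input.", -1);
        }
        return FORMAT.get().parse(isoString).getTime();
      }

      public static void main(String[] args) throws ParseException {
        System.out.println(parseIso8601("2016-12-08T15:53:56.000+0530"));
      }
    }
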
http://git-wip-us.apache.org/repos/asf/hadoop/blob/74d0066d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index a0cdf68..74898ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -507,7 +507,8 @@ public class RMServerUtils {
 } catch (ParseException ex) {
   String message =
   "Expire time is not in ISO8601 format. ISO8601 supported "
-  + "format is yyyy-MM-dd'T'HH:mm:ss.SSSZ";
+  + "format is yyyy-MM-dd'T'HH:mm:ss.SSSZ. Configured "
+  + "timeout value is " + timeout.getValue();
   throw new YarnException(message, ex);
 }
 if (expireTime < currentTimeMillis) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74d0066d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index a46fb81..bd0602b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -2434,7 +2434,7 @@ public class RMWebServices extends WebServices {
   }
 
   @GET
-  @Path("/apps/{appid}/timeout/{type}")
+  @Path("/apps/{appid}/timeouts/{type}")
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
   MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   public AppTimeoutInfo 

[40/50] [abbrv] hadoop git commit: HDFS-8630. WebHDFS : Support get/set/unset StoragePolicy. Contributed by Surendra Singh Lilhore.

2016-12-08 Thread stevel
HDFS-8630. WebHDFS : Support get/set/unset StoragePolicy. Contributed by 
Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea2895f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea2895f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea2895f4

Branch: refs/heads/HADOOP-13345
Commit: ea2895f4ed5031809d856faa52e9de5b9501bdea
Parents: 72fe546
Author: Andrew Wang 
Authored: Wed Dec 7 15:52:16 2016 -0800
Committer: Andrew Wang 
Committed: Wed Dec 7 15:52:16 2016 -0800

--
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  49 
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  46 
 .../hadoop/hdfs/web/resources/GetOpParam.java   |   3 +
 .../hadoop/hdfs/web/resources/PostOpParam.java  |   2 +
 .../hadoop/hdfs/web/resources/PutOpParam.java   |   1 +
 .../hdfs/web/resources/StoragePolicyParam.java  |  43 +++
 .../hadoop/fs/http/client/HttpFSFileSystem.java |  92 ++-
 .../hadoop/fs/http/server/FSOperations.java | 130 +
 .../http/server/HttpFSParametersProvider.java   |  23 ++
 .../hadoop/fs/http/server/HttpFSServer.java |  35 +++
 .../fs/http/client/BaseTestHttpFSWith.java  |  52 +++-
 .../web/resources/NamenodeWebHdfsMethods.java   |  39 ++-
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|  27 ++
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 261 +++
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java |  68 +
 .../hadoop/hdfs/web/resources/TestParam.java|   8 +
 16 files changed, 871 insertions(+), 8 deletions(-)
--


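Before the individual file diffs, a rough sketch of how the new operations are meant to be driven from client code. This is not part of the patch: it assumes the FileSystem-level setStoragePolicy/getStoragePolicy/unsetStoragePolicy entry points are what WebHdfsFileSystem wires up here, the op names in the comments are inferred from the Get/Put/PostOpParam changes in the diffstat, and the NameNode address, path, and policy name are placeholders:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsStoragePolicyDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder NameNode address; any webhdfs:// URI works the same way.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode.example.com:50070"), conf);
        Path dir = new Path("/data/cold-archive");

        fs.setStoragePolicy(dir, "COLD");             // PUT  op=SETSTORAGEPOLICY
        System.out.println(fs.getStoragePolicy(dir)); // GET  op=GETSTORAGEPOLICY
        fs.unsetStoragePolicy(dir);                   // POST op=UNSETSTORAGEPOLICY
      }
    }
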
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea2895f4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index a75f4f1..3690a86 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -56,6 +57,8 @@ import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
@@ -588,4 +591,50 @@ class JsonUtilClient {
 lastLocatedBlock, isLastBlockComplete, null, null);
   }
 
+  public static Collection<BlockStoragePolicy> getStoragePolicies(
+  Map<?, ?> json) {
+Map<?, ?> policiesJson = (Map<?, ?>) json.get("BlockStoragePolicies");
+if (policiesJson != null) {
+  List<?> objs = (List<?>) policiesJson.get(BlockStoragePolicy.class
+  .getSimpleName());
+  if (objs != null) {
+BlockStoragePolicy[] storagePolicies = new BlockStoragePolicy[objs
+.size()];
+for (int i = 0; i < objs.size(); i++) {
+  final Map<?, ?> m = (Map<?, ?>) objs.get(i);
+  BlockStoragePolicy blockStoragePolicy = toBlockStoragePolicy(m);
+  storagePolicies[i] = blockStoragePolicy;
+}
+return Arrays.asList(storagePolicies);
+  }
+}
+return new ArrayList<BlockStoragePolicy>(0);
+  }
+
+  public static BlockStoragePolicy toBlockStoragePolicy(Map<?, ?> m) {
+byte id = ((Number) m.get("id")).byteValue();
+String name = (String) m.get("name");
+StorageType[] storageTypes = toStorageTypes((List<?>) m
+.get("storageTypes"));
+StorageType[] creationFallbacks = toStorageTypes((List<?>) m
+.get("creationFallbacks"));
+StorageType[] replicationFallbacks = toStorageTypes((List<?>) m
+.get("replicationFallbacks"));
+Boolean copyOnCreateFile = (Boolean) m.get("copyOnCreateFile");
+return new BlockStoragePolicy(id, name, storageTypes, creationFallbacks,
+replicationFallbacks, copyOnCreateFile.booleanValue());
+  }
+
+  private static StorageType[] toStorageTypes(List<?> list) {
+if (list == null) {
+  return null;
+} else {
+  StorageType[] storageTypes = new StorageType[list.size()];
+  for (int i = 0; i < list.size(); i++) {
+storageTypes[i] = 

[24/50] [abbrv] hadoop git commit: HDFS-11198. NN UI should link DN web address using hostnames. Contributed by Weiwei Yang.

2016-12-08 Thread stevel
HDFS-11198. NN UI should link DN web address using hostnames. Contributed by  
Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4dd4f3a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4dd4f3a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4dd4f3a4

Branch: refs/heads/HADOOP-13345
Commit: 4dd4f3a4bb483795e8669ec34d3efcd6c1b8b465
Parents: 7b988e8
Author: Kihwal Lee 
Authored: Tue Dec 6 08:06:50 2016 -0600
Committer: Kihwal Lee 
Committed: Tue Dec 6 08:06:50 2016 -0600

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html|  8 +---
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js  | 12 ++--
 2 files changed, 7 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4dd4f3a4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 13569fe..3598c80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -314,13 +314,7 @@
   {#LiveNodes}
   
 {name} ({xferaddr})
-
-{@select key=secureMode}
-  {@eq value="off"}{infoAddr}{/eq}
-  {@eq value="on"}{infoSecureAddr}{/eq}
-  {@default}{infoAddr}{/default}
-{/select}
-
+{dnWebAddress}
 {lastContact}s
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4dd4f3a4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
index f230b0f..02aa895 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
@@ -212,12 +212,12 @@
   var n = nodes[i];
   n.usedPercentage = Math.round((n.used + n.nonDfsUsedSpace) * 1.0 / 
n.capacity * 100);
 
-  var addr = n.infoSecureAddr;
-  var position = addr.lastIndexOf(":");
-  var port = addr.substring(position + 1, addr.length);
-  n.secureMode = "off";
-  if (port != 0) {
-n.secureMode = "on";
+  var port = n.infoAddr.split(":")[1];
+  var securePort = n.infoSecureAddr.split(":")[1];
+  var dnHost = n.name.split(":")[0];
+  n.dnWebAddress = dnHost + ":" + port;
+  if (securePort != 0) {
+n.dnWebAddress = dnHost + ":" + securePort;
   }
 
   if (n.adminState === "In Service") {





[17/50] [abbrv] hadoop git commit: HADOOP-13835. Move Google Test Framework code from mapreduce to hadoop-common. Contributed by Varun Vasudev.

2016-12-08 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2a3d6c5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/gtest/gtest-all.cc
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/gtest/gtest-all.cc
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/gtest/gtest-all.cc
deleted file mode 100644
index 4f8c08a..000
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/gtest/gtest-all.cc
+++ /dev/null
@@ -1,10403 +0,0 @@
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: mhe...@google.com (Markus Heule)
-//
-// Google C++ Testing Framework (Google Test)
-//
-// Sometimes it's desirable to build Google Test by compiling a single file.
-// This file serves this purpose.
-
-// This line ensures that gtest.h can be compiled on its own, even
-// when it's fused.
-#include "gtest/gtest.h"
-
-// The following lines pull in the real gtest *.cc files.
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: w...@google.com (Zhanyong Wan)
-//
-// The Google C++ Testing Framework (Google Test)
-
-// Copyright 2007, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of 

[33/50] [abbrv] hadoop git commit: YARN-5184. Fix up incompatible changes introduced on ContainerStatus and NodeReport. Contributed by Sangjin Lee.

2016-12-08 Thread stevel
YARN-5184. Fix up incompatible changes introduced on ContainerStatus and 
NodeReport. Contributed by Sangjin Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7288da5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7288da5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7288da5

Branch: refs/heads/HADOOP-13345
Commit: a7288da595fdf56c3ccd45c0b6ed2e3efaa043a4
Parents: c8d0a04
Author: Junping Du 
Authored: Tue Dec 6 14:36:41 2016 -0800
Committer: Junping Du 
Committed: Tue Dec 6 14:36:41 2016 -0800

--
 .../yarn/api/records/ContainerStatus.java   | 40 
 .../hadoop/yarn/api/records/NodeReport.java | 12 --
 2 files changed, 41 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7288da5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
index 839fd04..d7c75f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
@@ -85,11 +85,17 @@ public abstract class ContainerStatus {
*/
   @Public
   @Evolving
-  public abstract ExecutionType getExecutionType();
+  public ExecutionType getExecutionType() {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
 
   @Private
   @Unstable
-  public abstract void setExecutionType(ExecutionType executionType);
+  public void setExecutionType(ExecutionType executionType) {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
 
   /**
* Get the ContainerState of the container.
@@ -148,11 +154,17 @@ public abstract class ContainerStatus {
*/
   @Public
   @Unstable
-  public abstract Resource getCapability();
+  public Resource getCapability() {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
 
   @Private
   @Unstable
-  public abstract void setCapability(Resource capability);
+  public void setCapability(Resource capability) {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
 
   /**
* Get all the IP addresses with which the container run.
@@ -160,11 +172,17 @@ public abstract class ContainerStatus {
*/
   @Public
   @Unstable
-  public abstract List<String> getIPs();
+  public List<String> getIPs() {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
 
   @Private
   @Unstable
-  public abstract void setIPs(List<String> ips);
+  public void setIPs(List<String> ips) {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
 
   /**
* Get the hostname where the container runs.
@@ -172,9 +190,15 @@ public abstract class ContainerStatus {
*/
   @Public
   @Unstable
-  public abstract String getHost();
+  public String getHost() {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
 
   @Private
   @Unstable
-  public abstract void setHost(String host);
+  public void setHost(String host) {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
 }

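The pattern above is worth spelling out: YARN-5184 restores compatibility by turning methods that had been added as abstract back into concrete ones that throw. A toy illustration of why that matters; Report and LegacyReport are invented names, not YARN types:

    // Toy illustration of the compatibility pattern used in this patch.
    abstract class Report {
      // Added after the original release. Declared abstract, it would break
      // source compatibility for every existing subclass, and old compiled
      // subclasses would hit AbstractMethodError when it is invoked. As a
      // throwing default, old subclasses still compile and still load, and
      // only fail, with a clear message, if the new accessor is called.
      public String getHost() {
        throw new UnsupportedOperationException(
            "subclass must implement this method");
      }
    }

    // Written against the pre-YARN-5184 API; needs no change to keep working.
    class LegacyReport extends Report {
    }
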
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7288da5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
index 412010a..885a3b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
@@ -203,12 +203,18 @@ public abstract class NodeReport {
*/
   @Public
   @Stable
-  public abstract ResourceUtilization getAggregatedContainersUtilization();
+  public ResourceUtilization getAggregatedContainersUtilization() {
+throw new UnsupportedOperationException(
+"subclass 

[28/50] [abbrv] hadoop git commit: HADOOP-13827. Add reencryptEncryptedKey interface to KMS.

2016-12-08 Thread stevel
HADOOP-13827. Add reencryptEncryptedKey interface to KMS.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79d90b81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79d90b81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79d90b81

Branch: refs/heads/HADOOP-13345
Commit: 79d90b810c14d5e3abab75235f587663834ce36c
Parents: df983b5
Author: Xiao Chen 
Authored: Tue Dec 6 12:04:04 2016 -0800
Committer: Xiao Chen 
Committed: Tue Dec 6 12:04:04 2016 -0800

--
 .../apache/hadoop/crypto/key/KeyProvider.java   |  28 +
 .../crypto/key/KeyProviderCryptoExtension.java  |  89 +--
 .../crypto/key/kms/KMSClientProvider.java   |  77 +
 .../hadoop/crypto/key/kms/KMSRESTConstants.java |   1 +
 .../key/kms/LoadBalancingKMSClientProvider.java |  18 +++
 .../key/TestKeyProviderCryptoExtension.java | 114 +++
 ...rKeyGeneratorKeyProviderCryptoExtension.java |   7 +-
 .../hadoop/crypto/key/kms/server/KMS.java   |  38 +--
 .../hadoop/crypto/key/kms/server/KMSAudit.java  |   4 +-
 .../kms/server/KeyAuthorizationKeyProvider.java |  13 +++
 .../hadoop-kms/src/site/markdown/index.md.vm|  39 ++-
 .../hadoop/crypto/key/kms/server/TestKMS.java   |  90 +--
 .../crypto/key/kms/server/TestKMSAudit.java |  11 +-
 13 files changed, 469 insertions(+), 60 deletions(-)
--


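The headline addition is a reencryptEncryptedKey call: an encrypted data encryption key (EDEK) wrapped with an old key version can be re-wrapped with the latest version, and the raw key material never leaves the KMS. A hedged sketch of the intended flow; the exact method signature is inferred from this changeset, so treat it as an assumption:

    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;

    public class ReencryptSketch {
      // Assumed signature: reencryptEncryptedKey(EncryptedKeyVersion).
      static EncryptedKeyVersion rollAndReencrypt(
          KeyProviderCryptoExtension kp, String keyName,
          EncryptedKeyVersion oldEdek) throws Exception {
        // Roll the encryption key to a new version first...
        kp.rollNewVersion(keyName);
        // ...then have the KMS decrypt the EDEK internally and re-wrap it
        // with the latest key version.
        return kp.reencryptEncryptedKey(oldEdek);
      }
    }
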
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79d90b81/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
index c99a7bf..d54c18e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
@@ -33,6 +33,8 @@ import java.util.Map;
 
 import com.google.gson.stream.JsonReader;
 import com.google.gson.stream.JsonWriter;
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -86,6 +88,7 @@ public abstract class KeyProvider {
   return material;
 }
 
+@Override
 public String toString() {
   StringBuilder buf = new StringBuilder();
   buf.append("key(");
@@ -105,6 +108,31 @@ public abstract class KeyProvider {
   }
   return buf.toString();
 }
+
+@Override
+public boolean equals(Object rhs) {
+  if (this == rhs) {
+return true;
+  }
+  if (rhs == null || getClass() != rhs.getClass()) {
+return false;
+  }
+  final KeyVersion kv = (KeyVersion) rhs;
+  return new EqualsBuilder().
+  append(name, kv.name).
+  append(versionName, kv.versionName).
+  append(material, kv.material).
+  isEquals();
+}
+
+@Override
+public int hashCode() {
+  return new HashCodeBuilder().
+  append(name).
+  append(versionName).
+  append(material).
+  toHashCode();
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79d90b81/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
index 0543222..9ae98b4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
@@ -188,8 +188,8 @@ public class KeyProviderCryptoExtension extends
 public void drain(String keyName);
 
 /**
- * Generates a key material and encrypts it using the given key version 
name
- * and initialization vector. The generated key material is of the same
+ * Generates a key material and encrypts it using the given key name.
+ * The generated key material is of the same
  * length as the KeyVersion material of the latest key version
  * of the key and is encrypted using the same cipher.
  * 
@@ -210,7 +210,7 @@ public class KeyProviderCryptoExtension 

[32/50] [abbrv] hadoop git commit: HADOOP-13859. TestConfigurationFieldsBase fails for fields that are DEFAULT values of skipped properties. (Haibo Chen via kasha)

2016-12-08 Thread stevel
HADOOP-13859. TestConfigurationFieldsBase fails for fields that are DEFAULT 
values of skipped properties. (Haibo Chen via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8d0a049
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8d0a049
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8d0a049

Branch: refs/heads/HADOOP-13345
Commit: c8d0a049b00536385f06fad412a2288f005bf2ce
Parents: da4ecc9
Author: Karthik Kambatla 
Authored: Tue Dec 6 14:08:08 2016 -0800
Committer: Karthik Kambatla 
Committed: Tue Dec 6 14:10:58 2016 -0800

--
 .../conf/TestConfigurationFieldsBase.java   | 20 ++--
 .../hadoop/tools/TestHdfsConfigFields.java  |  6 -
 .../yarn/conf/TestYarnConfigurationFields.java  | 24 
 3 files changed, 18 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8d0a049/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index 9007c20..2367f21 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -205,6 +205,12 @@ public abstract class TestConfigurationFieldsBase {
   if (!f.getType().getName().equals("java.lang.String")) {
 continue;
   }
+
+  // filter out default-value fields
+  if (isFieldADefaultValue(f)) {
+continue;
+  }
+
   // Convert found member into String
   try {
 value = (String) f.get(null);
@@ -332,6 +338,17 @@ public abstract class TestConfigurationFieldsBase {
   }
 
   /**
+   * Test if a field is a default value of another property by
+   * checking if its name starts with "DEFAULT_" or ends with
+   * "_DEFAULT".
+   * @param field the field to check
+   */
+  private static boolean isFieldADefaultValue(Field field) {
+return field.getName().startsWith("DEFAULT_") ||
+field.getName().endsWith("_DEFAULT");
+  }
+
+  /**
* Utility function to extract public static final default
* member variables from a Configuration type class.
*
@@ -363,8 +380,7 @@ public abstract class TestConfigurationFieldsBase {
   }
   // Special: Stuff any property beginning with "DEFAULT_" into a
   // different hash for later processing
-  if (f.getName().startsWith("DEFAULT_") ||
-  f.getName().endsWith("_DEFAULT")) {
+  if (isFieldADefaultValue(f)) {
 if (retVal.containsKey(f.getName())) {
   continue;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8d0a049/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index 3bbb609..a089d39 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -57,12 +57,6 @@ public class TestHdfsConfigFields extends 
TestConfigurationFieldsBase {
 // Remove deprecated properties listed in Configuration#DeprecationDelta
 configurationPropsToSkipCompare.add(DFSConfigKeys.DFS_DF_INTERVAL_KEY);
 
-// Remove default properties
-configurationPropsToSkipCompare
-.add(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_DEFAULT);
-configurationPropsToSkipCompare
-.add(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
-
 // Remove support property
 configurationPropsToSkipCompare
 .add(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8d0a049/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java

[37/50] [abbrv] hadoop git commit: YARN-5136. Error in handling event type APP_ATTEMPT_REMOVED to the scheduler (Contributed by Wilfred Spiegelenburg via Daniel Templeton)

2016-12-08 Thread stevel
YARN-5136. Error in handling event type APP_ATTEMPT_REMOVED to the scheduler
(Contributed by Wilfred Spiegelenburg via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f5d2c4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f5d2c4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f5d2c4f

Branch: refs/heads/HADOOP-13345
Commit: 9f5d2c4fff6d31acc8b422b52462ef4927c4eea1
Parents: ab923a5
Author: Daniel Templeton 
Authored: Wed Dec 7 11:12:14 2016 -0800
Committer: Daniel Templeton 
Committed: Wed Dec 7 11:12:14 2016 -0800

--
 .../scheduler/fair/FairScheduler.java   | 32 +--
 .../scheduler/fair/TestFairScheduler.java   | 89 
 2 files changed, 115 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f5d2c4f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 03df5d4..e790bc2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -633,8 +633,7 @@ public class FairScheduler extends
   RMAppAttemptState rmAppAttemptFinalState, boolean keepContainers) {
 try {
   writeLock.lock();
-  LOG.info(
-  "Application " + applicationAttemptId + " is done." + " finalState="
+  LOG.info("Application " + applicationAttemptId + " is done. finalState="
   + rmAppAttemptFinalState);
   FSAppAttempt attempt = getApplicationAttempt(applicationAttemptId);
 
@@ -644,6 +643,13 @@ public class FairScheduler extends
 return;
   }
 
+  // Check if the attempt is already stopped and don't stop it twice.
+  if (attempt.isStopped()) {
+LOG.info("Application " + applicationAttemptId + " has already been "
++ "stopped!");
+return;
+  }
+
   // Release all the running containers
   for (RMContainer rmContainer : attempt.getLiveContainers()) {
 if (keepContainers && rmContainer.getState().equals(
@@ -1521,6 +1527,13 @@ public class FairScheduler extends
   try {
 attempt.getWriteLock().lock();
 FSLeafQueue oldQueue = (FSLeafQueue) app.getQueue();
+// Check if the attempt is already stopped: don't move stopped app
+// attempt. The attempt has already been removed from all queues.
+if (attempt.isStopped()) {
+  LOG.info("Application " + appId + " is stopped and can't be moved!");
+  throw new YarnException("Application " + appId
+  + " is stopped and can't be moved!");
+}
 String destQueueName = handleMoveToPlanQueue(queueName);
 FSLeafQueue targetQueue = queueMgr.getLeafQueue(destQueueName, false);
 if (targetQueue == null) {
@@ -1617,16 +1630,23 @@ public class FairScheduler extends
* operations will be atomic.
*/
  private void executeMove(SchedulerApplication<FSAppAttempt> app,
-  FSAppAttempt attempt, FSLeafQueue oldQueue, FSLeafQueue newQueue) {
-boolean wasRunnable = oldQueue.removeApp(attempt);
+  FSAppAttempt attempt, FSLeafQueue oldQueue, FSLeafQueue newQueue)
+  throws YarnException {
+// Check current runs state. Do not remove the attempt from the queue until
+// after the check has been performed otherwise it could remove the app
+// from a queue without moving it to a new queue.
+boolean wasRunnable = oldQueue.isRunnableApp(attempt);
 // if app was not runnable before, it may be runnable now
 boolean nowRunnable = maxRunningEnforcer.canAppBeRunnable(newQueue,
 attempt);
 if (wasRunnable && !nowRunnable) {
-  throw new IllegalStateException("Should have already verified that app "
+  throw new YarnException("Should have already verified that app "
   + attempt.getApplicationId() + " would be runnable in new queue");
 }
-
+
+// Now it is safe to remove from the queue.
+oldQueue.removeApp(attempt);

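The executeMove rework above follows a check-then-mutate discipline: probe every precondition (is the attempt stopped? would it be runnable in the target queue?) before the first state change, so a rejected move leaves the queues untouched, and raise the checked YarnException instead of IllegalStateException so the event dispatcher survives a bad request. A compact illustration with stand-in types; nothing below is FairScheduler code:

    public class MoveSketch {
      // Stand-in for FSLeafQueue; not a YARN type.
      interface Queue {
        boolean isRunnableApp(String app);
        boolean canRun(String app);
        void removeApp(String app);
        void addApp(String app, boolean runnable);
      }

      static void move(String app, boolean stopped, Queue from, Queue to)
          throws Exception {
        if (stopped) {
          // Reject stopped attempts before touching any queue.
          throw new Exception("Application " + app
              + " is stopped and can't be moved!");
        }
        boolean wasRunnable = from.isRunnableApp(app);  // read-only probe
        if (wasRunnable && !to.canRun(app)) {
          throw new Exception("not runnable in target queue");
        }
        from.removeApp(app);          // mutate only after every check passed
        to.addApp(app, wasRunnable);
      }
    }
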
[06/50] [abbrv] hadoop git commit: HADOOP-13847. KMSWebApp should close KeyProviderCryptoExtension. Contributed by John Zhuge.

2016-12-08 Thread stevel
HADOOP-13847. KMSWebApp should close KeyProviderCryptoExtension. Contributed by 
John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/291df5c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/291df5c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/291df5c7

Branch: refs/heads/HADOOP-13345
Commit: 291df5c7fb713d5442ee29eb3f272127afb05a3c
Parents: c51bfd2
Author: Xiao Chen 
Authored: Mon Dec 5 09:34:39 2016 -0800
Committer: Xiao Chen 
Committed: Mon Dec 5 09:35:17 2016 -0800

--
 .../apache/hadoop/crypto/key/KeyProviderCryptoExtension.java  | 5 +++--
 .../org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java| 7 ++-
 2 files changed, 9 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/291df5c7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
index 1ecd9f6..0543222 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
@@ -427,8 +427,9 @@ public class KeyProviderCryptoExtension extends
 
   @Override
   public void close() throws IOException {
-if (getKeyProvider() != null) {
-  getKeyProvider().close();
+KeyProvider provider = getKeyProvider();
+if (provider != null && provider != this) {
+  provider.close();
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/291df5c7/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index cd773dd..40ae19f 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -40,9 +40,9 @@ import javax.servlet.ServletContextEvent;
 import javax.servlet.ServletContextListener;
 
 import java.io.File;
+import java.io.IOException;
 import java.net.URI;
 import java.net.URL;
-import java.util.List;
 
 @InterfaceAudience.Private
 public class KMSWebApp implements ServletContextListener {
@@ -215,6 +215,11 @@ public class KMSWebApp implements ServletContextListener {
 
   @Override
   public void contextDestroyed(ServletContextEvent sce) {
+try {
+  keyProviderCryptoExtension.close();
+} catch (IOException ioe) {
+  LOG.error("Error closing KeyProviderCryptoExtension", ioe);
+}
 kmsAudit.shutdown();
 kmsAcls.stopReloader();
 jmxReporter.stop();





[01/50] [abbrv] hadoop git commit: YARN-5929. Missing scheduling policy in the FS queue metric. (Contributed by Yufei Gu via Daniel Templeton)

2016-12-08 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 013a3c454 -> 881de1fba


YARN-5929. Missing scheduling policy in the FS queue metric. (Contributed by 
Yufei Gu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5bd18c49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5bd18c49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5bd18c49

Branch: refs/heads/HADOOP-13345
Commit: 5bd18c49bd5075fa20d24363dceea7828e3fa266
Parents: 2ff84a0
Author: Daniel Templeton 
Authored: Fri Dec 2 13:35:09 2016 -0800
Committer: Daniel Templeton 
Committed: Fri Dec 2 13:55:42 2016 -0800

--
 .../scheduler/fair/FSQueueMetrics.java  | 32 +++--
 .../scheduler/fair/TestFSQueueMetrics.java  | 69 
 2 files changed, 97 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bd18c49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
index a970815..ca375f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metric;
@@ -169,6 +170,12 @@ public class FSQueueMetrics extends QueueMetrics {
 amResourceUsageVCores.set(resource.getVirtualCores());
   }
 
+  /**
+   * Get the scheduling policy.
+   *
+   * @return the scheduling policy
+   */
+  @Metric("Scheduling policy")
   public String getSchedulingPolicy() {
 return schedulingPolicy;
   }
@@ -181,21 +188,38 @@ public class FSQueueMetrics extends QueueMetrics {
   static FSQueueMetrics forQueue(String queueName, Queue parent,
   boolean enableUserMetrics, Configuration conf) {
 MetricsSystem ms = DefaultMetricsSystem.instance();
+return forQueue(ms, queueName, parent, enableUserMetrics, conf);
+  }
+
+  /**
+   * Get the FS queue metric for the given queue. Create one and register it to
+   * metrics system if there isn't one for the queue.
+   *
+   * @param ms the metric system
+   * @param queueName queue name
+   * @param parent parent queue
+   * @param enableUserMetrics  if user metrics is needed
+   * @param conf configuration
+   * @return a FSQueueMetrics object
+   */
+  @VisibleForTesting
+  public synchronized
+  static FSQueueMetrics forQueue(MetricsSystem ms, String queueName,
+  Queue parent, boolean enableUserMetrics, Configuration conf) {
 QueueMetrics metrics = queueMetrics.get(queueName);
 if (metrics == null) {
   metrics = new FSQueueMetrics(ms, queueName, parent, enableUserMetrics, 
conf)
   .tag(QUEUE_INFO, queueName);
-  
+
   // Register with the MetricsSystems
   if (ms != null) {
 metrics = ms.register(
-sourceName(queueName).toString(), 
-"Metrics for queue: " + queueName, metrics);
+sourceName(queueName).toString(),
+"Metrics for queue: " + queueName, metrics);
   }
   queueMetrics.put(queueName, metrics);
 }
 
 return (FSQueueMetrics)metrics;
   }
-
 }

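The second forQueue overload makes the metrics system injectable, so a test can register queue metrics against a private MetricsSystem instead of the process-wide singleton. A sketch of the intended usage, matching the signature shown above; the queue name and prefix are made up, and the policy may print as null until the scheduler sets it:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueueMetrics;

    public class FSQueueMetricsDemo {
      public static void main(String[] args) {
        // A private metrics system keeps the caller isolated from
        // DefaultMetricsSystem.instance().
        MetricsSystem ms = new MetricsSystemImpl();
        ms.init("test");
        FSQueueMetrics m = FSQueueMetrics.forQueue(
            ms, "root.queueA", null, false, new Configuration());
        // Exported as a gauge as well, thanks to the new @Metric annotation.
        System.out.println(m.getSchedulingPolicy());
      }
    }
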
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bd18c49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSQueueMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSQueueMetrics.java
 

[11/50] [abbrv] hadoop git commit: HADOOP-13675. Bug in return value for delete() calls in WASB. Contributed by Dushyanth

2016-12-08 Thread stevel
HADOOP-13675. Bug in return value for delete() calls in WASB. Contributed by 
Dushyanth


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15dd1f33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15dd1f33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15dd1f33

Branch: refs/heads/HADOOP-13345
Commit: 15dd1f3381069c5fdc6690e3ab1907a133ba14bf
Parents: 8c46808
Author: Mingliang Liu 
Authored: Mon Dec 5 12:04:07 2016 -0800
Committer: Mingliang Liu 
Committed: Mon Dec 5 12:04:07 2016 -0800

--
 .../fs/azure/AzureNativeFileSystemStore.java|  31 +++--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  25 ++--
 .../hadoop/fs/azure/NativeFileSystemStore.java  |  23 +++-
 ...estNativeAzureFileSystemConcurrencyLive.java | 119 +++
 4 files changed, 176 insertions(+), 22 deletions(-)
--

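The contract being restored here is the standard FileSystem one: delete() must return true only when something was actually removed, which matters most when concurrent clients race to delete the same blob (the new TestNativeAzureFileSystemConcurrencyLive below exercises exactly that). A sketch of what a caller can now rely on; the account and container in the URI are placeholders:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WasbDeleteDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(
            URI.create("wasb://container@account.blob.core.windows.net"),
            new Configuration());
        // If several clients race to delete the same path, at most one
        // should observe true; the others must observe false.
        boolean removed = fs.delete(new Path("/tmp/shared-temp-file"), false);
        System.out.println("this client removed it: " + removed);
      }
    }
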

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15dd1f33/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 3e864a4..ac6c514 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -2045,10 +2045,10 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
*  The key to search for.
* @return The wanted directory, or null if not found.
*/
-  private static FileMetadata getDirectoryInList(
+  private static FileMetadata getFileMetadataInList(
  final Iterable<FileMetadata> list, String key) {
 for (FileMetadata current : list) {
-  if (current.isDir() && current.getKey().equals(key)) {
+  if (current.getKey().equals(key)) {
 return current;
   }
 }
@@ -2114,7 +2114,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 
   // Add the metadata to the list, but remove any existing duplicate
   // entries first that we may have added by finding nested files.
-  FileMetadata existing = getDirectoryInList(fileMetadata, blobKey);
+  FileMetadata existing = getFileMetadataInList(fileMetadata, blobKey);
   if (existing != null) {
 fileMetadata.remove(existing);
   }
@@ -2141,7 +2141,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 
   // Add the directory metadata to the list only if it's not already
   // there.
-  if (getDirectoryInList(fileMetadata, dirKey) == null) {
+  if (getFileMetadataInList(fileMetadata, dirKey) == null) {
 fileMetadata.add(directoryMetadata);
   }
 
@@ -2249,7 +2249,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 
   // Add the directory metadata to the list only if it's not already
   // there.
-  FileMetadata existing = getDirectoryInList(aFileMetadataList, 
blobKey);
+  FileMetadata existing = getFileMetadataInList(aFileMetadataList, 
blobKey);
   if (existing != null) {
 aFileMetadataList.remove(existing);
   }
@@ -2278,7 +2278,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 // absolute path is being used or not.
 String dirKey = normalizeKey(directory);
 
-if (getDirectoryInList(aFileMetadataList, dirKey) == null) {
+if (getFileMetadataInList(aFileMetadataList, dirKey) == null) {
   // Reached the targeted listing depth. Return metadata for the
   // directory using default permissions.
   //
@@ -2376,18 +2376,24 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 }
   }
 
+  /**
+   * API implementation to delete a blob in the back end azure storage.
+   */
   @Override
-  public void delete(String key, SelfRenewingLease lease) throws IOException {
+  public boolean delete(String key, SelfRenewingLease lease) throws 
IOException {
 try {
   if (checkContainer(ContainerAccessType.ReadThenWrite) == 
ContainerState.DoesntExist) {
 // Container doesn't exist, no need to do anything
-return;
+return true;
   }
 
   // Get the blob reference and delete it.
   CloudBlobWrapper blob = getBlobReference(key);
   if (blob.exists(getInstrumentedContext())) {
 safeDelete(blob, lease);
+return true;
+  } else 

[03/50] [abbrv] hadoop git commit: HADOOP-13257. Improve Azure Data Lake contract tests. Contributed by Vishwajeet Dusane

2016-12-08 Thread stevel
HADOOP-13257. Improve Azure Data Lake contract tests. Contributed by Vishwajeet 
Dusane


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4113ec5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4113ec5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4113ec5f

Branch: refs/heads/HADOOP-13345
Commit: 4113ec5fa5ca049ebaba039b1faf3911c6a34f7b
Parents: 51211a7
Author: Mingliang Liu 
Authored: Fri Dec 2 15:54:57 2016 -0800
Committer: Mingliang Liu 
Committed: Fri Dec 2 15:54:57 2016 -0800

--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  24 +-
 .../org/apache/hadoop/fs/adl/TestAdlRead.java   |   6 +-
 .../apache/hadoop/fs/adl/TestListStatus.java|   6 +-
 .../fs/adl/live/TestAdlContractAppendLive.java  |  11 +-
 .../fs/adl/live/TestAdlContractConcatLive.java  |  23 +-
 .../fs/adl/live/TestAdlContractCreateLive.java  |  19 +-
 .../fs/adl/live/TestAdlContractDeleteLive.java  |  11 +-
 .../live/TestAdlContractGetFileStatusLive.java  |  36 ++
 .../fs/adl/live/TestAdlContractMkdirLive.java   |  25 +-
 .../fs/adl/live/TestAdlContractOpenLive.java|  11 +-
 .../fs/adl/live/TestAdlContractRenameLive.java  |  30 +-
 .../fs/adl/live/TestAdlContractRootDirLive.java |  19 +-
 .../fs/adl/live/TestAdlContractSeekLive.java|  11 +-
 .../live/TestAdlDifferentSizeWritesLive.java|  69 ++--
 .../live/TestAdlFileContextCreateMkdirLive.java |  67 
 .../TestAdlFileContextMainOperationsLive.java   |  99 ++
 .../adl/live/TestAdlFileSystemContractLive.java |  57 +---
 .../live/TestAdlInternalCreateNonRecursive.java | 134 
 .../fs/adl/live/TestAdlPermissionLive.java  | 116 +++
 .../adl/live/TestAdlSupportedCharsetInPath.java | 334 +++
 .../apache/hadoop/fs/adl/live/TestMetadata.java | 111 ++
 21 files changed, 995 insertions(+), 224 deletions(-)
--

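The root-path guards in the AdlFileSystem diff below pin down the following behavior: rename of the root is refused, recursive delete of the root returns false without touching it, and a non-recursive delete of a non-empty root raises an IOException (matching the HADOOP-12977 and HADOOP-13716 semantics cited in the code comments). Sketched here as the outcomes a caller should expect; fs stands for any AdlFileSystem instance obtained elsewhere:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AdlRootSemantics {
      static void demo(FileSystem fs) throws IOException {
        Path root = new Path("/");
        boolean renamed = fs.rename(root, new Path("/elsewhere")); // false, no-op
        boolean deleted = fs.delete(root, true);                   // false, root kept
        try {
          fs.delete(root, false); // throws only if the root is non-empty
        } catch (IOException expected) {
          System.out.println(expected.getMessage());
        }
        System.out.println(renamed + " " + deleted);
      }
    }
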

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 9083afc..bd43c52 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -346,7 +346,6 @@ public class AdlFileSystem extends FileSystem {
* @see #setPermission(Path, FsPermission)
* @deprecated API only for 0.20-append
*/
-  @Deprecated
   @Override
   public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication,
@@ -471,6 +470,10 @@ public class AdlFileSystem extends FileSystem {
   @Override
   public boolean rename(final Path src, final Path dst) throws IOException {
 statistics.incrementWriteOps(1);
+if (toRelativeFilePath(src).equals("/")) {
+  return false;
+}
+
 return adlClient.rename(toRelativeFilePath(src), toRelativeFilePath(dst));
   }
 
@@ -522,9 +525,24 @@ public class AdlFileSystem extends FileSystem {
   public boolean delete(final Path path, final boolean recursive)
   throws IOException {
 statistics.incrementWriteOps(1);
+String relativePath = toRelativeFilePath(path);
+// Delete on root directory not supported.
+if (relativePath.equals("/")) {
+  // This is important check after recent commit
+  // HADOOP-12977 and HADOOP-13716 validates on root for
+  // 1. if root is empty and non recursive delete then return false.
+  // 2. if root is non empty and non recursive delete then throw exception.
+  if (!recursive
+  && adlClient.enumerateDirectory(toRelativeFilePath(path), 1).size()
+  > 0) {
+throw new IOException("Delete on root is not supported.");
+  }
+  return false;
+}
+
 return recursive ?
-adlClient.deleteRecursive(toRelativeFilePath(path)) :
-adlClient.delete(toRelativeFilePath(path));
+adlClient.deleteRecursive(relativePath) :
+adlClient.delete(relativePath);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4113ec5f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
index 734256a..172663c 100644
--- 

[10/50] [abbrv] hadoop git commit: HDFS-11094. Send back HAState along with NamespaceInfo during a versionRequest as an optional parameter. Contributed by Eric Badger

2016-12-08 Thread stevel
HDFS-11094. Send back HAState along with NamespaceInfo during a versionRequest 
as an optional parameter. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c468085
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c468085
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c468085

Branch: refs/heads/HADOOP-13345
Commit: 8c4680852b20ad0e65e77dd123c9ba5bb6f2fa39
Parents: 43ebff2
Author: Mingliang Liu 
Authored: Mon Dec 5 11:34:13 2016 -0800
Committer: Mingliang Liu 
Committed: Mon Dec 5 11:48:58 2016 -0800

--
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 76 +---
 .../hdfs/server/datanode/BPOfferService.java| 10 ++-
 .../hdfs/server/datanode/BPServiceActor.java|  4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  8 ++-
 .../hdfs/server/protocol/NamespaceInfo.java | 26 +++
 .../hadoop-hdfs/src/main/proto/HdfsServer.proto |  2 +
 .../server/datanode/TestBPOfferService.java | 31 
 .../hdfs/server/namenode/TestFSNamesystem.java  | 21 ++
 8 files changed, 148 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c468085/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 78371f5..1e6d882 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -26,7 +26,7 @@ import com.google.protobuf.ByteString;
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -338,7 +338,8 @@ public class PBHelper {
 StorageInfoProto storage = info.getStorageInfo();
 return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(),
 info.getBlockPoolID(), storage.getCTime(), info.getBuildVersion(),
-info.getSoftwareVersion(), info.getCapabilities());
+info.getSoftwareVersion(), info.getCapabilities(),
+convert(info.getState()));
   }
 
   public static NamenodeCommand convert(NamenodeCommandProto cmd) {
@@ -744,43 +745,68 @@ public class PBHelper {
   }
   
   public static NamespaceInfoProto convert(NamespaceInfo info) {
-return NamespaceInfoProto.newBuilder()
-.setBlockPoolID(info.getBlockPoolID())
+NamespaceInfoProto.Builder builder = NamespaceInfoProto.newBuilder();
+builder.setBlockPoolID(info.getBlockPoolID())
 .setBuildVersion(info.getBuildVersion())
 .setUnused(0)
 .setStorageInfo(PBHelper.convert((StorageInfo)info))
 .setSoftwareVersion(info.getSoftwareVersion())
-.setCapabilities(info.getCapabilities())
-.build();
+.setCapabilities(info.getCapabilities());
+HAServiceState state = info.getState();
+if(state != null) {
+  builder.setState(convert(info.getState()));
+}
+return builder.build();
   }
 
-  public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) {
-if (s == null) return null;
-switch (s.getState()) {
+  public static HAServiceState convert(HAServiceStateProto s) {
+if (s == null) {
+  return null;
+}
+switch (s) {
+case INITIALIZING:
+  return HAServiceState.INITIALIZING;
 case ACTIVE:
-  return new NNHAStatusHeartbeat(HAServiceState.ACTIVE, s.getTxid());
+  return HAServiceState.ACTIVE;
 case STANDBY:
-  return new NNHAStatusHeartbeat(HAServiceState.STANDBY, s.getTxid());
+  return HAServiceState.STANDBY;
 default:
-  throw new IllegalArgumentException("Unexpected 
NNHAStatusHeartbeat.State:" + s.getState());
+  throw new IllegalArgumentException("Unexpected HAServiceStateProto:"
+  + s);
 }
   }
 
+  public static HAServiceStateProto convert(HAServiceState s) {
+if (s == null) {
+  return null;
+}
+switch (s) {
+case INITIALIZING:
+  return HAServiceStateProto.INITIALIZING;
+case ACTIVE:
+  return HAServiceStateProto.ACTIVE;
+case STANDBY:
+  return HAServiceStateProto.STANDBY;
+default:
+  throw new IllegalArgumentException("Unexpected HAServiceState:"
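
For illustration, a minimal, self-contained sketch of the null-safe enum
mapping pattern this hunk introduces. ServiceState and ServiceStateProto are
simplified stand-ins for HAServiceState and the generated HAServiceStateProto,
not the real Hadoop classes.

public final class StateConversionSketch {
  enum ServiceState { INITIALIZING, ACTIVE, STANDBY }
  enum ServiceStateProto { INITIALIZING, ACTIVE, STANDBY }

  // Mirrors the convert(HAServiceState) overload above: a null state maps to
  // null, so the optional proto field is simply left unset by the builder.
  static ServiceStateProto convert(ServiceState s) {
    if (s == null) {
      return null;
    }
    switch (s) {
    case INITIALIZING: return ServiceStateProto.INITIALIZING;
    case ACTIVE:       return ServiceStateProto.ACTIVE;
    case STANDBY:      return ServiceStateProto.STANDBY;
    default:
      throw new IllegalArgumentException("Unexpected state: " + s);
    }
  }

  public static void main(String[] args) {
    System.out.println(convert(ServiceState.ACTIVE)); // prints ACTIVE
    System.out.println(convert(null));                // prints null, no NPE
  }
}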

[15/50] [abbrv] hadoop git commit: HDFS-10581. Hide redundant table on NameNode WebUI when no nodes are decommissioning. Contributed by Weiwei Yang.

2016-12-08 Thread stevel
HDFS-10581. Hide redundant table on NameNode WebUI when no nodes are 
decommissioning. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e63fa98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e63fa98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e63fa98

Branch: refs/heads/HADOOP-13345
Commit: 8e63fa98eabac55bdb2254306584ad1e759c79eb
Parents: a2b5d60
Author: Andrew Wang 
Authored: Mon Dec 5 18:13:53 2016 -0800
Committer: Andrew Wang 
Committed: Mon Dec 5 18:13:53 2016 -0800

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e63fa98/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index b0db3a1..13569fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -352,6 +352,7 @@
 
 Decommissioning
 
+{?DecomNodes}
 
   
 
@@ -370,6 +371,9 @@
   
   {/DecomNodes}
 
+{:else}
+No nodes are decommissioning
+{/DecomNodes}
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/50] [abbrv] hadoop git commit: HDFS-11201. Spelling errors in the logging, help, assertions and exception messages. Contributed by Grant Sohn.

2016-12-08 Thread stevel
HDFS-11201. Spelling errors in the logging, help, assertions and exception 
messages. Contributed by Grant Sohn.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9522e86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9522e86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9522e86

Branch: refs/heads/HADOOP-13345
Commit: b9522e86a55564c2ccb5ca3f1ca871965cbe74de
Parents: 291df5c
Author: Wei-Chiu Chuang 
Authored: Mon Dec 5 09:37:12 2016 -0800
Committer: Wei-Chiu Chuang 
Committed: Mon Dec 5 10:48:25 2016 -0800

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java | 4 ++--
 .../main/java/org/apache/hadoop/lib/server/ServerException.java  | 2 +-
 .../java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java   | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/DFSUtil.java| 2 +-
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java  | 2 +-
 .../hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java  | 2 +-
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java | 2 +-
 .../hadoop/hdfs/server/diskbalancer/command/QueryCommand.java| 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java  | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java  | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java   | 4 ++--
 .../server/namenode/web/resources/NamenodeWebHdfsMethods.java| 2 +-
 13 files changed, 15 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 5783f90..aabd6fd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1052,7 +1052,7 @@ public class DFSInputStream extends FSInputStream
 reader.getNetworkDistance(), nread);
 if (nread != len) {
   throw new IOException("truncated return from reader.read(): " +
-  "excpected " + len + ", got " + nread);
+  "expected " + len + ", got " + nread);
 }
 DFSClientFaultInjector.get().readFromDatanodeDelay();
 return;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 51ad08f..db064e4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -57,11 +57,11 @@ public class LongBitFormat implements Serializable {
   public long combine(long value, long record) {
 if (value < MIN) {
   throw new IllegalArgumentException(
-  "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN);
+  "Illegal value: " + NAME + " = " + value + " < MIN = " + MIN);
 }
 if (value > MAX) {
   throw new IllegalArgumentException(
-  "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX);
+  "Illegal value: " + NAME + " = " + value + " > MAX = " + MAX);
 }
 return (record & ~MASK) | (value << OFFSET);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9522e86/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
index e3759ce..fdca64e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
@@ -38,7 +38,7 @@ public class ServerException extends XException {
 

[27/50] [abbrv] hadoop git commit: HDFS-10930. Refactor: Wrap Datanode IO related operations. Contributed by Xiaoyu Yao.

2016-12-08 Thread stevel
HDFS-10930. Refactor: Wrap Datanode IO related operations. Contributed by 
Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df983b52
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df983b52
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df983b52

Branch: refs/heads/HADOOP-13345
Commit: df983b524ab68ea0c70cee9033bfff2d28052cbf
Parents: 43cb167
Author: Xiaoyu Yao 
Authored: Mon Dec 5 13:04:39 2016 -0800
Committer: Xiaoyu Yao 
Committed: Tue Dec 6 11:05:47 2016 -0800

--
 .../hdfs/server/datanode/BlockReceiver.java |  66 +++
 .../hdfs/server/datanode/BlockSender.java   | 105 ---
 .../hadoop/hdfs/server/datanode/DNConf.java |   4 +
 .../hdfs/server/datanode/DataStorage.java   |   5 +
 .../hdfs/server/datanode/LocalReplica.java  | 179 +--
 .../server/datanode/LocalReplicaInPipeline.java |  30 ++--
 .../hdfs/server/datanode/ReplicaInPipeline.java |   4 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |   3 +-
 .../datanode/fsdataset/ReplicaInputStreams.java | 102 ++-
 .../fsdataset/ReplicaOutputStreams.java | 107 ++-
 .../datanode/fsdataset/impl/BlockPoolSlice.java |  97 +-
 .../impl/FsDatasetAsyncDiskService.java |   7 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   5 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |   5 +-
 .../org/apache/hadoop/hdfs/TestFileAppend.java  |   2 +-
 .../server/datanode/SimulatedFSDataset.java |   8 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   2 +-
 .../server/datanode/TestSimulatedFSDataset.java |   2 +-
 .../extdataset/ExternalDatasetImpl.java |   4 +-
 .../extdataset/ExternalReplicaInPipeline.java   |   6 +-
 20 files changed, 470 insertions(+), 273 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df983b52/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 39419c1..f372072 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -24,10 +24,7 @@ import java.io.Closeable;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
-import java.io.FileDescriptor;
-import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.OutputStream;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.nio.ByteBuffer;
@@ -53,7 +50,6 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
@@ -88,8 +84,6 @@ class BlockReceiver implements Closeable {
* the DataNode needs to recalculate checksums before writing.
*/
   private final boolean needsChecksumTranslation;
-  private OutputStream out = null; // to block file at local disk
-  private FileDescriptor outFd;
   private DataOutputStream checksumOut = null; // to crc file at local disk
   private final int bytesPerChecksum;
   private final int checksumSize;
@@ -250,7 +244,8 @@ class BlockReceiver implements Closeable {
   
   final boolean isCreate = isDatanode || isTransfer 
   || stage == BlockConstructionStage.PIPELINE_SETUP_CREATE;
-  streams = replicaInfo.createStreams(isCreate, requestedChecksum);
+  streams = replicaInfo.createStreams(isCreate, requestedChecksum,
+  datanodeSlowLogThresholdMs);
   assert streams != null : "null streams!";
 
   // read checksum meta information
@@ -260,13 +255,6 @@ class BlockReceiver implements Closeable {
   this.bytesPerChecksum = diskChecksum.getBytesPerChecksum();
   this.checksumSize = diskChecksum.getChecksumSize();
 
-  this.out = streams.getDataOut();
-  if (out instanceof FileOutputStream) {
-this.outFd = ((FileOutputStream)out).getFD();
-  } else {
-LOG.warn("Could not get file descriptor for outputstream of class " +
-out.getClass());
-  }
   this.checksumOut = new DataOutputStream(new BufferedOutputStream(
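
The datanodeSlowLogThresholdMs value threaded into createStreams() above is
what drives slow-IO logging inside the wrapped streams, instead of handing the
raw stream and its file descriptor to callers. A rough, standalone sketch of
that wrapping idea; SlowWriteLoggingStream is a hypothetical name, and the
real ReplicaOutputStreams in this patch carries much more.

import java.io.ByteArrayOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;

class SlowWriteLoggingStream extends FilterOutputStream {
  private final long thresholdMs;

  SlowWriteLoggingStream(OutputStream out, long thresholdMs) {
    super(out);
    this.thresholdMs = thresholdMs;
  }

  @Override
  public void write(byte[] b, int off, int len) throws IOException {
    long begin = System.currentTimeMillis();
    out.write(b, off, len);
    long took = System.currentTimeMillis() - begin;
    if (took > thresholdMs) {
      // the wrapper, not the caller, decides when a write was "slow"
      System.err.println("Slow write of " + len + " bytes took " + took
          + "ms (threshold=" + thresholdMs + "ms)");
    }
  }

  public static void main(String[] args) throws IOException {
    try (OutputStream s =
        new SlowWriteLoggingStream(new ByteArrayOutputStream(), 300)) {
      s.write(new byte[]{1, 2, 3}, 0, 3);   // fast path, nothing logged
    }
  }
}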
   

[12/50] [abbrv] hadoop git commit: Revert "HDFS-10930. Refactor: Wrap Datanode IO related operations. Contributed by Xiaoyu Yao."

2016-12-08 Thread stevel
Revert "HADOOP-10930. Refactor: Wrap Datanode IO related operations. 
Contributed by Xiaoyu Yao."

This reverts commit aeecfa24f4fb6af289920cbf8830c394e66bd78e.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcedb72a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcedb72a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcedb72a

Branch: refs/heads/HADOOP-13345
Commit: dcedb72af468128458e597f08d22f5c34b744ae5
Parents: 15dd1f3
Author: Xiaoyu Yao 
Authored: Mon Dec 5 12:08:48 2016 -0800
Committer: Xiaoyu Yao 
Committed: Mon Dec 5 12:44:20 2016 -0800

--
 .../hdfs/server/datanode/BlockReceiver.java |  66 ---
 .../hdfs/server/datanode/BlockSender.java   | 105 +++
 .../hadoop/hdfs/server/datanode/DNConf.java |   4 -
 .../hdfs/server/datanode/DataStorage.java   |   5 -
 .../hdfs/server/datanode/LocalReplica.java  | 179 ++-
 .../server/datanode/LocalReplicaInPipeline.java |  30 ++--
 .../hdfs/server/datanode/ReplicaInPipeline.java |   4 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |   3 +-
 .../datanode/fsdataset/ReplicaInputStreams.java | 102 +--
 .../fsdataset/ReplicaOutputStreams.java | 107 +--
 .../datanode/fsdataset/impl/BlockPoolSlice.java |  32 ++--
 .../impl/FsDatasetAsyncDiskService.java |   7 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   5 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |   5 +-
 .../org/apache/hadoop/hdfs/TestFileAppend.java  |   2 +-
 .../server/datanode/SimulatedFSDataset.java |  13 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   2 +-
 .../server/datanode/TestSimulatedFSDataset.java |   2 +-
 .../extdataset/ExternalDatasetImpl.java |   4 +-
 .../extdataset/ExternalReplicaInPipeline.java   |   6 +-
 20 files changed, 238 insertions(+), 445 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcedb72a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index f372072..39419c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -24,7 +24,10 @@ import java.io.Closeable;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
+import java.io.FileDescriptor;
+import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.nio.ByteBuffer;
@@ -50,6 +53,7 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
@@ -84,6 +88,8 @@ class BlockReceiver implements Closeable {
* the DataNode needs to recalculate checksums before writing.
*/
   private final boolean needsChecksumTranslation;
+  private OutputStream out = null; // to block file at local disk
+  private FileDescriptor outFd;
   private DataOutputStream checksumOut = null; // to crc file at local disk
   private final int bytesPerChecksum;
   private final int checksumSize;
@@ -244,8 +250,7 @@ class BlockReceiver implements Closeable {
   
   final boolean isCreate = isDatanode || isTransfer 
   || stage == BlockConstructionStage.PIPELINE_SETUP_CREATE;
-  streams = replicaInfo.createStreams(isCreate, requestedChecksum,
-  datanodeSlowLogThresholdMs);
+  streams = replicaInfo.createStreams(isCreate, requestedChecksum);
   assert streams != null : "null streams!";
 
   // read checksum meta information
@@ -255,6 +260,13 @@ class BlockReceiver implements Closeable {
   this.bytesPerChecksum = diskChecksum.getBytesPerChecksum();
   this.checksumSize = diskChecksum.getChecksumSize();
 
+  this.out = streams.getDataOut();
+  if (out instanceof FileOutputStream) {
+this.outFd = ((FileOutputStream)out).getFD();
+  } else {
+LOG.warn("Could not get file descriptor for outputstream of class " +
+out.getClass());
+  }
   

[45/50] [abbrv] hadoop git commit: HDFS-11140. Directory Scanner should log startup message time correctly. Contributed by Yiqun Lin.

2016-12-08 Thread stevel
HDFS-11140. Directory Scanner should log startup message time correctly. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ef89ede
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ef89ede
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ef89ede

Branch: refs/heads/HADOOP-13345
Commit: 9ef89ede2f18c76c601fd585cb9d47511f5fc3ed
Parents: deb4743
Author: Akira Ajisaka 
Authored: Thu Dec 8 13:38:02 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Dec 8 13:38:02 2016 +0900

--
 .../hadoop/hdfs/server/datanode/DirectoryScanner.java   | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ef89ede/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index e2baf32..18188dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -37,6 +37,8 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.lang.time.FastDateFormat;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -61,7 +63,7 @@ public class DirectoryScanner implements Runnable {
   private static final int MILLIS_PER_SECOND = 1000;
   private static final String START_MESSAGE =
   "Periodic Directory Tree Verification scan"
-  + " starting at %dms with interval of %dms";
+  + " starting at %s with interval of %dms";
   private static final String START_MESSAGE_WITH_THROTTLE = START_MESSAGE
   + " and throttle limit of %dms/s";
 
@@ -268,10 +270,12 @@ public class DirectoryScanner implements Runnable {
 String logMsg;
 
 if (throttleLimitMsPerSec < MILLIS_PER_SECOND) {
-  logMsg = String.format(START_MESSAGE_WITH_THROTTLE, firstScanTime,
-  scanPeriodMsecs, throttleLimitMsPerSec);
+  logMsg = String.format(START_MESSAGE_WITH_THROTTLE,
+  FastDateFormat.getInstance().format(firstScanTime), scanPeriodMsecs,
+  throttleLimitMsPerSec);
 } else {
-  logMsg = String.format(START_MESSAGE, firstScanTime, scanPeriodMsecs);
+  logMsg = String.format(START_MESSAGE,
+  FastDateFormat.getInstance().format(firstScanTime), scanPeriodMsecs);
 }
 
 LOG.info(logMsg);
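
A quick sketch of why the fix matters: the old %dms conversion printed the
scan start time as a raw epoch value, while FastDateFormat renders it as a
readable timestamp. Assumes commons-lang 2.x on the classpath, as in this
patch; the sample value is made up.

import org.apache.commons.lang.time.FastDateFormat;

public class ScanTimeLogSketch {
  public static void main(String[] args) {
    long firstScanTime = System.currentTimeMillis() + 5000; // sample time
    // before: "... starting at 1481171882000ms ..."
    System.out.println(String.format(
        "scan starting at %dms", firstScanTime));
    // after: a locale-dependent, human-readable timestamp
    System.out.println(String.format(
        "scan starting at %s",
        FastDateFormat.getInstance().format(firstScanTime)));
  }
}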


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[02/50] [abbrv] hadoop git commit: HADOOP-13855. Fix a couple of the s3a statistic names to be consistent with the rest. Contributed by Steve Loughran

2016-12-08 Thread stevel
HADOOP-13855. Fix a couple of the s3a statistic names to be consistent with the 
rest. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51211a7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51211a7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51211a7d

Branch: refs/heads/HADOOP-13345
Commit: 51211a7d7aa342b93951fe61da3f624f0652e101
Parents: 5bd18c4
Author: Mingliang Liu 
Authored: Fri Dec 2 13:48:15 2016 -0800
Committer: Mingliang Liu 
Committed: Fri Dec 2 14:01:42 2016 -0800

--
 .../src/main/java/org/apache/hadoop/fs/s3a/Statistic.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51211a7d/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
index 36ec50b..789c6d7 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
@@ -92,12 +92,12 @@ public enum Statistic {
   "Count of times the TCP stream was aborted"),
   STREAM_BACKWARD_SEEK_OPERATIONS("stream_backward_seek_operations",
   "Number of executed seek operations which went backwards in a stream"),
-  STREAM_CLOSED("streamClosed", "Count of times the TCP stream was closed"),
+  STREAM_CLOSED("stream_closed", "Count of times the TCP stream was closed"),
   STREAM_CLOSE_OPERATIONS("stream_close_operations",
   "Total count of times an attempt to close a data stream was made"),
   STREAM_FORWARD_SEEK_OPERATIONS("stream_forward_seek_operations",
   "Number of executed seek operations which went forward in a stream"),
-  STREAM_OPENED("streamOpened",
+  STREAM_OPENED("stream_opened",
   "Total count of times an input stream to object store was opened"),
   STREAM_READ_EXCEPTIONS("stream_read_exceptions",
   "Number of seek operations invoked on input streams"),


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[26/50] [abbrv] hadoop git commit: HDFS-11211. Add a time unit to the DataNode client trace format. Contributed by Jagadesh Kiran N

2016-12-08 Thread stevel
HDFS-11211. Add a time unit to the DataNode client trace format. Contributed by 
Jagadesh Kiran N


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43cb1678
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43cb1678
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43cb1678

Branch: refs/heads/HADOOP-13345
Commit: 43cb1678cc000b1fbf8b813ee14ecef19b86a55c
Parents: ed89856
Author: Mingliang Liu 
Authored: Tue Dec 6 09:12:11 2016 -0800
Committer: Mingliang Liu 
Committed: Tue Dec 6 09:12:11 2016 -0800

--
 .../main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cb1678/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 22a70fa..b845da0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -273,7 +273,7 @@ public class DataNode extends ReconfigurableBase
 ", offset: %s" + // offset
 ", srvID: %s" +  // DatanodeRegistration
 ", blockid: %s" + // block id
-", duration: %s";  // duration time
+", duration(ns): %s";  // duration time
 
   static final Log ClientTraceLog =
 LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
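
In practice the amended template renders like the sketch below. The field list
here is abbreviated and the sample values are made up, but it shows the
duration(ns) column carrying a System.nanoTime() delta.

public class ClientTraceSketch {
  static final String FMT =
      "src: %s, dest: %s, bytes: %s, op: %s, cliID: %s"
      + ", duration(ns): %s";

  public static void main(String[] args) {
    long begin = System.nanoTime();
    // ... serve the block ...
    long durationNs = System.nanoTime() - begin;
    System.out.println(String.format(FMT,
        "/10.0.0.1:50010", "/10.0.0.2:50010", 4096, "HDFS_READ",
        "DFSClient_example", durationNs));
  }
}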


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[49/50] [abbrv] hadoop git commit: HADOOP-13852 hadoop build to allow hadoop version property to be explicitly set. Contributed by Steve Loughran

2016-12-08 Thread stevel
HADOOP-13852 hadoop build to allow hadoop version property to be explicitly 
set. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2655157
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2655157
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2655157

Branch: refs/heads/HADOOP-13345
Commit: c2655157257079b8541d71bb1e5b6cbae75561ff
Parents: 0ef7961
Author: Steve Loughran 
Authored: Thu Dec 8 17:57:15 2016 +
Committer: Steve Loughran 
Committed: Thu Dec 8 17:57:15 2016 +

--
 BUILDING.txt | 11 ++-
 hadoop-common-project/hadoop-common/pom.xml  |  3 +++
 .../src/main/resources/common-version-info.properties|  4 ++--
 .../src/main/resources/yarn-version-info.properties  |  2 +-
 4 files changed, 16 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2655157/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 8b2bba6..7afc3f0 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -390,7 +390,7 @@ http://www.zlib.net/
 
--
 Building distributions:
 
- * Build distribution with native code: mvn package 
[-Pdist][-Pdocs][-Psrc][-Dtar]
+ * Build distribution with native code: mvn package 
[-Pdist][-Pdocs][-Psrc][-Dtar][-Dmaven.javadoc.skip=true]
 
 
--
 Running compatibility checks with checkcompatibility.py
@@ -402,3 +402,12 @@ managers to compare the compatibility of a previous and 
current release.
 As an example, this invocation will check the compatibility of interfaces 
annotated as Public or LimitedPrivate:
 
 ./dev-support/bin/checkcompatibility.py --annotation 
org.apache.hadoop.classification.InterfaceAudience.Public --annotation 
org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate --include 
"hadoop.*" branch-2.7.2 trunk
+
+--
+Changing the Hadoop version declared by VersionInfo
+
+If for compatibility reasons the version of Hadoop has to be declared as a 2.x 
release in the information returned by
+org.apache.hadoop.util.VersionInfo, set the property declared.hadoop.version 
to the desired version.
+For example: mvn package -Pdist -Ddeclared.hadoop.version=2.11
+
+If unset, the project version declared in the POM file is used.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2655157/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index c9b282f..aa20f79 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -36,6 +36,9 @@
 true
 ../etc/hadoop
 wsce-site.xml
+
+${pom.version}
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2655157/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
 
b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
index ad9a24d..9b74960 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
+++ 
b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
@@ -16,11 +16,11 @@
 # limitations under the License.
 #
 
-version=${pom.version}
+version=${declared.hadoop.version}
 revision=${version-info.scm.commit}
 branch=${version-info.scm.branch}
 user=${user.name}
 date=${version-info.build.time}
 url=${version-info.scm.uri}
 srcChecksum=${version-info.source.md5}
-protocVersion=${protobuf.version}
\ No newline at end of file
+protocVersion=${protobuf.version}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2655157/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
index 9a8575c..ee6f13d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
+++ 
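
A simple way to confirm what a build declares, assuming hadoop-common on the
classpath: VersionInfo reads the version baked into
common-version-info.properties, which this patch lets the build override via
-Ddeclared.hadoop.version.

import org.apache.hadoop.util.VersionInfo;

public class DeclaredVersionCheck {
  public static void main(String[] args) {
    // prints the declared version, e.g. 2.11 after
    // mvn package -Pdist -Ddeclared.hadoop.version=2.11
    System.out.println("declared version: " + VersionInfo.getVersion());
    System.out.println("built from: " + VersionInfo.getBranch()
        + " @ " + VersionInfo.getRevision());
  }
}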

[30/50] [abbrv] hadoop git commit: MAPREDUCE-6571. JobEndNotification info logs are missing in AM container syslog (haibochen via rkanter)

2016-12-08 Thread stevel
MAPREDUCE-6571. JobEndNotification info logs are missing in AM container syslog 
(haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b4a9970
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b4a9970
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b4a9970

Branch: refs/heads/HADOOP-13345
Commit: 6b4a9970f187ab399adf7469e494a530f08993b5
Parents: f53b481
Author: Robert Kanter 
Authored: Tue Dec 6 13:11:03 2016 -0800
Committer: Robert Kanter 
Committed: Tue Dec 6 13:11:03 2016 -0800

--
 .../apache/hadoop/mapreduce/v2/app/MRAppMaster.java   | 11 ---
 .../org/apache/hadoop/mapreduce/v2/app/MRApp.java | 14 --
 2 files changed, 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b4a9970/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index b383a02..78c8bdd 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -151,7 +151,6 @@ import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.SystemClock;
-import org.apache.log4j.LogManager;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -1277,14 +1276,9 @@ public class MRAppMaster extends CompositeService {
 }
   }
 
-  protected void shutdownTaskLog() {
-TaskLog.syncLogsShutdown(logSyncer);
-  }
-
   @Override
   public void stop() {
 super.stop();
-shutdownTaskLog();
   }
 
   private boolean isRecoverySupported() throws IOException {
@@ -1824,14 +1818,9 @@ public class MRAppMaster extends CompositeService {
 T call(Configuration conf) throws Exception;
   }
 
-  protected void shutdownLogManager() {
-LogManager.shutdown();
-  }
-
   @Override
   protected void serviceStop() throws Exception {
 super.serviceStop();
-shutdownLogManager();
   }
 
   public ClientService getClientService() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b4a9970/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
index 3472b1f..2d0b5ea 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
@@ -816,19 +816,5 @@ public class MRApp extends MRAppMaster {
 return token.decodeIdentifier();
   }
 
-  @Override
-  protected void shutdownTaskLog() {
-// Avoid closing the logging system during unit tests,
-// otherwise subsequent MRApp instances in the same test
-// will fail to log anything.
-  }
-
-  @Override
-  protected void shutdownLogManager() {
-// Avoid closing the logging system during unit tests,
-// otherwise subsequent MRApp instances in the same test
-// will fail to log anything.
-  }
-
 }
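
The underlying pitfall, in miniature: once log4j's LogManager.shutdown() has
run, later log calls are dropped (log4j may warn about missing appenders),
which is how the job-end notification messages went missing from the AM
syslog. A standalone sketch, not MRAppMaster code.

import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

public class ShutdownLoggingSketch {
  private static final Logger LOG =
      Logger.getLogger(ShutdownLoggingSketch.class);

  public static void main(String[] args) {
    BasicConfigurator.configure();      // console appender
    LOG.info("visible: appenders are live");
    LogManager.shutdown();              // what the removed override did
    LOG.info("invisible: logging already shut down");
  }
}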
  


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[35/50] [abbrv] hadoop git commit: HDFS-10206. Datanodes not sorted properly by distance when the reader isn't a datanode. (Nandakumar via mingma)

2016-12-08 Thread stevel
HDFS-10206. Datanodes not sorted properly by distance when the reader isn't a 
datanode. (Nandakumar via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c73e08a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c73e08a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c73e08a6

Branch: refs/heads/HADOOP-13345
Commit: c73e08a6dad46cad14b38a4a586a5cda1622b206
Parents: 563480d
Author: Ming Ma 
Authored: Wed Dec 7 08:26:09 2016 -0800
Committer: Ming Ma 
Committed: Wed Dec 7 08:26:09 2016 -0800

--
 .../org/apache/hadoop/net/NetworkTopology.java  | 158 +--
 .../server/blockmanagement/DatanodeManager.java |  12 +-
 .../apache/hadoop/net/TestNetworkTopology.java  |  29 +++-
 3 files changed, 182 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c73e08a6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 14c870d..5751d2b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -57,6 +57,10 @@ public class NetworkTopology {
   public static final Logger LOG =
   LoggerFactory.getLogger(NetworkTopology.class);
 
+  private static final char PATH_SEPARATOR = '/';
+  private static final String PATH_SEPARATOR_STR = "/";
+  private static final String ROOT = "/";
+
   public static class InvalidTopologyException extends RuntimeException {
 private static final long serialVersionUID = 1L;
 public InvalidTopologyException(String msg) {
@@ -916,7 +920,7 @@ public class NetworkTopology {
 }
   }
 
-  /** convert a network tree to a string */
+  /** convert a network tree to a string. */
   @Override
   public String toString() {
 // print the number of racks
@@ -970,19 +974,108 @@ public class NetworkTopology {
* @return weight
*/
   protected int getWeight(Node reader, Node node) {
-// 0 is local, 1 is same rack, 2 is off rack
-// Start off by initializing to off rack
-int weight = 2;
-if (reader != null) {
-  if (reader.equals(node)) {
-weight = 0;
-  } else if (isOnSameRack(reader, node)) {
-weight = 1;
+// 0 is local, 2 is same rack, and each level on each node increases the
+//weight by 1
+//Start off by initializing to Integer.MAX_VALUE
+int weight = Integer.MAX_VALUE;
+if (reader != null && node != null) {
+  if(reader.equals(node)) {
+return 0;
+  }
+  int maxReaderLevel = reader.getLevel();
+  int maxNodeLevel = node.getLevel();
+  int currentLevelToCompare = maxReaderLevel > maxNodeLevel ?
+  maxNodeLevel : maxReaderLevel;
+  Node r = reader;
+  Node n = node;
+  weight = 0;
+  while(r != null && r.getLevel() > currentLevelToCompare) {
+r = r.getParent();
+weight++;
+  }
+  while(n != null && n.getLevel() > currentLevelToCompare) {
+n = n.getParent();
+weight++;
+  }
+  while(r != null && n != null && !r.equals(n)) {
+r = r.getParent();
+n = n.getParent();
+weight+=2;
+  }
+}
+return weight;
+  }
+
+  /**
+   * Returns an integer weight which specifies how far away node is
+   * from reader. A lower value signifies that a node is closer.
+   * It uses network location to calculate the weight
+   *
+   * @param reader Node where data will be read
+   * @param node Replica of data
+   * @return weight
+   */
+  private static int getWeightUsingNetworkLocation(Node reader, Node node) {
+//Start off by initializing to Integer.MAX_VALUE
+int weight = Integer.MAX_VALUE;
+if(reader != null && node != null) {
+  String readerPath = normalizeNetworkLocationPath(
+  reader.getNetworkLocation());
+  String nodePath = normalizeNetworkLocationPath(
+  node.getNetworkLocation());
+
+  //same rack
+  if(readerPath.equals(nodePath)) {
+if(reader.getName().equals(node.getName())) {
+  weight = 0;
+} else {
+  weight = 2;
+}
+  } else {
+String[] readerPathToken = readerPath.split(PATH_SEPARATOR_STR);
+String[] nodePathToken = nodePath.split(PATH_SEPARATOR_STR);
+int maxLevelToCompare = readerPathToken.length > nodePathToken.length ?
+nodePathToken.length : readerPathToken.length;
+int 
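
A worked example of the new getWeight() walk on a toy /datacenter/rack/host
tree. SimpleNode is a stand-in for org.apache.hadoop.net.Node, and identity
comparison replaces equals() for brevity.

public class WeightSketch {
  static final class SimpleNode {
    final String name;
    final SimpleNode parent;
    final int level;
    SimpleNode(String name, SimpleNode parent) {
      this.name = name;
      this.parent = parent;
      this.level = parent == null ? 0 : parent.level + 1;
    }
  }

  static int getWeight(SimpleNode reader, SimpleNode node) {
    int weight = Integer.MAX_VALUE;
    if (reader != null && node != null) {
      if (reader == node) {
        return 0;
      }
      int compareLevel = Math.min(reader.level, node.level);
      SimpleNode r = reader;
      SimpleNode n = node;
      weight = 0;
      // climb the deeper node to a common level, +1 per step
      while (r != null && r.level > compareLevel) { r = r.parent; weight++; }
      while (n != null && n.level > compareLevel) { n = n.parent; weight++; }
      // then climb both together until they meet, +2 per step
      while (r != null && n != null && r != n) {
        r = r.parent;
        n = n.parent;
        weight += 2;
      }
    }
    return weight;
  }

  public static void main(String[] args) {
    SimpleNode root = new SimpleNode("", null);
    SimpleNode dc = new SimpleNode("dc1", root);
    SimpleNode rack1 = new SimpleNode("rack1", dc);
    SimpleNode rack2 = new SimpleNode("rack2", dc);
    SimpleNode h1 = new SimpleNode("host1", rack1);
    SimpleNode h2 = new SimpleNode("host2", rack1);
    SimpleNode h3 = new SimpleNode("host3", rack2);
    System.out.println(getWeight(h1, h1)); // 0: local
    System.out.println(getWeight(h1, h2)); // 2: same rack
    System.out.println(getWeight(h1, h3)); // 4: off rack, one level apart
  }
}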

[16/50] [abbrv] hadoop git commit: HADOOP-13835. Move Google Test Framework code from mapreduce to hadoop-common. Contributed by Varun Vasudev.

2016-12-08 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2a3d6c5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/gtest/include/gtest/gtest.h
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/gtest/include/gtest/gtest.h
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/gtest/include/gtest/gtest.h
deleted file mode 100644
index c04205d..000
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/gtest/include/gtest/gtest.h
+++ /dev/null
@@ -1,21192 +0,0 @@
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: w...@google.com (Zhanyong Wan)
-//
-// The Google C++ Testing Framework (Google Test)
-//
-// This header file defines the public API for Google Test.  It should be
-// included by any test program that uses Google Test.
-//
-// IMPORTANT NOTE: Due to limitation of the C++ language, we have to
-// leave some internal implementation details in this header file.
-// They are clearly marked by comments like this:
-//
-//   // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-//
-// Such code is NOT meant to be used by a user directly, and is subject
-// to CHANGE WITHOUT NOTICE.  Therefore DO NOT DEPEND ON IT in a user
-// program!
-//
-// Acknowledgment: Google Test borrowed the idea of automatic test
-// registration from Barthelemy Dagenais' (barthel...@prologique.com)
-// easyUnit framework.
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
-#define GTEST_INCLUDE_GTEST_GTEST_H_
-
-#include 
-#include 
-#include 
-
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: w...@google.com (Zhanyong Wan), eef...@gmail.com (Sean Mcafee)
-//
-// The Google C++ Testing Framework (Google Test)
-//

[20/50] [abbrv] hadoop git commit: HADOOP-13835. Move Google Test Framework code from mapreduce to hadoop-common. Contributed by Varun Vasudev.

2016-12-08 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2a3d6c5/hadoop-common-project/hadoop-common/src/main/native/gtest/gtest-all.cc
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/gtest/gtest-all.cc 
b/hadoop-common-project/hadoop-common/src/main/native/gtest/gtest-all.cc
new file mode 100644
index 000..4f8c08a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/native/gtest/gtest-all.cc
@@ -0,0 +1,10403 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mhe...@google.com (Markus Heule)
+//
+// Google C++ Testing Framework (Google Test)
+//
+// Sometimes it's desirable to build Google Test by compiling a single file.
+// This file serves this purpose.
+
+// This line ensures that gtest.h can be compiled on its own, even
+// when it's fused.
+#include "gtest/gtest.h"
+
+// The following lines pull in the real gtest *.cc files.
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: w...@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED 

[44/50] [abbrv] hadoop git commit: Revert "HDFS-11140. Directory Scanner should log startup message time correctly. Contributed by Yiqun Lin."

2016-12-08 Thread stevel
Revert "HDFS-11140. Directory Scanner should log startup message time 
correctly. Contributed by Yiqun Lin."

This reverts commit 0857641f62778fad64e8158d78320efb0c8b417c.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/deb4743b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/deb4743b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/deb4743b

Branch: refs/heads/HADOOP-13345
Commit: deb4743b2c3bb5c23842f0e224b89e41ef2d36d4
Parents: 0857641
Author: Akira Ajisaka 
Authored: Thu Dec 8 13:37:06 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Dec 8 13:37:06 2016 +0900

--
 .../java/org/apache/hadoop/fs/RawLocalFileSystem.java   |  9 -
 .../hadoop/hdfs/server/datanode/DirectoryScanner.java   | 12 
 2 files changed, 4 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/deb4743b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 84863a5..7bf429e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -791,15 +791,6 @@ public class RawLocalFileSystem extends FileSystem {
   pathToFile(p).toPath(), BasicFileAttributeView.class);
   FileTime fmtime = (mtime >= 0) ? FileTime.fromMillis(mtime) : null;
   FileTime fatime = (atime >= 0) ? FileTime.fromMillis(atime) : null;
-
-  // On some macOS environment, BasicFileAttributeView.setTimes
-  // does not set times correctly when the argument of accessTime is null.
-  // TODO: Remove this after the issue is fixed.
-  if (fatime == null && Shell.MAC) {
-FileStatus f = getFileStatus(p);
-fatime = FileTime.fromMillis(f.getAccessTime());
-  }
-
   view.setTimes(fmtime, fatime, null);
 } catch (NoSuchFileException e) {
   throw new FileNotFoundException("File " + p + " does not exist");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/deb4743b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 18188dd..e2baf32 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -37,8 +37,6 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.commons.lang.time.FastDateFormat;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -63,7 +61,7 @@ public class DirectoryScanner implements Runnable {
   private static final int MILLIS_PER_SECOND = 1000;
   private static final String START_MESSAGE =
   "Periodic Directory Tree Verification scan"
-  + " starting at %s with interval of %dms";
+  + " starting at %dms with interval of %dms";
   private static final String START_MESSAGE_WITH_THROTTLE = START_MESSAGE
   + " and throttle limit of %dms/s";
 
@@ -270,12 +268,10 @@ public class DirectoryScanner implements Runnable {
 String logMsg;
 
 if (throttleLimitMsPerSec < MILLIS_PER_SECOND) {
-  logMsg = String.format(START_MESSAGE_WITH_THROTTLE,
-  FastDateFormat.getInstance().format(firstScanTime), scanPeriodMsecs,
-  throttleLimitMsPerSec);
+  logMsg = String.format(START_MESSAGE_WITH_THROTTLE, firstScanTime,
+  scanPeriodMsecs, throttleLimitMsPerSec);
 } else {
-  logMsg = String.format(START_MESSAGE,
-  FastDateFormat.getInstance().format(firstScanTime), scanPeriodMsecs);
+  logMsg = String.format(START_MESSAGE, firstScanTime, scanPeriodMsecs);
 }
 
 LOG.info(logMsg);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: 

[19/50] [abbrv] hadoop git commit: HADOOP-13835. Move Google Test Framework code from mapreduce to hadoop-common. Contributed by Varun Vasudev.

2016-12-08 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2a3d6c5/hadoop-common-project/hadoop-common/src/main/native/gtest/include/gtest/gtest.h
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/gtest/include/gtest/gtest.h
 
b/hadoop-common-project/hadoop-common/src/main/native/gtest/include/gtest/gtest.h
new file mode 100644
index 000..c04205d
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/gtest/include/gtest/gtest.h
@@ -0,0 +1,21192 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: w...@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for Google Test.  It should be
+// included by any test program that uses Google Test.
+//
+// IMPORTANT NOTE: Due to limitation of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+//   // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE.  Therefore DO NOT DEPEND ON IT in a user
+// program!
+//
+// Acknowledgment: Google Test borrowed the idea of automatic test
+// registration from Barthelemy Dagenais' (barthel...@prologique.com)
+// easyUnit framework.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_H_
+
+#include 
+#include 
+#include 
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: w...@google.com (Zhanyong Wan), eef...@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares functions and macros used internally by
+// Google Test.  They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_

[23/50] [abbrv] hadoop git commit: HADOOP-13861. Spelling errors in logging and exceptions for code. Contributed by Grant Sohn.

2016-12-08 Thread stevel
HADOOP-13861. Spelling errors in logging and exceptions for code. Contributed 
by Grant Sohn.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b988e88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b988e88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b988e88

Branch: refs/heads/HADOOP-13345
Commit: 7b988e88992528a0cac2ca8893652c5d4a90c6b9
Parents: 08a7253
Author: Andrew Wang 
Authored: Mon Dec 5 23:18:18 2016 -0800
Committer: Andrew Wang 
Committed: Mon Dec 5 23:18:18 2016 -0800

--
 .../security/authentication/util/ZKSignerSecretProvider.java | 2 +-
 .../src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java| 2 +-
 .../src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java  | 2 +-
 .../org/apache/hadoop/io/erasurecode/rawcoder/util/GF256.java| 2 +-
 .../src/main/java/org/apache/hadoop/io/file/tfile/TFile.java | 2 +-
 .../src/main/java/org/apache/hadoop/io/file/tfile/Utils.java | 2 +-
 .../src/main/java/org/apache/hadoop/net/NetworkTopology.java | 4 ++--
 .../src/main/java/org/apache/hadoop/security/KDiag.java  | 2 +-
 .../apache/hadoop/security/alias/LocalJavaKeyStoreProvider.java  | 2 +-
 9 files changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b988e88/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
index 1d16b2d..48dfaaa 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
@@ -258,7 +258,7 @@ public class ZKSignerSecretProvider extends 
RolloverSignerSecretProvider {
 } catch (KeeperException.BadVersionException bve) {
   LOG.debug("Unable to push to znode; another server already did it");
 } catch (Exception ex) {
-  LOG.error("An unexpected exception occured pushing data to ZooKeeper",
+  LOG.error("An unexpected exception occurred pushing data to ZooKeeper",
   ex);
 }
   }
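
The BadVersionException catch above is the benign half of an optimistic,
version-checked ZooKeeper write: losing the race simply means another server
already pushed the same rollover data. A minimal sketch of that pattern using
the stock ZooKeeper client API; the method name and parameters below are
illustrative, not the provider's actual code.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class OptimisticZkPush {
  static void pushIfFirst(ZooKeeper zk, String path, byte[] data,
      int expectedVersion) throws KeeperException, InterruptedException {
    try {
      zk.setData(path, data, expectedVersion); // fails if the version moved on
    } catch (KeeperException.BadVersionException bve) {
      // Benign: another server won the race and already wrote this data.
    }
  }
}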

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b988e88/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
index b14e1f0..1ed01ea 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -525,7 +525,7 @@ public class LocalDirAllocator {
 try {
   advance();
 } catch (IOException ie) {
-  throw new RuntimeException("Can't check existance of " + next, ie);
+  throw new RuntimeException("Can't check existence of " + next, ie);
 }
 if (result == null) {
   throw new NoSuchElementException();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b988e88/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
index 0aa3d65..bf30b22 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
@@ -248,7 +248,7 @@ public class CommandFormat {
 private static final long serialVersionUID = 0L;
 
 public DuplicatedOptionException(String duplicatedOption) {
-  super("option " + duplicatedOption + " already exsits!");
+  super("option " + duplicatedOption + " already exists!");
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b988e88/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/GF256.java
--

[22/50] [abbrv] hadoop git commit: Revert "HDFS-11156. Add new op GETFILEBLOCKLOCATIONS to WebHDFS REST API. Contributed by Weiwei Yang"

2016-12-08 Thread stevel
Revert "HDFS-11156. Add new op GETFILEBLOCKLOCATIONS to WebHDFS REST API. 
Contributed by Weiwei Yang"

This reverts commit c7ff34f8dcca3a2024230c5383abd9299daa1b20.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08a7253b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08a7253b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08a7253b

Branch: refs/heads/HADOOP-13345
Commit: 08a7253bc0eb6c9155457feecb9c5cdc17c3a814
Parents: b2a3d6c
Author: Andrew Wang 
Authored: Mon Dec 5 23:08:49 2016 -0800
Committer: Andrew Wang 
Committed: Mon Dec 5 23:09:35 2016 -0800

--
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 32 
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 13 ++---
 .../hadoop/hdfs/web/resources/GetOpParam.java   | 12 +
 .../web/resources/NamenodeWebHdfsMethods.java   | 17 ---
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 30 
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 51 
 6 files changed, 4 insertions(+), 151 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08a7253b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 12899f4..a75f4f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -22,7 +22,6 @@ import com.fasterxml.jackson.databind.ObjectReader;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.FileChecksum;
@@ -589,35 +588,4 @@ class JsonUtilClient {
 lastLocatedBlock, isLastBlockComplete, null, null);
   }
 
-  /** Convert a Json map to BlockLocation. **/
-  static BlockLocation toBlockLocation(Map<?, ?> m)
-  throws IOException {
-long length = ((Number) m.get("length")).longValue();
-long offset = ((Number) m.get("offset")).longValue();
-boolean corrupt = Boolean.
-getBoolean(m.get("corrupt").toString());
-String[] storageIds = toStringArray(getList(m, "storageIds"));
-String[] cachedHosts = toStringArray(getList(m, "cachedHosts"));
-String[] hosts = toStringArray(getList(m, "hosts"));
-String[] names = toStringArray(getList(m, "names"));
-String[] topologyPaths = toStringArray(getList(m, "topologyPaths"));
-StorageType[] storageTypes = toStorageTypeArray(
-getList(m, "storageTypes"));
-return new BlockLocation(names, hosts, cachedHosts,
-topologyPaths, storageIds, storageTypes,
-offset, length, corrupt);
-  }
-
-  static String[] toStringArray(List<?> list) {
-if (list == null) {
-  return null;
-} else {
-  final String[] array = new String[list.size()];
-  int i = 0;
-  for (Object object : list) {
-array[i++] = object.toString();
-  }
-  return array;
-}
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08a7253b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index e82e9f6..23804b7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -1610,20 +1610,13 @@ public class WebHdfsFileSystem extends FileSystem
 statistics.incrementReadOps(1);
 storageStatistics.incrementOpCounter(OpType.GET_FILE_BLOCK_LOCATIONS);
 
-final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
+final HttpOpParam.Op op = GetOpParam.Op.GET_BLOCK_LOCATIONS;
 return new FsPathResponseRunner<BlockLocation[]>(op, p,
 new OffsetParam(offset), new LengthParam(length)) {
   @Override
-  @SuppressWarnings("unchecked")
   BlockLocation[] decodeResponse(Map<?, ?> json) throws IOException {
-List<?> list = JsonUtilClient.getList(json, 

[31/50] [abbrv] hadoop git commit: MAPREDUCE-6816. Progress bars in Web UI always at 100%. Contributed by Shen Yinjie.

2016-12-08 Thread stevel
MAPREDUCE-6816. Progress bars in Web UI always at 100%. Contributed by Shen 
Yinjie.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da4ecc9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da4ecc9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da4ecc9f

Branch: refs/heads/HADOOP-13345
Commit: da4ecc9f8223ac82bff8483594fb29be2f8d4d34
Parents: 6b4a997
Author: Akira Ajisaka 
Authored: Wed Dec 7 06:41:08 2016 +0900
Committer: Akira Ajisaka 
Committed: Wed Dec 7 06:41:08 2016 +0900

--
 .../java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java | 2 +-
 .../org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java| 2 +-
 .../org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java   | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da4ecc9f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
index f8637e7..01c5b0d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
@@ -125,7 +125,7 @@ public class TaskPage extends AppView {
 
   for (TaskAttempt attempt : getTaskAttempts()) {
 TaskAttemptInfo ta = new TaskAttemptInfo(attempt, true);
-String progress = StringUtils.formatPercent(ta.getProgress() / 100, 2);
+String progress = StringUtils.format("%.2f", ta.getProgress());
 
 String nodeHttpAddr = ta.getNode();
 String diag = ta.getNote() == null ? "" : ta.getNote();
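
The scale of the value is the crux of this fix: TaskAttemptInfo.getProgress()
already returns a number on the 0-100 scale, and the patch renders it as a
bare two-decimal string, which is evidently the shape the progress-bar markup
expects. A self-contained comparison of the two styles; the formatPercent
stand-in mimics the assumed behavior of Hadoop's StringUtils.formatPercent,
fraction in, trailing percent sign out.

public class ProgressFormatDemo {
  // Stand-in for org.apache.hadoop.util.StringUtils.formatPercent (assumed):
  // takes a fraction in [0, 1] and renders it with a trailing '%'.
  static String formatPercent(double fraction, int decimalPlaces) {
    return String.format("%." + decimalPlaces + "f%%", fraction * 100);
  }

  public static void main(String[] args) {
    float progress = 42.0f;  // getProgress() is on a 0-100 scale
    System.out.println(formatPercent(progress / 100, 2)); // "42.00%" (old style)
    System.out.println(String.format("%.2f", progress));  // "42.00"  (new style)
  }
}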

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da4ecc9f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
index 7c1aa49..78338ec 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
@@ -94,7 +94,7 @@ public class TasksBlock extends HtmlBlock {
 
   TaskInfo info = new TaskInfo(task);
   String tid = info.getId();
-  String pct = StringUtils.formatPercent(info.getProgress() / 100, 2);
+  String pct = StringUtils.format("%.2f", info.getProgress());
   tasksTableData.append("[\"").append(tid).append("\",\"")
   //Progress bar

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da4ecc9f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java
index ffb243b..3ed65b9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java
@@ -104,12 +104,12 @@ public class JobInfo {
 this.mapsCompleted = job.getCompletedMaps();
 this.mapProgress = report.getMapProgress() * 100;
 this.mapProgressPercent =
-StringUtils.formatPercent(report.getMapProgress(), 2);
+StringUtils.format("%.2f", getMapProgress());
 this.reducesTotal = 

[14/50] [abbrv] hadoop git commit: HADOOP-13864. KMS should not require truststore password. Contributed by Mike Yoder.

2016-12-08 Thread stevel
HADOOP-13864. KMS should not require truststore password. Contributed by Mike 
Yoder.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2b5d602
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2b5d602
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2b5d602

Branch: refs/heads/HADOOP-13345
Commit: a2b5d602201a4f619f6a68ec2168a884190d8de6
Parents: f3b8ff5
Author: Xiao Chen 
Authored: Mon Dec 5 12:19:26 2016 -0800
Committer: Xiao Chen 
Committed: Mon Dec 5 17:36:00 2016 -0800

--
 .../security/ssl/FileBasedKeyStoresFactory.java   |  6 --
 .../security/ssl/ReloadingX509TrustManager.java   |  2 +-
 .../ssl/TestReloadingX509TrustManager.java| 18 ++
 3 files changed, 23 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2b5d602/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
index 4e59010..a01d11a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
@@ -202,8 +202,10 @@ public class FileBasedKeyStoresFactory implements 
KeyStoresFactory {
   SSL_TRUSTSTORE_PASSWORD_TPL_KEY);
   String truststorePassword = getPassword(conf, passwordProperty, "");
   if (truststorePassword.isEmpty()) {
-throw new GeneralSecurityException("The property '" + passwordProperty 
+
-"' has not been set in the ssl configuration file.");
+// An empty trust store password is legal; the trust store password
+// is only required when writing to a trust store. Otherwise it's
+// an optional integrity check.
+truststorePassword = null;
   }
   long truststoreReloadInterval =
   conf.getLong(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2b5d602/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
index 597f8d7..2d3afea 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
@@ -167,7 +167,7 @@ public final class ReloadingX509TrustManager
 KeyStore ks = KeyStore.getInstance(type);
 FileInputStream in = new FileInputStream(file);
 try {
-  ks.load(in, password.toCharArray());
+  ks.load(in, (password == null) ? null : password.toCharArray());
   lastLoaded = file.lastModified();
   LOG.debug("Loaded truststore '" + file + "'");
 } finally {
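
The ks.load(in, null) call above leans on standard JCA behavior, not anything
KMS-specific: KeyStore.load accepts a null password and then skips the
optional integrity check, and a password is only mandatory when writing a
store. A minimal read-only loading sketch; the path below is hypothetical.

import java.io.FileInputStream;
import java.io.InputStream;
import java.security.KeyStore;

public class TrustStoreLoadDemo {
  static KeyStore loadReadOnly(String path, char[] passwordOrNull)
      throws Exception {
    KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
    try (InputStream in = new FileInputStream(path)) {
      ks.load(in, passwordOrNull); // null password: integrity check skipped
    }
    return ks;
  }

  public static void main(String[] args) throws Exception {
    KeyStore ks = loadReadOnly("/tmp/truststore.jks", null); // hypothetical path
    System.out.println("entries: " + ks.size());
  }
}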

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2b5d602/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
index bf058cd..3fb203e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
@@ -199,4 +199,22 @@ public class TestReloadingX509TrustManager {
 }, reloadInterval, 10 * 1000);
   }
 
+  /** No password when accessing a trust store is legal. */
+  @Test
+  public void testNoPassword() throws Exception {
+KeyPair kp = generateKeyPair("RSA");
+cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA");
+cert2 = generateCertificate("CN=Cert2", kp, 30, "SHA1withRSA");
+String truststoreLocation = BASEDIR + "/testreload.jks";
+createTrustStore(truststoreLocation, "password", "cert1", cert1);
+
+final ReloadingX509TrustManager tm =
+new 

[04/50] [abbrv] hadoop git commit: YARN-5746. The state of the parentQueue and its childQueues should be synchronized. Contributed by Xuan Gong

2016-12-08 Thread stevel
YARN-5746. The state of the parentQueue and its childQueues should be 
synchronized. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f885160f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f885160f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f885160f

Branch: refs/heads/HADOOP-13345
Commit: f885160f4ac56a0999e3b051eb7bccce928c1c33
Parents: 4113ec5
Author: Jian He 
Authored: Fri Dec 2 16:17:31 2016 -0800
Committer: Jian He 
Committed: Fri Dec 2 16:17:31 2016 -0800

--
 .../scheduler/capacity/AbstractCSQueue.java | 26 +-
 .../CapacitySchedulerConfiguration.java | 22 -
 .../scheduler/capacity/TestQueueState.java  | 96 
 3 files changed, 139 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f885160f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 3daabaf..dd2f0d9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -291,7 +291,8 @@ public abstract class AbstractCSQueue implements CSQueue {
 
   authorizer = YarnAuthorizationProvider.getInstance(csContext.getConf());
 
-  this.state = csContext.getConfiguration().getState(getQueuePath());
+  initializeQueueState();
+
   this.acls = csContext.getConfiguration().getAcls(getQueuePath());
 
   // Update metrics
@@ -330,6 +331,29 @@ public abstract class AbstractCSQueue implements CSQueue {
 }
   }
 
+  private void initializeQueueState() {
+// inherit from parent if state not set, only do this when we are not root
+if (parent != null) {
+  QueueState configuredState = csContext.getConfiguration()
+  .getConfiguredState(getQueuePath());
+  QueueState parentState = parent.getState();
+  if (configuredState == null) {
+this.state = parentState;
+  } else if (configuredState == QueueState.RUNNING
+  && parentState == QueueState.STOPPED) {
+throw new IllegalArgumentException(
+"The parent queue:" + parent.getQueueName() + " state is STOPPED, "
++ "child queue:" + queueName + " state cannot be RUNNING.");
+  } else {
+this.state = configuredState;
+  }
+} else {
+  // if this is the root queue, get the state from the configuration.
+  // if the state is not set, use RUNNING as default state.
+  this.state = csContext.getConfiguration().getState(getQueuePath());
+}
+  }
+
   protected QueueInfo getQueueInfo() {
 // Deliberately doesn't use lock here, because this method will be invoked
 // from schedulerApplicationAttempt, to avoid deadlock, sacrifice
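
The rule initializeQueueState encodes is easy to state on its own: an unset
child state inherits the parent's, and RUNNING under a STOPPED parent is
rejected. A standalone restatement follows; the helper and enum are
hypothetical, not part of the patch.

public class QueueStateDemo {
  enum QueueState { RUNNING, STOPPED }

  static QueueState resolve(QueueState configured, QueueState parentState) {
    if (configured == null) {
      return parentState; // inherit when not explicitly configured
    }
    if (configured == QueueState.RUNNING && parentState == QueueState.STOPPED) {
      throw new IllegalArgumentException(
          "child queue cannot be RUNNING under a STOPPED parent");
    }
    return configured;
  }

  public static void main(String[] args) {
    System.out.println(resolve(null, QueueState.STOPPED));               // STOPPED
    System.out.println(resolve(QueueState.STOPPED, QueueState.RUNNING)); // STOPPED
    resolve(QueueState.RUNNING, QueueState.STOPPED);                     // throws
  }
}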

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f885160f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index f8335a8..bfaeba4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java

[13/50] [abbrv] hadoop git commit: YARN-5921. Incorrect synchronization in RMContextImpl#setHAServiceState/getHAServiceState. Contributed by Varun Saxena

2016-12-08 Thread stevel
YARN-5921. Incorrect synchronization in 
RMContextImpl#setHAServiceState/getHAServiceState. Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3b8ff54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3b8ff54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3b8ff54

Branch: refs/heads/HADOOP-13345
Commit: f3b8ff54ab08545d7093bf8861b44ec9912e8dc3
Parents: dcedb72
Author: Naganarasimha 
Authored: Tue Dec 6 06:53:38 2016 +0530
Committer: Naganarasimha 
Committed: Tue Dec 6 06:53:38 2016 +0530

--
 .../hadoop/yarn/server/resourcemanager/RMContextImpl.java | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3b8ff54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
index dc8f7d1..3f17ac6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
@@ -80,6 +80,8 @@ public class RMContextImpl implements RMContext {
 
   private QueueLimitCalculator queueLimitCalculator;
 
+  private final Object haServiceStateLock = new Object();
+
   /**
* Default constructor. To be used in conjunction with setter methods for
* individual fields.
@@ -254,9 +256,9 @@ public class RMContextImpl implements RMContext {
 this.isHAEnabled = isHAEnabled;
   }
 
-  void setHAServiceState(HAServiceState haServiceState) {
-synchronized (haServiceState) {
-  this.haServiceState = haServiceState;
+  void setHAServiceState(HAServiceState serviceState) {
+synchronized (haServiceStateLock) {
+  this.haServiceState = serviceState;
 }
   }
 
@@ -352,7 +354,7 @@ public class RMContextImpl implements RMContext {
 
   @Override
   public HAServiceState getHAServiceState() {
-synchronized (haServiceState) {
+synchronized (haServiceStateLock) {
   return haServiceState;
 }
   }
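
The monitor swap above matters because synchronized(field) locks whatever
object the field currently references; once the setter reassigns the field, a
concurrent reader can take a different monitor and the two blocks no longer
exclude each other. A generic illustration of the dedicated-lock idiom, not RM
code.

public class DedicatedLockDemo {
  private final Object lock = new Object(); // one monitor for all callers
  private String state = "STANDBY";

  void setState(String newState) {
    synchronized (lock) {   // broken variant: synchronized (state)
      state = newState;     // reassignment would invalidate that monitor
    }
  }

  String getState() {
    synchronized (lock) {
      return state;
    }
  }
}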


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[08/50] [abbrv] hadoop git commit: Revert "HDFS-11201. Spelling errors in the logging, help, assertions and exception messages. Contributed by Grant Sohn."

2016-12-08 Thread stevel
Revert "HDFS-11201. Spelling errors in the logging, help, assertions and 
exception messages. Contributed by Grant Sohn."

This reverts commit b9522e86a55564c2ccb5ca3f1ca871965cbe74de.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b5cceaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b5cceaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b5cceaf

Branch: refs/heads/HADOOP-13345
Commit: 1b5cceaffbdde50a87ede81552dc380832db8e79
Parents: b9522e8
Author: Wei-Chiu Chuang 
Authored: Mon Dec 5 10:54:43 2016 -0800
Committer: Wei-Chiu Chuang 
Committed: Mon Dec 5 10:54:43 2016 -0800

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java | 4 ++--
 .../main/java/org/apache/hadoop/lib/server/ServerException.java  | 2 +-
 .../java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java   | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/DFSUtil.java| 2 +-
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java  | 2 +-
 .../hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java  | 2 +-
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java | 2 +-
 .../hadoop/hdfs/server/diskbalancer/command/QueryCommand.java| 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java  | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java  | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java   | 4 ++--
 .../server/namenode/web/resources/NamenodeWebHdfsMethods.java| 2 +-
 13 files changed, 15 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index aabd6fd..5783f90 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1052,7 +1052,7 @@ public class DFSInputStream extends FSInputStream
 reader.getNetworkDistance(), nread);
 if (nread != len) {
   throw new IOException("truncated return from reader.read(): " +
-  "expected " + len + ", got " + nread);
+  "excpected " + len + ", got " + nread);
 }
 DFSClientFaultInjector.get().readFromDatanodeDelay();
 return;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index db064e4..51ad08f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -57,11 +57,11 @@ public class LongBitFormat implements Serializable {
   public long combine(long value, long record) {
 if (value < MIN) {
   throw new IllegalArgumentException(
-  "Illegal value: " + NAME + " = " + value + " < MIN = " + MIN);
+  "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN);
 }
 if (value > MAX) {
   throw new IllegalArgumentException(
-  "Illegal value: " + NAME + " = " + value + " > MAX = " + MAX);
+  "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX);
 }
 return (record & ~MASK) | (value << OFFSET);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b5cceaf/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
index fdca64e..e3759ce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java

[2/2] hadoop git commit: Revert "HDFS-5079. Cleaning up NNHAStatusHeartbeat.State from DatanodeProtocolProtos. Contributed by Tao Luo."

2016-12-08 Thread kihwal
Revert "HDFS-5079. Cleaning up NNHAStatusHeartbeat.State from 
DatanodeProtocolProtos. Contributed by Tao Luo."

This reverts commit 4c8db6009291001b685b63f05b59a084972df8d4.

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13d8e552
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13d8e552
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13d8e552

Branch: refs/heads/trunk
Commit: 13d8e552d2eddd66b3c3e6517d175567a4aa905c
Parents: 9f8344d
Author: Kihwal Lee 
Authored: Thu Dec 8 15:41:49 2016 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 8 15:41:49 2016 -0600

--
 .../main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java | 5 ++---
 .../hadoop-hdfs/src/main/proto/DatanodeProtocol.proto | 7 +--
 2 files changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d8e552/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 78371f5..4f6a04e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -26,7 +26,6 @@ import com.google.protobuf.ByteString;
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -772,10 +771,10 @@ public class PBHelper {
   NNHAStatusHeartbeatProto.newBuilder();
 switch (hb.getState()) {
   case ACTIVE:
-builder.setState(HAServiceProtocolProtos.HAServiceStateProto.ACTIVE);
+builder.setState(NNHAStatusHeartbeatProto.State.ACTIVE);
 break;
   case STANDBY:
-builder.setState(HAServiceProtocolProtos.HAServiceStateProto.STANDBY);
+builder.setState(NNHAStatusHeartbeatProto.State.STANDBY);
 break;
   default:
 throw new IllegalArgumentException("Unexpected 
NNHAStatusHeartbeat.State:" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d8e552/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index 9bae4c3..016eae2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -31,7 +31,6 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 package hadoop.hdfs.datanode;
 
-import "HAServiceProtocol.proto";
 import "hdfs.proto";
 import "erasurecoding.proto";
 import "HdfsServer.proto";
@@ -214,7 +213,11 @@ message HeartbeatRequestProto {
  * txid - Highest transaction ID this NN has seen
  */
 message NNHAStatusHeartbeatProto {
-  required hadoop.common.HAServiceStateProto state = 1;
+  enum State {
+ACTIVE = 0;
+STANDBY = 1;
+  }
+  required State state = 1; 
   required uint64 txid = 2;
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/2] hadoop git commit: Revert "HDFS-11094. Send back HAState along with NamespaceInfo during a versionRequest as an optional parameter. Contributed by Eric Badger"

2016-12-08 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 401c73187 -> 13d8e552d


Revert "HDFS-11094. Send back HAState along with NamespaceInfo during a 
versionRequest as an optional parameter. Contributed by Eric Badger"

This reverts commit 8c4680852b20ad0e65e77dd123c9ba5bb6f2fa39.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f8344db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f8344db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f8344db

Branch: refs/heads/trunk
Commit: 9f8344db502b833efe0f2c554b67098e77063d20
Parents: 401c731
Author: Kihwal Lee 
Authored: Thu Dec 8 15:36:08 2016 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 8 15:36:08 2016 -0600

--
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 76 +++-
 .../hdfs/server/datanode/BPOfferService.java| 10 +--
 .../hdfs/server/datanode/BPServiceActor.java|  4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  8 +--
 .../hdfs/server/protocol/NamespaceInfo.java | 26 ---
 .../hadoop-hdfs/src/main/proto/HdfsServer.proto |  2 -
 .../server/datanode/TestBPOfferService.java | 31 
 .../hdfs/server/namenode/TestFSNamesystem.java  | 21 --
 8 files changed, 30 insertions(+), 148 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f8344db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 1e6d882..78371f5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -26,7 +26,7 @@ import com.google.protobuf.ByteString;
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;
+import org.apache.hadoop.ha.proto.HAServiceProtocolProtos;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -338,8 +338,7 @@ public class PBHelper {
 StorageInfoProto storage = info.getStorageInfo();
 return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(),
 info.getBlockPoolID(), storage.getCTime(), info.getBuildVersion(),
-info.getSoftwareVersion(), info.getCapabilities(),
-convert(info.getState()));
+info.getSoftwareVersion(), info.getCapabilities());
   }
 
   public static NamenodeCommand convert(NamenodeCommandProto cmd) {
@@ -745,68 +744,43 @@ public class PBHelper {
   }
   
   public static NamespaceInfoProto convert(NamespaceInfo info) {
-NamespaceInfoProto.Builder builder = NamespaceInfoProto.newBuilder();
-builder.setBlockPoolID(info.getBlockPoolID())
+return NamespaceInfoProto.newBuilder()
+.setBlockPoolID(info.getBlockPoolID())
 .setBuildVersion(info.getBuildVersion())
 .setUnused(0)
 .setStorageInfo(PBHelper.convert((StorageInfo)info))
 .setSoftwareVersion(info.getSoftwareVersion())
-.setCapabilities(info.getCapabilities());
-HAServiceState state = info.getState();
-if(state != null) {
-  builder.setState(convert(info.getState()));
-}
-return builder.build();
+.setCapabilities(info.getCapabilities())
+.build();
   }
 
-  public static HAServiceState convert(HAServiceStateProto s) {
-if (s == null) {
-  return null;
-}
-switch (s) {
-case INITIALIZING:
-  return HAServiceState.INITIALIZING;
-case ACTIVE:
-  return HAServiceState.ACTIVE;
-case STANDBY:
-  return HAServiceState.STANDBY;
-default:
-  throw new IllegalArgumentException("Unexpected HAServiceStateProto:"
-  + s);
-}
-  }
-
-  public static HAServiceStateProto convert(HAServiceState s) {
-if (s == null) {
-  return null;
-}
-switch (s) {
-case INITIALIZING:
-  return HAServiceStateProto.INITIALIZING;
+  public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) {
+if (s == null) return null;
+switch (s.getState()) {
 case ACTIVE:
-  return HAServiceStateProto.ACTIVE;
+  return new NNHAStatusHeartbeat(HAServiceState.ACTIVE, s.getTxid());
 case STANDBY:
-  return HAServiceStateProto.STANDBY;
+  return new NNHAStatusHeartbeat(HAServiceState.STANDBY, s.getTxid());
 default:
-  throw new 

[1/2] hadoop git commit: YARN-5922. Remove direct references to HBaseTimelineWriter/Reader in core ATS classes. Contributed by Haibo Chen.

2016-12-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 12bce022e -> 17c4ab7eb
  refs/heads/YARN-5355-branch-2 5ee182f03 -> d8e424d85


YARN-5922. Remove direct references to HBaseTimelineWriter/Reader in core ATS 
classes. Contributed by Haibo Chen.

(cherry picked from commit a5a55a54ab1568e941062ea3dabdd237f71f15c4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17c4ab7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17c4ab7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17c4ab7e

Branch: refs/heads/YARN-5355
Commit: 17c4ab7ebb51088caf36fafedae8c256481eeed5
Parents: 12bce02
Author: Sangjin Lee 
Authored: Thu Dec 8 12:31:12 2016 -0800
Committer: Sangjin Lee 
Committed: Thu Dec 8 12:46:47 2016 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  9 +++
 .../collector/TimelineCollectorManager.java | 26 +--
 .../reader/TimelineReaderServer.java| 26 +--
 .../collector/TestTimelineCollectorManager.java | 74 
 .../reader/TestTimelineReaderServer.java| 43 
 5 files changed, 166 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17c4ab7e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index bbfcba8..8752e5d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2004,9 +2004,18 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_SERVICE_WRITER_CLASS =
   TIMELINE_SERVICE_PREFIX + "writer.class";
 
+  public static final String DEFAULT_TIMELINE_SERVICE_WRITER_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice"
+  + ".storage.HBaseTimelineWriterImpl";
+
   public static final String TIMELINE_SERVICE_READER_CLASS =
   TIMELINE_SERVICE_PREFIX + "reader.class";
 
+  public static final String DEFAULT_TIMELINE_SERVICE_READER_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice" +
+  ".storage.HBaseTimelineReaderImpl";
+
+
   /**
* default schema prefix for hbase tables.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17c4ab7e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 9758320..19896e8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -59,10 +58,7 @@ public class TimelineCollectorManager extends 
AbstractService {
 
   @Override
   public void serviceInit(Configuration conf) throws Exception {
-writer = ReflectionUtils.newInstance(conf.getClass(
-YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
-HBaseTimelineWriterImpl.class,
-TimelineWriter.class), conf);
+writer = createTimelineWriter(conf);
 writer.init(conf);
 // create a single dedicated thread for flushing the writer on a periodic
 // basis
@@ -75,6 +71,26 @@ public class 
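
The excerpt cuts off before the body of createTimelineWriter. A plausible
sketch of such a factory under the new string-valued default, offered as an
assumption rather than the committed code; every type referenced appears in
this file's import list above.

  protected TimelineWriter createTimelineWriter(Configuration conf) {
    String writerClassName = conf.get(
        YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WRITER_CLASS);
    try {
      Class<?> writerClass = Class.forName(writerClassName);
      return (TimelineWriter) ReflectionUtils.newInstance(writerClass, conf);
    } catch (ClassNotFoundException e) {
      throw new YarnRuntimeException(
          "Could not instantiate TimelineWriter: " + writerClassName, e);
    }
  }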

[2/2] hadoop git commit: YARN-5922. Remove direct references to HBaseTimelineWriter/Reader in core ATS classes. Contributed by Haibo Chen.

2016-12-08 Thread sjlee
YARN-5922. Remove direct references to HBaseTimelineWriter/Reader in core ATS 
classes. Contributed by Haibo Chen.

(cherry picked from commit a5a55a54ab1568e941062ea3dabdd237f71f15c4)
(cherry picked from commit 17c4ab7ebb51088caf36fafedae8c256481eeed5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8e424d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8e424d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8e424d8

Branch: refs/heads/YARN-5355-branch-2
Commit: d8e424d8555790b7ba451c4c662a9988e2f9fa33
Parents: 5ee182f
Author: Sangjin Lee 
Authored: Thu Dec 8 12:31:12 2016 -0800
Committer: Sangjin Lee 
Committed: Thu Dec 8 12:47:49 2016 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  9 +++
 .../collector/TimelineCollectorManager.java | 26 +--
 .../reader/TimelineReaderServer.java| 26 +--
 .../collector/TestTimelineCollectorManager.java | 74 
 .../reader/TestTimelineReaderServer.java| 43 
 5 files changed, 166 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8e424d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index bb1a5d2..659b5eb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2015,9 +2015,18 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_SERVICE_WRITER_CLASS =
   TIMELINE_SERVICE_PREFIX + "writer.class";
 
+  public static final String DEFAULT_TIMELINE_SERVICE_WRITER_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice"
+  + ".storage.HBaseTimelineWriterImpl";
+
   public static final String TIMELINE_SERVICE_READER_CLASS =
   TIMELINE_SERVICE_PREFIX + "reader.class";
 
+  public static final String DEFAULT_TIMELINE_SERVICE_READER_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice" +
+  ".storage.HBaseTimelineReaderImpl";
+
+
   /**
* default schema prefix for hbase tables.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8e424d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 9758320..19896e8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -59,10 +58,7 @@ public class TimelineCollectorManager extends 
AbstractService {
 
   @Override
   public void serviceInit(Configuration conf) throws Exception {
-writer = ReflectionUtils.newInstance(conf.getClass(
-YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
-HBaseTimelineWriterImpl.class,
-TimelineWriter.class), conf);
+writer = createTimelineWriter(conf);
 writer.init(conf);
 // create a single dedicated thread for flushing the writer on a periodic
 // basis
@@ -75,6 +71,26 @@ public class TimelineCollectorManager extends 
AbstractService {
 

hadoop git commit: HDFS-11197. Listing encryption zones fails when deleting an EZ that is on a snapshotted directory. Contributed by Wellington Chevreuil.

2016-12-08 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c73d8399f -> e51f32f74


HDFS-11197. Listing encryption zones fails when deleting an EZ that is on a 
snapshotted directory. Contributed by Wellington Chevreuil.

(cherry picked from commit 401c7318723d8d62c7fc29728f7f4e8d336b4d2f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e51f32f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e51f32f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e51f32f7

Branch: refs/heads/branch-2
Commit: e51f32f74c2c05458431d4f5040baf0f4d417ec6
Parents: c73d839
Author: Xiao Chen 
Authored: Thu Dec 8 12:40:20 2016 -0800
Committer: Xiao Chen 
Committed: Thu Dec 8 12:41:17 2016 -0800

--
 .../server/namenode/EncryptionZoneManager.java  |   8 +-
 .../apache/hadoop/cli/TestCryptoAdminCLI.java   |   3 +-
 .../namenode/TestEncryptionZoneManager.java | 138 +++
 .../src/test/resources/testCryptoConf.xml   |  98 -
 4 files changed, 242 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51f32f7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 323ebab..52c66c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -372,8 +372,12 @@ public class EncryptionZoneManager {
contain a reference INode.
   */
   final String pathName = getFullPathName(ezi);
-  INodesInPath iip = dir.getINodesInPath(pathName, DirOp.READ_LINK);
-  INode lastINode = iip.getLastINode();
+  INode inode = dir.getInode(ezi.getINodeId());
+  INode lastINode = null;
+  if (inode.getParent() != null || inode.isRoot()) {
+INodesInPath iip = dir.getINodesInPath(pathName, DirOp.READ_LINK);
+lastINode = iip.getLastINode();
+  }
   if (lastINode == null || lastINode.getId() != ezi.getINodeId()) {
 continue;
   }
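
The new guard reads cleanly in isolation: an inode whose parent link is gone
and which is not the root has been deleted from the current namespace and may
survive only inside a snapshot, so its full path cannot be resolved and the
zone is skipped rather than failing the whole listing. A condensed restatement
with a minimal stand-in for the INode surface; illustrative only, not the
committed code.

public class CurrentTreeCheckDemo {
  /** Minimal stand-in for the HDFS INode API used by the patch. */
  interface INodeLike {
    INodeLike getParent();
    boolean isRoot();
  }

  /** Inodes deleted from the current tree fail this check and are skipped. */
  static boolean existsInCurrentTree(INodeLike inode) {
    return inode.getParent() != null || inode.isRoot();
  }
}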

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51f32f7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
index 99a7c2a..afc668c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
@@ -63,6 +63,7 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS {
 conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
 HDFSPolicyProvider.class, PolicyProvider.class);
 conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+conf.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 10);
 
 tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString());
 final Path jksPath = new Path(tmpDir.toString(), "test.jks");
@@ -127,7 +128,7 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS {
   }
 
   private class TestConfigFileParserCryptoAdmin extends
-  CLITestHelper.TestConfigFileParser {
+  CLITestHelperDFS.TestConfigFileParserDFS {
 @Override
 public void endElement(String uri, String localName, String qName)
 throws SAXException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51f32f7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
new file mode 100644
index 000..728e15b
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
@@ -0,0 +1,138 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this 

hadoop git commit: HDFS-11197. Listing encryption zones fails when deleting an EZ that is on a snapshotted directory. Contributed by Wellington Chevreuil.

2016-12-08 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 39bf84039 -> 892ee3f19


HDFS-11197. Listing encryption zones fails when deleting an EZ that is on a 
snapshotted directory. Contributed by Wellington Chevreuil.

(cherry picked from commit 401c7318723d8d62c7fc29728f7f4e8d336b4d2f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/892ee3f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/892ee3f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/892ee3f1

Branch: refs/heads/branch-2.8
Commit: 892ee3f191f2349a7b67e5ce4401ec670cc60299
Parents: 39bf840
Author: Xiao Chen 
Authored: Thu Dec 8 12:40:20 2016 -0800
Committer: Xiao Chen 
Committed: Thu Dec 8 12:41:31 2016 -0800

--
 .../server/namenode/EncryptionZoneManager.java  |   8 +-
 .../apache/hadoop/cli/TestCryptoAdminCLI.java   |   3 +-
 .../namenode/TestEncryptionZoneManager.java | 138 +++
 .../src/test/resources/testCryptoConf.xml   |  98 -
 4 files changed, 242 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/892ee3f1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 323ebab..52c66c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -372,8 +372,12 @@ public class EncryptionZoneManager {
contain a reference INode.
   */
   final String pathName = getFullPathName(ezi);
-  INodesInPath iip = dir.getINodesInPath(pathName, DirOp.READ_LINK);
-  INode lastINode = iip.getLastINode();
+  INode inode = dir.getInode(ezi.getINodeId());
+  INode lastINode = null;
+  if (inode.getParent() != null || inode.isRoot()) {
+INodesInPath iip = dir.getINodesInPath(pathName, DirOp.READ_LINK);
+lastINode = iip.getLastINode();
+  }
   if (lastINode == null || lastINode.getId() != ezi.getINodeId()) {
 continue;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/892ee3f1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
index 050daec..b008219 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
@@ -62,6 +62,7 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS {
 conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
 HDFSPolicyProvider.class, PolicyProvider.class);
 conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+conf.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 10);
 
 tmpDir = new File(System.getProperty("test.build.data", "target"),
 UUID.randomUUID().toString()).getAbsoluteFile();
@@ -127,7 +128,7 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS {
   }
 
   private class TestConfigFileParserCryptoAdmin extends
-  CLITestHelper.TestConfigFileParser {
+  CLITestHelperDFS.TestConfigFileParserDFS {
 @Override
 public void endElement(String uri, String localName, String qName)
 throws SAXException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/892ee3f1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
new file mode 100644
index 0000000..728e15b
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
@@ -0,0 +1,138 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for 

hadoop git commit: HDFS-11197. Listing encryption zones fails when deleting an EZ that is on a snapshotted directory. Contributed by Wellington Chevreuil.

2016-12-08 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk a5a55a54a -> 401c73187


HDFS-11197. Listing encryption zones fails when deleting an EZ that is on a 
snapshotted directory. Contributed by Wellington Chevreuil.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/401c7318
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/401c7318
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/401c7318

Branch: refs/heads/trunk
Commit: 401c7318723d8d62c7fc29728f7f4e8d336b4d2f
Parents: a5a55a5
Author: Xiao Chen 
Authored: Thu Dec 8 12:40:20 2016 -0800
Committer: Xiao Chen 
Committed: Thu Dec 8 12:40:20 2016 -0800

--
 .../server/namenode/EncryptionZoneManager.java  |   8 +-
 .../apache/hadoop/cli/TestCryptoAdminCLI.java   |   3 +-
 .../namenode/TestEncryptionZoneManager.java | 138 +++
 .../src/test/resources/testCryptoConf.xml   |  98 -
 4 files changed, 242 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/401c7318/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index d23963d..6dff62b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -371,8 +371,12 @@ public class EncryptionZoneManager {
contain a reference INode.
   */
   final String pathName = getFullPathName(ezi);
-  INodesInPath iip = dir.getINodesInPath(pathName, DirOp.READ_LINK);
-  INode lastINode = iip.getLastINode();
+  INode inode = dir.getInode(ezi.getINodeId());
+  INode lastINode = null;
+  if (inode.getParent() != null || inode.isRoot()) {
+INodesInPath iip = dir.getINodesInPath(pathName, DirOp.READ_LINK);
+lastINode = iip.getLastINode();
+  }
   if (lastINode == null || lastINode.getId() != ezi.getINodeId()) {
 continue;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/401c7318/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
index 99a7c2a..afc668c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
@@ -63,6 +63,7 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS {
 conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
 HDFSPolicyProvider.class, PolicyProvider.class);
 conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+conf.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 10);
 
 tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString());
 final Path jksPath = new Path(tmpDir.toString(), "test.jks");
@@ -127,7 +128,7 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS {
   }
 
   private class TestConfigFileParserCryptoAdmin extends
-  CLITestHelper.TestConfigFileParser {
+  CLITestHelperDFS.TestConfigFileParserDFS {
 @Override
 public void endElement(String uri, String localName, String qName)
 throws SAXException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/401c7318/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
new file mode 100644
index 0000000..728e15b
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
@@ -0,0 +1,138 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF 
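
The new test file is truncated above. A hypothetical sketch of how such a test could drive the guard with mocks (everything beyond the names appearing in the hunks is an assumption):

  // Hypothetical Mockito-based setup: an inode detached from the tree
  // (parent == null, not root) models an EZ deleted under a snapshot.
  FSDirectory mockedDir = Mockito.mock(FSDirectory.class);
  INodeDirectory deletedZone = Mockito.mock(INodeDirectory.class);
  Mockito.when(deletedZone.getParent()).thenReturn(null);
  Mockito.when(deletedZone.isRoot()).thenReturn(false);
  Mockito.when(mockedDir.getInode(1L)).thenReturn(deletedZone);
  // Listing encryption zones should now skip this zone instead of throwing.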

hadoop git commit: YARN-5922. Remove direct references of HBaseTimelineWriter/Reader in core ATS classes. Contributed by Haibo Chen.

2016-12-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk c26551572 -> a5a55a54a


YARN-5922. Remove direct references of HBaseTimelineWriter/Reader in core ATS 
classes. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5a55a54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5a55a54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5a55a54

Branch: refs/heads/trunk
Commit: a5a55a54ab1568e941062ea3dabdd237f71f15c4
Parents: c265515
Author: Sangjin Lee 
Authored: Thu Dec 8 12:31:12 2016 -0800
Committer: Sangjin Lee 
Committed: Thu Dec 8 12:31:12 2016 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  9 +++
 .../collector/TimelineCollectorManager.java | 26 +--
 .../reader/TimelineReaderServer.java| 26 +--
 .../collector/TestTimelineCollectorManager.java | 74 
 .../reader/TestTimelineReaderServer.java| 43 
 5 files changed, 166 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5a55a54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index fce78c9..4934964 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2019,9 +2019,18 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_SERVICE_WRITER_CLASS =
   TIMELINE_SERVICE_PREFIX + "writer.class";
 
+  public static final String DEFAULT_TIMELINE_SERVICE_WRITER_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice"
+  + ".storage.HBaseTimelineWriterImpl";
+
   public static final String TIMELINE_SERVICE_READER_CLASS =
   TIMELINE_SERVICE_PREFIX + "reader.class";
 
+  public static final String DEFAULT_TIMELINE_SERVICE_READER_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice" +
+  ".storage.HBaseTimelineReaderImpl";
+
+
   /** The setting that controls how often the timeline collector flushes the
* timeline writer.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5a55a54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 9758320..19896e8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -59,10 +58,7 @@ public class TimelineCollectorManager extends 
AbstractService {
 
   @Override
   public void serviceInit(Configuration conf) throws Exception {
-writer = ReflectionUtils.newInstance(conf.getClass(
-YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
-HBaseTimelineWriterImpl.class,
-TimelineWriter.class), conf);
+writer = createTimelineWriter(conf);
 writer.init(conf);
 // create a single dedicated thread for flushing the writer on a periodic
 // basis
@@ -75,6 +71,26 @@ public class TimelineCollectorManager extends 
AbstractService {
 super.serviceInit(conf);
   }
 
+  
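
The factory method is truncated above. A hedged sketch of a reflective createTimelineWriter consistent with the string-valued default added to YarnConfiguration (the point of the change being that the default is a class name, so the core ATS classes no longer compile against the HBase implementation):

  private TimelineWriter createTimelineWriter(Configuration conf) {
    String writerClassName = conf.get(
        YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WRITER_CLASS);
    try {
      Class<?> writerClass = Class.forName(writerClassName);
      return (TimelineWriter) ReflectionUtils.newInstance(writerClass, conf);
    } catch (ClassNotFoundException e) {
      throw new YarnRuntimeException(
          "Could not instantiate TimelineWriter " + writerClassName, e);
    }
  }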

[1/2] hadoop git commit: YARN-5433. Audit dependencies for Category-X. Contributed by Sangjin Lee.

2016-12-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355-branch-2 5c58e55ea -> 5ee182f03


YARN-5433. Audit dependencies for Category-X. Contributed by Sangjin Lee.

(cherry picked from commit f511cc89b66997e496f630bdd299d3068d43fd31)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2dd4185
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2dd4185
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2dd4185

Branch: refs/heads/YARN-5355-branch-2
Commit: f2dd4185478ffd9dbfb57b22d569840bfcd94ff3
Parents: 5c58e55
Author: Sangjin Lee 
Authored: Wed Oct 26 11:31:00 2016 -0700
Committer: Sangjin Lee 
Committed: Thu Dec 8 11:47:24 2016 -0800

--
 LICENSE.txt | 446 +++
 .../pom.xml |   8 +
 .../hadoop-yarn-server-timelineservice/pom.xml  |   4 +
 3 files changed, 458 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2dd4185/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 0e4b492..ee5d528 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -485,6 +485,8 @@ And the binary distribution of this product bundles these 
dependencies under the
 following license:
 Mockito 1.8.5
 SLF4J 1.7.10
+JCodings 1.0.8
+Joni 2.1.2
 

 
 The MIT License (MIT)
@@ -1540,6 +1542,12 @@ JLine 0.9.94
 leveldbjni-all 1.8
 Hamcrest Core 1.3
 xmlenc Library 0.52
+StringTemplate 4 4.0.7
+ANTLR 3 Tool 3.5
+ANTLR 3 Runtime 3.5
+ANTLR StringTemplate 3.2.1
+ASM All 5.0.2
+sqlline 1.1.8
 

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
@@ -1767,3 +1775,441 @@ representations with respect to the Work not specified 
here. Licensor shall not
 be bound by any additional provisions that may appear in any communication from
 You. This License may not be modified without the mutual written agreement of
 the Licensor and You.
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+jamon-runtime 2.3.1
+
+  MOZILLA PUBLIC LICENSE
+Version 1.1
+
+  ---
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+  A. Any addition to or deletion from the contents of a file
+  containing Original Code or previous Modifications.
+
+  B. Any new file that contains any part of the Original Code or
+  previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 

[2/2] hadoop git commit: YARN-5572. HBaseTimelineWriterImpl appears to reference a bad property name. Contributed by Varun Saxena.

2016-12-08 Thread sjlee
YARN-5572. HBaseTimelineWriterImpl appears to reference a bad property name. 
Contributed by Varun Saxena.

(cherry picked from commit c06114d6a360dddeb66c2dd9ad4fa5dae0cfbfb1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ee182f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ee182f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ee182f0

Branch: refs/heads/YARN-5355-branch-2
Commit: 5ee182f030bab1fa821593fe13d124d021823a39
Parents: f2dd418
Author: Naganarasimha 
Authored: Sun Nov 27 23:35:53 2016 +0530
Committer: Sangjin Lee 
Committed: Thu Dec 8 11:51:10 2016 -0800

--
 .../TestTimelineReaderWebServicesHBaseStorage.java|  2 +-
 .../timelineservice/storage/DataGeneratorForTest.java |  4 ++--
 .../storage/TestHBaseTimelineStorageApps.java |  8 
 .../storage/TestHBaseTimelineStorageEntities.java |  6 +++---
 .../storage/flow/TestHBaseStorageFlowActivity.java|  6 +++---
 .../storage/flow/TestHBaseStorageFlowRun.java | 14 +++---
 .../flow/TestHBaseStorageFlowRunCompaction.java   |  2 +-
 .../storage/HBaseTimelineWriterImpl.java  |  5 -
 8 files changed, 21 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ee182f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index e97ea5b..6bbafe3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -334,7 +334,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 HBaseTimelineWriterImpl hbi = null;
 Configuration c1 = util.getConfiguration();
 try {
-  hbi = new HBaseTimelineWriterImpl(c1);
+  hbi = new HBaseTimelineWriterImpl();
   hbi.init(c1);
   hbi.write(cluster, user, flow, flowVersion, runid, entity.getId(), te);
   hbi.write(cluster, user, flow, flowVersion, runid, entity1.getId(), te1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ee182f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
index b56a752..cafacab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
@@ -183,7 +183,7 @@ final class DataGeneratorForTest {
 te2.addEntity(entity2);
 HBaseTimelineWriterImpl hbi = null;
 try {
-  hbi = new HBaseTimelineWriterImpl(util.getConfiguration());
+  hbi = new HBaseTimelineWriterImpl();
   hbi.init(util.getConfiguration());
   hbi.start();
   String cluster = "cluster1";
@@ -401,7 +401,7 @@ final class DataGeneratorForTest {
 
 HBaseTimelineWriterImpl hbi = null;
 try {
-  hbi = new HBaseTimelineWriterImpl(util.getConfiguration());
+  hbi = new HBaseTimelineWriterImpl();
   hbi.init(util.getConfiguration());
   hbi.start();
   String cluster = "cluster1";
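
The tests above switch from the removed Configuration constructor to the standard Hadoop service lifecycle: construct with no arguments, then pass the configuration through init(). A minimal sketch of that pattern, using the same names as the hunks:

  HBaseTimelineWriterImpl hbi = new HBaseTimelineWriterImpl();
  hbi.init(util.getConfiguration());  // configuration now arrives via init()
  hbi.start();
  try {
    hbi.write(cluster, user, flow, flowVersion, runid, entity.getId(), te);
  } finally {
    hbi.stop();
  }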


hadoop git commit: HADOOP-13852 hadoop build to allow hadoop version property to be explicitly set. Contributed by Steve Loughran

2016-12-08 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0ef796174 -> c26551572


HADOOP-13852 hadoop build to allow hadoop version property to be explicitly 
set. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2655157
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2655157
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2655157

Branch: refs/heads/trunk
Commit: c2655157257079b8541d71bb1e5b6cbae75561ff
Parents: 0ef7961
Author: Steve Loughran 
Authored: Thu Dec 8 17:57:15 2016 +
Committer: Steve Loughran 
Committed: Thu Dec 8 17:57:15 2016 +

--
 BUILDING.txt | 11 ++-
 hadoop-common-project/hadoop-common/pom.xml  |  3 +++
 .../src/main/resources/common-version-info.properties|  4 ++--
 .../src/main/resources/yarn-version-info.properties  |  2 +-
 4 files changed, 16 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2655157/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 8b2bba6..7afc3f0 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -390,7 +390,7 @@ http://www.zlib.net/
 
--
 Building distributions:
 
- * Build distribution with native code: mvn package 
[-Pdist][-Pdocs][-Psrc][-Dtar]
+ * Build distribution with native code: mvn package 
[-Pdist][-Pdocs][-Psrc][-Dtar][-Dmaven.javadoc.skip=true]
 
 
--
 Running compatibility checks with checkcompatibility.py
@@ -402,3 +402,12 @@ managers to compare the compatibility of a previous and 
current release.
 As an example, this invocation will check the compatibility of interfaces 
annotated as Public or LimitedPrivate:
 
 ./dev-support/bin/checkcompatibility.py --annotation 
org.apache.hadoop.classification.InterfaceAudience.Public --annotation 
org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate --include 
"hadoop.*" branch-2.7.2 trunk
+
+--
+Changing the Hadoop version declared returned by VersionInfo
+
+If for compatibility reasons the version of Hadoop has to be declared as a 2.x 
release in the information returned by
+org.apache.hadoop.util.VersionInfo, set the property declared.hadoop.version 
to the desired version.
+For example: mvn package -Pdist -Ddeclared.hadoop.version=2.11
+
+If unset, the project version declared in the POM file is used.
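
At runtime the declared version surfaces through org.apache.hadoop.util.VersionInfo, which reads the filtered common-version-info.properties. A minimal sketch (ShowDeclaredVersion is an illustrative class, not part of Hadoop):

  import org.apache.hadoop.util.VersionInfo;

  public class ShowDeclaredVersion {
    public static void main(String[] args) {
      // After: mvn package -Pdist -Ddeclared.hadoop.version=2.11
      // this prints "2.11"; otherwise it prints the POM version.
      System.out.println(VersionInfo.getVersion());
    }
  }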

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2655157/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index c9b282f..aa20f79 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -36,6 +36,9 @@
 true
 ../etc/hadoop
 wsce-site.xml
+
<declared.hadoop.version>${pom.version}</declared.hadoop.version>
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2655157/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
 
b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
index ad9a24d..9b74960 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
+++ 
b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
@@ -16,11 +16,11 @@
 # limitations under the License.
 #
 
-version=${pom.version}
+version=${declared.hadoop.version}
 revision=${version-info.scm.commit}
 branch=${version-info.scm.branch}
 user=${user.name}
 date=${version-info.build.time}
 url=${version-info.scm.uri}
 srcChecksum=${version-info.source.md5}
-protocVersion=${protobuf.version}
\ No newline at end of file
+protocVersion=${protobuf.version}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2655157/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
index 9a8575c..ee6f13d 100644
--- 

[2/2] hadoop git commit: HADOOP-13867. FilterFileSystem should override rename(.., options) so that Rename options take effect when invoked via FilterFileSystem implementations. Contributed by Vinayakumar B.

2016-12-08 Thread brahma
HADOOP-13867. FilterFileSystem should override rename(.., options) so that 
Rename options take effect when invoked via FilterFileSystem implementations. 
Contributed by Vinayakumar B.

(cherry picked from commit 0ef796174ecb5383f79cfecfcbfc4f309d093cd7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c73d8399
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c73d8399
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c73d8399

Branch: refs/heads/branch-2
Commit: c73d8399ff8f1f4943c300b5398bd79637525699
Parents: 0478597
Author: Brahma Reddy Battula 
Authored: Thu Dec 8 18:57:43 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Dec 8 18:58:54 2016 +0530

--
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  7 +++
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java | 12 +++-
 2 files changed, 18 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c73d8399/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 3f9aaa4..41429ac 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
 
@@ -235,6 +236,12 @@ public class FilterFileSystem extends FileSystem {
   }
 
   @Override
+  protected void rename(Path src, Path dst, Rename... options)
+  throws IOException {
+fs.rename(src, dst, options);
+  }
+
+  @Override
   public boolean truncate(Path f, final long newLength) throws IOException {
 return fs.truncate(f, newLength);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c73d8399/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index 76edf5e..c72f579 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -64,7 +64,6 @@ public class TestFilterFileSystem {
 public FSDataOutputStream append(Path f, int bufferSize) throws
 IOException;
 public long getLength(Path f);
-public void rename(Path src, Path dst, Rename... options);
 public boolean exists(Path f);
 public boolean isDirectory(Path f);
 public boolean isFile(Path f);
@@ -263,6 +262,17 @@ public class TestFilterFileSystem {
 verify(mockFs).setWriteChecksum(eq(true));
   }
 
+  @Test
+  public void testRenameOptions() throws Exception {
+FileSystem mockFs = mock(FileSystem.class);
+FileSystem fs = new FilterFileSystem(mockFs);
+Path src = new Path("/src");
+Path dst = new Path("/dest");
+Rename opt = Rename.TO_TRASH;
+fs.rename(src, dst, opt);
+verify(mockFs).rename(eq(src), eq(dst), eq(opt));
+  }
+
   private void checkInit(FilterFileSystem fs, boolean expectInit)
   throws Exception {
 URI uri = URI.create("filter:/");





[1/2] hadoop git commit: HADOOP-13867. FilterFileSystem should override rename(.., options) so that Rename options take effect when invoked via FilterFileSystem implementations. Contributed by Vinayakumar B.

2016-12-08 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0478597ea -> c73d8399f
  refs/heads/trunk 4c2cf5560 -> 0ef796174


HADOOP-13867. FilterFileSystem should override rename(.., options) so that 
Rename options take effect when invoked via FilterFileSystem implementations. 
Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ef79617
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ef79617
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ef79617

Branch: refs/heads/trunk
Commit: 0ef796174ecb5383f79cfecfcbfc4f309d093cd7
Parents: 4c2cf55
Author: Brahma Reddy Battula 
Authored: Thu Dec 8 18:57:43 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Dec 8 18:57:43 2016 +0530

--
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  7 +++
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java | 12 +++-
 2 files changed, 18 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ef79617/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 3f9aaa4..41429ac 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
 
@@ -235,6 +236,12 @@ public class FilterFileSystem extends FileSystem {
   }
 
   @Override
+  protected void rename(Path src, Path dst, Rename... options)
+  throws IOException {
+fs.rename(src, dst, options);
+  }
+
+  @Override
   public boolean truncate(Path f, final long newLength) throws IOException {
 return fs.truncate(f, newLength);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ef79617/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index 24f3dc8..4cbb8ab 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -64,7 +64,6 @@ public class TestFilterFileSystem {
 public FSDataOutputStream append(Path f, int bufferSize) throws
 IOException;
 public long getLength(Path f);
-public void rename(Path src, Path dst, Rename... options);
 public boolean exists(Path f);
 public boolean isDirectory(Path f);
 public boolean isFile(Path f);
@@ -264,6 +263,17 @@ public class TestFilterFileSystem {
 verify(mockFs).setWriteChecksum(eq(true));
   }
 
+  @Test
+  public void testRenameOptions() throws Exception {
+FileSystem mockFs = mock(FileSystem.class);
+FileSystem fs = new FilterFileSystem(mockFs);
+Path src = new Path("/src");
+Path dst = new Path("/dest");
+Rename opt = Rename.TO_TRASH;
+fs.rename(src, dst, opt);
+verify(mockFs).rename(eq(src), eq(dst), eq(opt));
+  }
+
   private void checkInit(FilterFileSystem fs, boolean expectInit)
   throws Exception {
 URI uri = URI.create("filter:/");





hadoop git commit: HDFS-11223. Fix typos in HttpFs documentations. Contributed by Yiqun Lin.

2016-12-08 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 59b0857c6 -> 39bf84039


HDFS-11223. Fix typos in HttpFs documentations. Contributed by Yiqun Lin.

(cherry picked from commit 4c2cf5560f6d952cfa36ef656f0b04dc3150f8b3)
(cherry picked from commit 0478597ea9ac1a0ccfa28f5faf017b78c93d16ab)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39bf8403
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39bf8403
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39bf8403

Branch: refs/heads/branch-2.8
Commit: 39bf8403980f2df90a4b0d56879bae8e9ed7b103
Parents: 59b0857
Author: Akira Ajisaka 
Authored: Thu Dec 8 20:52:24 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Dec 8 20:53:36 2016 +0900

--
 .../hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm | 2 +-
 .../hadoop-hdfs-httpfs/src/site/markdown/index.md  | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39bf8403/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
index ffc8f50..d7a985b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
@@ -50,7 +50,7 @@ IMPORTANT: Replace `#HTTPFSUSER#` with the Unix user that 
will start the HttpFS
 Restart Hadoop
 --
 
-You need to restart Hadoop for the proxyuser configuration ot become active.
+You need to restart Hadoop for the proxyuser configuration to become active.
 
 Start/Stop HttpFS
 -

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39bf8403/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
index 6da32f6..8d6625f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
@@ -15,7 +15,7 @@
 Hadoop HDFS over HTTP - Documentation Sets
 ==
 
-HttpFS is a server that provides a REST HTTP gateway supporting all HDFS File 
System operations (read and write). And it is inteoperable with the **webhdfs** 
REST HTTP API.
+HttpFS is a server that provides a REST HTTP gateway supporting all HDFS File 
System operations (read and write). And it is interoperable with the 
**webhdfs** REST HTTP API.
 
 HttpFS can be used to transfer data between clusters running different 
versions of Hadoop (overcoming RPC versioning issues), for example using Hadoop 
DistCP.
 
@@ -23,9 +23,9 @@ HttpFS can be used to access data in HDFS on a cluster behind 
of a firewall (the
 
 HttpFS can be used to access data in HDFS using HTTP utilities (such as curl 
and wget) and HTTP libraries Perl from other languages than Java.
 
-The **webhdfs** client FileSytem implementation can be used to access HttpFS 
using the Hadoop filesystem command (`hadoop fs`) line tool as well as from 
Java applications using the Hadoop FileSystem Java API.
+The **webhdfs** client FileSystem implementation can be used to access HttpFS 
using the Hadoop filesystem command (`hadoop fs`) line tool as well as from 
Java applications using the Hadoop FileSystem Java API.
 
-HttpFS has built-in security supporting Hadoop pseudo authentication and HTTP 
SPNEGO Kerberos and other pluggable authentication mechanims. It also provides 
Hadoop proxy user support.
+HttpFS has built-in security supporting Hadoop pseudo authentication and HTTP 
SPNEGO Kerberos and other pluggable authentication mechanisms. It also provides 
Hadoop proxy user support.
 
 How Does HttpFS Works?
 --





hadoop git commit: HDFS-11223. Fix typos in HttpFs documentations. Contributed by Yiqun Lin.

2016-12-08 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e00a124cb -> 0478597ea


HDFS-11223. Fix typos in HttpFs documentations. Contributed by Yiqun Lin.

(cherry picked from commit 4c2cf5560f6d952cfa36ef656f0b04dc3150f8b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0478597e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0478597e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0478597e

Branch: refs/heads/branch-2
Commit: 0478597ea9ac1a0ccfa28f5faf017b78c93d16ab
Parents: e00a124
Author: Akira Ajisaka 
Authored: Thu Dec 8 20:52:24 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Dec 8 20:53:09 2016 +0900

--
 .../hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm | 2 +-
 .../hadoop-hdfs-httpfs/src/site/markdown/index.md  | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0478597e/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
index 0cb89de..4b66732 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
@@ -50,7 +50,7 @@ IMPORTANT: Replace `#HTTPFSUSER#` with the Unix user that 
will start the HttpFS
 Restart Hadoop
 --
 
-You need to restart Hadoop for the proxyuser configuration ot become active.
+You need to restart Hadoop for the proxyuser configuration to become active.
 
 Start/Stop HttpFS
 -

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0478597e/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
index bc65b93..3a7cf56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
@@ -15,7 +15,7 @@
 Hadoop HDFS over HTTP - Documentation Sets
 ==
 
-HttpFS is a server that provides a REST HTTP gateway supporting all HDFS File 
System operations (read and write). And it is inteoperable with the **webhdfs** 
REST HTTP API.
+HttpFS is a server that provides a REST HTTP gateway supporting all HDFS File 
System operations (read and write). And it is interoperable with the 
**webhdfs** REST HTTP API.
 
 HttpFS can be used to transfer data between clusters running different 
versions of Hadoop (overcoming RPC versioning issues), for example using Hadoop 
DistCP.
 
@@ -23,9 +23,9 @@ HttpFS can be used to access data in HDFS on a cluster behind 
of a firewall (the
 
 HttpFS can be used to access data in HDFS using HTTP utilities (such as curl 
and wget) and HTTP libraries Perl from other languages than Java.
 
-The **webhdfs** client FileSytem implementation can be used to access HttpFS 
using the Hadoop filesystem command (`hadoop fs`) line tool as well as from 
Java applications using the Hadoop FileSystem Java API.
+The **webhdfs** client FileSystem implementation can be used to access HttpFS 
using the Hadoop filesystem command (`hadoop fs`) line tool as well as from 
Java applications using the Hadoop FileSystem Java API.
 
-HttpFS has built-in security supporting Hadoop pseudo authentication and HTTP 
SPNEGO Kerberos and other pluggable authentication mechanims. It also provides 
Hadoop proxy user support.
+HttpFS has built-in security supporting Hadoop pseudo authentication and HTTP 
SPNEGO Kerberos and other pluggable authentication mechanisms. It also provides 
Hadoop proxy user support.
 
 How Does HttpFS Works?
 --





hadoop git commit: HDFS-11223. Fix typos in HttpFs documentations. Contributed by Yiqun Lin.

2016-12-08 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 74d0066d3 -> 4c2cf5560


HDFS-11223. Fix typos in HttpFs documentations. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c2cf556
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c2cf556
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c2cf556

Branch: refs/heads/trunk
Commit: 4c2cf5560f6d952cfa36ef656f0b04dc3150f8b3
Parents: 74d0066
Author: Akira Ajisaka 
Authored: Thu Dec 8 20:52:24 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Dec 8 20:52:24 2016 +0900

--
 .../hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm | 2 +-
 .../hadoop-hdfs-httpfs/src/site/markdown/index.md  | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c2cf556/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
index 0cb89de..4b66732 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
@@ -50,7 +50,7 @@ IMPORTANT: Replace `#HTTPFSUSER#` with the Unix user that 
will start the HttpFS
 Restart Hadoop
 --
 
-You need to restart Hadoop for the proxyuser configuration ot become active.
+You need to restart Hadoop for the proxyuser configuration to become active.
 
 Start/Stop HttpFS
 -

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c2cf556/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
index 750b7f4..145feb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
@@ -15,7 +15,7 @@
 Hadoop HDFS over HTTP - Documentation Sets
 ==
 
-HttpFS is a server that provides a REST HTTP gateway supporting all HDFS File 
System operations (read and write). And it is inteoperable with the **webhdfs** 
REST HTTP API.
+HttpFS is a server that provides a REST HTTP gateway supporting all HDFS File 
System operations (read and write). And it is interoperable with the 
**webhdfs** REST HTTP API.
 
 HttpFS can be used to transfer data between clusters running different 
versions of Hadoop (overcoming RPC versioning issues), for example using Hadoop 
DistCP.
 
@@ -23,9 +23,9 @@ HttpFS can be used to access data in HDFS on a cluster behind 
of a firewall (the
 
 HttpFS can be used to access data in HDFS using HTTP utilities (such as curl 
and wget) and HTTP libraries Perl from other languages than Java.
 
-The **webhdfs** client FileSytem implementation can be used to access HttpFS 
using the Hadoop filesystem command (`hadoop fs`) line tool as well as from 
Java applications using the Hadoop FileSystem Java API.
+The **webhdfs** client FileSystem implementation can be used to access HttpFS 
using the Hadoop filesystem command (`hadoop fs`) line tool as well as from 
Java applications using the Hadoop FileSystem Java API.
 
-HttpFS has built-in security supporting Hadoop pseudo authentication and HTTP 
SPNEGO Kerberos and other pluggable authentication mechanims. It also provides 
Hadoop proxy user support.
+HttpFS has built-in security supporting Hadoop pseudo authentication and HTTP 
SPNEGO Kerberos and other pluggable authentication mechanisms. It also provides 
Hadoop proxy user support.
 
 How Does HttpFS Works?
 --
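
A hedged sketch of the Java-API access path described above; the host name and path are placeholders, and 14000 is the usual HttpFS port:

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class ListViaHttpFS {
    public static void main(String[] args) throws Exception {
      // The webhdfs:// client speaks the same REST API that HttpFS serves.
      FileSystem fs = FileSystem.get(
          URI.create("webhdfs://httpfs-host.example.com:14000"),
          new Configuration());
      for (FileStatus status : fs.listStatus(new Path("/user"))) {
        System.out.println(status.getPath());
      }
    }
  }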





hadoop git commit: YARN-5970. Validate application update timeout request parameters. Contributed by Rohith Sharma K S.

2016-12-08 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9ef89ede2 -> 74d0066d3


YARN-5970. Validate application update timeout request parameters. Contributed 
by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74d0066d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74d0066d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74d0066d

Branch: refs/heads/trunk
Commit: 74d0066d3392169bec872f438a0818e2f5323010
Parents: 9ef89ed
Author: Sunil G 
Authored: Thu Dec 8 15:53:56 2016 +0530
Committer: Sunil G 
Committed: Thu Dec 8 15:53:56 2016 +0530

--
 .../java/org/apache/hadoop/yarn/util/Times.java |  3 ++
 .../server/resourcemanager/RMServerUtils.java   |  3 +-
 .../resourcemanager/webapp/RMWebServices.java   | 10 ++---
 .../TestRMWebServicesAppsModification.java  | 39 ++--
 4 files changed, 38 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74d0066d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
index f113bd3..3c41558 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
@@ -105,6 +105,9 @@ public class Times {
*/
   public static long parseISO8601ToLocalTimeInMillis(String isoString)
   throws ParseException {
+if (isoString == null) {
+  throw new ParseException("Invalid input.", -1);
+}
 return isoFormat.get().parse(isoString).getTime();
   }
 }
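
With the guard in place, a null timeout string fails fast with a ParseException rather than a NullPointerException, which the REST layer can report cleanly. A minimal caller-side sketch:

  try {
    Times.parseISO8601ToLocalTimeInMillis(null);
  } catch (java.text.ParseException e) {
    // prints: rejected: Invalid input.
    System.out.println("rejected: " + e.getMessage());
  }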

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74d0066d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index a0cdf68..74898ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -507,7 +507,8 @@ public class RMServerUtils {
 } catch (ParseException ex) {
   String message =
   "Expire time is not in ISO8601 format. ISO8601 supported "
-  + "format is -MM-dd'T'HH:mm:ss.SSSZ";
+  + "format is -MM-dd'T'HH:mm:ss.SSSZ. Configured "
+  + "timeout value is " + timeout.getValue();
   throw new YarnException(message, ex);
 }
 if (expireTime < currentTimeMillis) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74d0066d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index a46fb81..bd0602b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -2434,7 +2434,7 @@ public class RMWebServices extends WebServices {
   }
 
   @GET
-  @Path("/apps/{appid}/timeout/{type}")
+  @Path("/apps/{appid}/timeouts/{type}")
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
   MediaType.APPLICATION_XML