hadoop git commit: HADOOP-10101. Update guava dependency to the latest version. (ozawa)

2017-03-24 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1f6652443 -> 84ddedc0b


HADOOP-10101. Update guava dependency to the latest version. (ozawa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84ddedc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84ddedc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84ddedc0

Branch: refs/heads/trunk
Commit: 84ddedc0b2d58257d45c16ee5e83b15f94a7ba3a
Parents: 1f66524
Author: Tsuyoshi Ozawa 
Authored: Sat Mar 25 10:06:52 2017 +0900
Committer: Tsuyoshi Ozawa 
Committed: Sat Mar 25 10:06:52 2017 +0900

--
 .../main/java/org/apache/hadoop/fs/shell/XAttrCommands.java| 2 +-
 .../main/java/org/apache/hadoop/ha/ZKFailoverController.java   | 3 +--
 .../main/java/org/apache/hadoop/metrics2/AbstractMetric.java   | 3 ++-
 .../src/main/java/org/apache/hadoop/metrics2/MetricsTag.java   | 3 ++-
 .../org/apache/hadoop/metrics2/impl/AbstractMetricsRecord.java | 3 ++-
 .../src/main/java/org/apache/hadoop/metrics2/impl/MsInfo.java  | 4 ++--
 .../java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java   | 3 ++-
 .../java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java   | 4 ++--
 .../java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java | 6 +++---
 .../java/org/apache/hadoop/metrics2/util/MetricsCache.java | 4 ++--
 .../main/java/org/apache/hadoop/util/curator/ChildReaper.java  | 4 ++--
 .../org/apache/hadoop/hdfs/DFSInotifyEventInputStream.java | 4 ++--
 .../org/apache/hadoop/hdfs/server/datanode/DataStorage.java| 2 +-
 .../apache/hadoop/hdfs/server/namenode/AclTransformation.java  | 4 ++--
 .../org/apache/hadoop/hdfs/server/namenode/JournalSet.java | 2 +-
 .../apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java  | 2 +-
 .../hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java  | 2 +-
 hadoop-project/pom.xml | 2 +-
 .../records/impl/pb/ApplicationSubmissionContextPBImpl.java| 2 +-
 .../src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java| 4 ++--
 .../hadoop/yarn/server/resourcemanager/RMAppManager.java   | 6 +++---
 .../scheduler/fair/AllocationFileLoaderService.java| 2 +-
 .../server/resourcemanager/scheduler/fair/QueueManager.java| 2 +-
 23 files changed, 38 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84ddedc0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
index d55c80b..4505aa9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
@@ -67,7 +67,7 @@ class XAttrCommands extends FsCommand {
   "0x and 0s, respectively.\n" +
   ": The file or directory.\n";
 private final static Function<String, XAttrCodec> enValueOfFunc =
-Enums.valueOfFunction(XAttrCodec.class);
+Enums.stringConverter(XAttrCodec.class);
 
 private String name = null;
 private boolean dump = false;
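
A side note on the Guava migration above: Enums.valueOfFunction was removed in
newer Guava releases, and Enums.stringConverter returns a Converter<String, T>
that also implements Function<String, T>, so it can serve as a drop-in
replacement for string-to-enum lookup. A minimal sketch, using a hypothetical
Codec enum rather than the actual XAttrCodec class:

import com.google.common.base.Converter;
import com.google.common.base.Enums;

public class EnumsMigrationSketch {
  // Hypothetical enum standing in for XAttrCodec, for illustration only.
  enum Codec { TEXT, HEX, BASE64 }

  public static void main(String[] args) {
    // Before: Enums.valueOfFunction(Codec.class)  (removed in newer Guava)
    // After:  Enums.stringConverter(Codec.class)
    Converter<String, Codec> converter = Enums.stringConverter(Codec.class);
    Codec codec = converter.convert("HEX");
    System.out.println(codec);  // prints HEX
  }
}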

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84ddedc0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 0ed9158..055bcaa 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -55,7 +55,6 @@ import org.apache.zookeeper.data.ACL;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 @InterfaceAudience.LimitedPrivate("HDFS")
@@ -511,7 +510,7 @@ public abstract class ZKFailoverController {
   doFence(target);
 } catch (Throwable t) {
   recordActiveAttempt(new ActiveAttemptRecord(false, "Unable to fence old 
active: " + StringUtils.stringifyException(t)));
-  Throwables.propagate(t);
+  throw t;
 }
   }
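
A brief aside on the Throwables change above: Throwables.propagate is deprecated
in newer Guava, and with Java 7+ "precise rethrow" a caught Throwable can be
rethrown directly as long as the method's throws clause covers everything the
try block can actually throw. A minimal sketch of the pattern, not the actual
ZKFailoverController code:

import java.io.IOException;

public class RethrowSketch {
  // Stand-in for the fencing call; assumed to throw IOException for illustration.
  static void doFence() throws IOException {
    throw new IOException("fencing failed");
  }

  static void fenceAndRecord() throws IOException {
    try {
      doFence();
    } catch (Throwable t) {
      System.err.println("Unable to fence old active: " + t);
      // Precise rethrow: the compiler knows t can only be an IOException,
      // a RuntimeException, or an Error here, so "throws IOException" suffices.
      throw t;
    }
  }

  public static void main(String[] args) {
    try {
      fenceAndRecord();
    } catch (IOException e) {
      System.out.println("caught: " + e.getMessage());
    }
  }
}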
   


svn commit: r18902 - /release/hadoop/common/hadoop-2.6.0/

2017-03-24 Thread junping_du
Author: junping_du
Date: Sat Mar 25 00:36:59 2017
New Revision: 18902

Log:
Removing the old bits (2.6.0) in cache.

Removed:
release/hadoop/common/hadoop-2.6.0/


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r18901 - /release/hadoop/common/hadoop-2.5.2/

2017-03-24 Thread junping_du
Author: junping_du
Date: Sat Mar 25 00:36:36 2017
New Revision: 18901

Log:
Removing the old bits in cache.

Removed:
release/hadoop/common/hadoop-2.5.2/


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r18900 [5/5] - /release/hadoop/common/hadoop-2.8.0/

2017-03-24 Thread junping_du
Added: release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt.asc
==
--- release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt.asc (added)
+++ release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt.asc Sat Mar 25 
00:33:07 2017
@@ -0,0 +1,16 @@
+-BEGIN PGP SIGNATURE-
+
+iQIcBAABCgAGBQJYy5tvAAoJEOlrzVvTPRlcmdUP/jOlN8MoS5LqRVa/C+VKngce
+xdzhJHEQecsLb/6NVMBk67OV8fXXuy7v+mIY5NTnMFUZAl3eSKJoKiKLpsxko1fm
+p2Skjaepn7fgtkX1bkQf4e5WouIHlxt6HQYYs+tlz0iKRnUd0fTzfN0LUAYgzA62
+A/3PIco10t4oVELa4sT6AC16Pdzg/btH47ECPGf5vVnxIt10MHH9U8sFhZGFq9aK
+R7c1EwvL8eeXclvgoZMTcfXE3wRkTdYWBDQW2RtQn70t07XNqv5NaVynET9aUgA0
+09zbcmEsKz1Bpg+8GdNRt4I8/RIw657H4ykN+586+Mg3HEWqBYHBpsR7zyy41sDy
+tMGlJALjsqNMUxAfXAAncp0fANmTqajRLVwEx22yBS7bbDgxyS34KsnQ6h1eiQ7M
+eeginX+PnofHeIQaZ78nPe6G4199YebEvUg4pn0IrNjJLQcRucLQngZ1dtUMTHO2
+50Jx5MB+uJ6VRnRInsOQG0SsKOI9iG6XyyDKN6JZ1c44BUcLemLO6P8L0uaqMrNO
+UHduBrqnFsg5JM5xN3b+gKMmdAr1RZtLN6kPcIb2KbrZTQSsfY19Xl/heK93RQu2
+X6wVHPSnojTDLQMez4IY8K0SMSglqTfI0cHKbJp57r8OuYuQUYrZB83LyHLQhHLH
+D1nsPs8WGkis9P8Sfv+6
+=T1eW
+-END PGP SIGNATURE-

Added: release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt.md5
==
--- release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt.md5 (added)
+++ release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt.md5 Sat Mar 25 
00:33:07 2017
@@ -0,0 +1,2 @@
+$ /usr/bin/md5sum /build/source/target/artifacts/hadoop-2.8.0-rat.txt
+49bfd248ce828dc0dc9a210367bd6f85  
/build/source/target/artifacts/hadoop-2.8.0-rat.txt

Added: release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt.mds
==
--- release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt.mds (added)
+++ release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt.mds Sat Mar 25 
00:33:07 2017
@@ -0,0 +1,16 @@
+/build/source/target/artifacts/hadoop-2.8.0-rat.txt: 
+   MD5 = 49 BF D2 48 CE 82 8D C0  DC 9A 21 03 67 BD 6F 85
+/build/source/target/artifacts/hadoop-2.8.0-rat.txt: 
+  SHA1 = 272A 472A 98D4 3CBE 8FBB  14E8 6042 0775 99DD 9DF9
+/build/source/target/artifacts/hadoop-2.8.0-rat.txt: 
+RMD160 = F0E7 8B4E F832 4992 0616  47D7 5DAA 4802 D473 D094
+/build/source/target/artifacts/hadoop-2.8.0-rat.txt: 
+SHA224 = F05D59AD 7CC8949E 350FCC0D 8307C68F 804874DB DF5468BC 429F9E29
+/build/source/target/artifacts/hadoop-2.8.0-rat.txt: 
+SHA256 = 9F5EAC29 BA33DB03 BCA6E725 221000DE 8EC0F0CE 1D7ED003 A467DA13 
7E510D6E
+/build/source/target/artifacts/hadoop-2.8.0-rat.txt: 
+SHA384 = 620A2E94 72E6479C 8F2853B5 F712F368 97E8E4B1 07F71DDF 87C56F17 
4C0BE715
+ 8FB16104 448D8934 24E9D01D 30887E42
+/build/source/target/artifacts/hadoop-2.8.0-rat.txt: 
+SHA512 = FC4C071F 1C5304F9 6C45AC42 496E8EFC 32B4BE95 C6161962 56D89C98 
A821574E
+ 56CF7587 7639A447 BA5D205D EE63DAFE E7B9A9D4 71612039 3A039D22 
950ED07E

Added: release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-site.tar.gz
==
Binary file - no diff available.

Propchange: release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-site.tar.gz
--
svn:mime-type = application/octet-stream

Added: release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-site.tar.gz.asc
==
--- release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-site.tar.gz.asc (added)
+++ release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-site.tar.gz.asc Sat Mar 25 
00:33:07 2017
@@ -0,0 +1,16 @@
+-BEGIN PGP SIGNATURE-
+
+iQIcBAABCgAGBQJYy5twAAoJEOlrzVvTPRlcyUoP/0lTc/9O4Bj1ubpVOVcDS9CZ
+3jJ+6gKircOLeImrJzKhthzPf29NaYOJBW9/71j6ucWUVj8FHWAfqkm7woE9X03E
+faAL33Vr2qdwF81CRqd2rkyjKrkboJlmSe6sPMeECJjrGIgBeWma0RF7CVQtbAMx
+rhCxE42F9QA8lGnqD4X83aJG/itEoUxpVMUpBLIGWDjiIMNZCjglq6pANgU9HyTg
+vE6ASzv0pJrTpijELkCZeuRn1d82czm4FjrRQgMmgM1CKTYXvipR3sQv7KTs5J8t
+hdQGNYTMkxYQaWiMoDXNQdDwdWxdMrWtcVypA3ttRIW8c2ZNBj5m/KWi+B+O6k80
+QM43lAsfuowMjPlgjYYGPfNGV2EV8yKNwF6RomOO4HbJmk37B8CCAbddeBtt6uyv
+TGn8I0avuxj32RUhW58diY1mRrcWudcNx2Ryp2dXoZxhL2h36NMjLXNYZZWsmTz2
+4Ow9+tLtEargEM1ef+ij0CF7KFixTU4InEK01s30XDhCAm139Sl8rfBU4GVTnAq4
+tYnUVNqJx3f337oAos3P35oXbp4VU+UZdL3Q0Ubg84qapVPP7R0XmJn04jweCDOs
+w/DQQ7aPjXwb9Qt4au/ABRUGOQX/zHpRNxamznLeDHZ0k+XFDQsHnkSzLomiiRAU
+YSxvwoytHcv09Ql5TLbF
+=GVrB
+-END PGP SIGNATURE-

Added: release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-site.tar.gz.md5
==
--- release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-site.tar.gz.md5 (added)
+++ release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-site.tar.gz.md5 Sat Mar 25 
00:33:07 2017
@@ -0,0 +1,2 @@
+$ /usr/bin/md5sum /build/source/target/artifacts/hadoop-2.8.0-site.tar.gz
+2c6078a0d92e331ecca9c423c7f8f127  

svn commit: r18900 [3/5] - /release/hadoop/common/hadoop-2.8.0/

2017-03-24 Thread junping_du
Added: release/hadoop/common/hadoop-2.8.0/CHANGES.md.asc
==
--- release/hadoop/common/hadoop-2.8.0/CHANGES.md.asc (added)
+++ release/hadoop/common/hadoop-2.8.0/CHANGES.md.asc Sat Mar 25 00:33:07 2017
@@ -0,0 +1,16 @@
+-BEGIN PGP SIGNATURE-
+
+iQIcBAABCgAGBQJYy5toAAoJEOlrzVvTPRlcQ2IP/RykeWpDhN0YNI/mqqBX6kjo
+3uG5tPJXOf4tiTukTbKSmG1UzLNBPO4XuH0KVu3gMjErB1o1P4Qgcp9qIfCKqAb6
+FiDegBmFR9yK848JLKJ4lbkgbimHWbpK6avQip2+CaNRnVn3+TLZvxQnBuSAA6kA
+G2pmItCootmd9yirMtw9AwftIZ+mgB6MB0AthViV0J7wTIknItDR7aYnD4prxZMP
+lyFpevPYGzgHR8YgjuMpzQTkgtbcPlLdONzkc30y1xtJ1i7P+RSNv856c+H4fz7l
+pkDI/zj5jarnQ2opqpbrnzjiUsNl74osL7hnZ6pzEP1pd8u1rXPA5WsOvFA0NPOw
+XZzjRtbBJhSFLGEi5jXaNJ1OTYxsIDRhWV1sPYZuhdrz/X88m1Dsd28ph49klD0n
+AqA0LOXH0LfPY8HxTIMLJK9VHHKYCk6qN67hPMh8E3JG5EvdrXJjX1a3Ku76tnXR
+aQHGoTGTpkpY8a8P8VYnW+RHq7QsywNjigUzO7YzGxOLwoWk17CnhiVj0MS3ebRG
+tzqrsEfntg0eNykbTVGT3N+U5K0DFIVo/8fGcBB6ll4X2qkFZs0M4sW2Q1+0662h
+hvolsJwvncx8EQPmN+fnm+K1GMPpNtvzXZDG5NYsgbHXti/4dTFtaT0yS0mwtQfo
+grPuUJzL9rGh4cXGkl7k
+=XZP2
+-END PGP SIGNATURE-

Added: release/hadoop/common/hadoop-2.8.0/CHANGES.md.md5
==
--- release/hadoop/common/hadoop-2.8.0/CHANGES.md.md5 (added)
+++ release/hadoop/common/hadoop-2.8.0/CHANGES.md.md5 Sat Mar 25 00:33:07 2017
@@ -0,0 +1,2 @@
+$ /usr/bin/md5sum /build/source/target/artifacts/CHANGES.md
+376155c468f86a527a887555bd3755ba  /build/source/target/artifacts/CHANGES.md

Added: release/hadoop/common/hadoop-2.8.0/CHANGES.md.mds
==
--- release/hadoop/common/hadoop-2.8.0/CHANGES.md.mds (added)
+++ release/hadoop/common/hadoop-2.8.0/CHANGES.md.mds Sat Mar 25 00:33:07 2017
@@ -0,0 +1,16 @@
+/build/source/target/artifacts/CHANGES.md: 
+   MD5 = 37 61 55 C4 68 F8 6A 52  7A 88 75 55 BD 37 55 BA
+/build/source/target/artifacts/CHANGES.md: 
+  SHA1 = 1C18 57B6 C684 76C6 EF99  F577 F514 5C5E 2A4D 09DC
+/build/source/target/artifacts/CHANGES.md: 
+RMD160 = 8AA9 B545 131B 6747 4B58  745A 1402 FA07 27BC 5F04
+/build/source/target/artifacts/CHANGES.md: 
+SHA224 = 5172166A E59B61F6 2FCC91A5 1397A390 A9881D9D 6BB30ED7 A3F2A7D7
+/build/source/target/artifacts/CHANGES.md: 
+SHA256 = E967F4E7 51A0845F EB88DCCA 6CD4B61A D0546769 0AEC0471 89661C02 
90FA8570
+/build/source/target/artifacts/CHANGES.md: 
+SHA384 = 3030F392 698DDB77 8072EA25 1D02381C 061520A8 055EBA54 94C00BD5 
C5C8DDFD
+ F1DFE49B E95F44CC 14A5E895 A20785A5
+/build/source/target/artifacts/CHANGES.md: 
+SHA512 = BC437A6E 4ED509DF B78E0726 601EAF15 9CBF267C FA5E1C53 68DC1819 
4854AADF
+ 01E49AF8 AF81A4BF 64CCFD1E C780BB07 AFB83AB4 74BD9F29 4DE6C603 
796720CA

Added: release/hadoop/common/hadoop-2.8.0/RELEASENOTES.md
==
--- release/hadoop/common/hadoop-2.8.0/RELEASENOTES.md (added)
+++ release/hadoop/common/hadoop-2.8.0/RELEASENOTES.md Sat Mar 25 00:33:07 2017
@@ -0,0 +1,1105 @@
+
+
+# "Apache Hadoop"  2.8.0 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-7713](https://issues.apache.org/jira/browse/HADOOP-7713) | *Trivial* 
| **dfs -count -q should label output column**
+
+Added -v option to fs -count command to display a header record in the report.
+
+
+---
+
+* [HADOOP-8934](https://issues.apache.org/jira/browse/HADOOP-8934) | *Minor* | 
**Shell command ls should include sort options**
+
+Options to sort output of the fs -ls command: -t (mtime), -S (size), -u (atime), -r (reverse)
+
+
+---
+
+* [HADOOP-11226](https://issues.apache.org/jira/browse/HADOOP-11226) | *Major* 
| **Add a configuration to set ipc.Client's traffic class with 
IPTOS\_LOWDELAY\|IPTOS\_RELIABILITY**
+
+Use low latency TCP connections for hadoop IPC
+
+
+---
+
+* [HADOOP-9477](https://issues.apache.org/jira/browse/HADOOP-9477) | *Major* | 
**Add posixGroups support for LDAP groups mapping service**
+
+Add posixGroups support for the LDAP groups mapping service. The change in 
LDAPGroupMapping is backward compatible with the existing behavior. In LDAP, the 
mapping between {{posixAccount}} and {{posixGroup}} differs from the general 
LDAPGroupMapping; in particular, the {{"memberUid"}} attribute is used to map 
{{posixAccount}} entries to {{posixGroup}} entries. The feature handles this 
mapping internally when {{hadoop.security.group.mapping.ldap.search.filter.user}} 
is set to "posixAccount" and {{hadoop.security.group.mapping.ldap.search.filter.group}} 
is set to "posixGroup".
+
+
+---
+
+* [YARN-3241](https://issues.apache.org/jira/browse/YARN-3241) | *Major* | 
**FairScheduler handles "invalid" queue names inconsistently**
+
FairScheduler no longer allows queue names with leading or trailing spaces, or 
empty sub-queue names.
+
+
+---
+
+* 

svn commit: r18900 [1/5] - /release/hadoop/common/hadoop-2.8.0/

2017-03-24 Thread junping_du
Author: junping_du
Date: Sat Mar 25 00:33:07 2017
New Revision: 18900

Log:
Publishing the bits for release 2.8.0

Added:
release/hadoop/common/hadoop-2.8.0/
release/hadoop/common/hadoop-2.8.0/CHANGES.md
release/hadoop/common/hadoop-2.8.0/CHANGES.md.asc
release/hadoop/common/hadoop-2.8.0/CHANGES.md.md5
release/hadoop/common/hadoop-2.8.0/CHANGES.md.mds
release/hadoop/common/hadoop-2.8.0/RELEASENOTES.md
release/hadoop/common/hadoop-2.8.0/RELEASENOTES.md.asc
release/hadoop/common/hadoop-2.8.0/RELEASENOTES.md.md5
release/hadoop/common/hadoop-2.8.0/RELEASENOTES.md.mds
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt.asc
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt.md5
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt.mds
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-site.tar.gz   (with props)
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-site.tar.gz.asc
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-site.tar.gz.md5
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-site.tar.gz.mds
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-src.tar.gz   (with props)
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-src.tar.gz.asc
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-src.tar.gz.md5
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-src.tar.gz.mds
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz.asc
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz.md5
release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz.mds


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r18900 [4/5] - /release/hadoop/common/hadoop-2.8.0/

2017-03-24 Thread junping_du
Added: release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt
==
--- release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt (added)
+++ release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-rat.txt Sat Mar 25 00:33:07 
2017
@@ -0,0 +1,10792 @@
+
+*
+Summary
+---
+Generated at: 2017-03-17T03:58:13+00:00
+Notes: 0
+Binaries: 0
+Archives: 0
+Standards: 11
+
+Apache Licensed: 11
+Generated Documents: 0
+
+JavaDocs are generated and so license header is optional
+Generated files do not require license headers
+
+0 Unknown Licenses
+
+***
+
+Unapproved licenses:
+
+
+***
+
+Archives:
+
+*
+  Files with Apache License headers will be marked AL
+  Binary files (which do not require AL headers) will be marked B
+  Compressed archives will be marked A
+  Notices, licenses etc will be marked N
+  AL/build/source/hadoop-assemblies/pom.xml
+  AL
/build/source/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
+  AL
/build/source/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml
+  AL
/build/source/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
+  AL
/build/source/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml
+  AL
/build/source/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml
+  AL
/build/source/hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml
+  AL
/build/source/hadoop-assemblies/src/main/resources/assemblies/hadoop-sls.xml
+  AL
/build/source/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
+  AL
/build/source/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
+  AL
/build/source/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+ 
+*
+ Printing headers for files without AL header...
+ 
+ 
+*
+Summary
+---
+Generated at: 2017-03-17T03:58:13+00:00
+Notes: 0
+Binaries: 0
+Archives: 0
+Standards: 3
+
+Apache Licensed: 3
+Generated Documents: 0
+
+JavaDocs are generated and so license header is optional
+Generated files do not require license headers
+
+0 Unknown Licenses
+
+***
+
+Unapproved licenses:
+
+
+***
+
+Archives:
+
+*
+  Files with Apache License headers will be marked AL
+  Binary files (which do not require AL headers) will be marked B
+  Compressed archives will be marked A
+  Notices, licenses etc will be marked N
+  AL/build/source/hadoop-build-tools/pom.xml
+  AL
/build/source/hadoop-build-tools/src/main/resources/checkstyle/checkstyle-noframes-sorted.xsl
+  AL
/build/source/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
+ 
+*
+ Printing headers for files without AL header...
+ 
+ 
+*
+Summary
+---
+Generated at: 2017-03-17T03:59:42+00:00
+Notes: 0
+Binaries: 0
+Archives: 0
+Standards: 1
+
+Apache Licensed: 1
+Generated Documents: 0
+
+JavaDocs are generated and so license header is optional
+Generated files do not require license headers
+
+0 Unknown Licenses
+
+***
+
+Unapproved licenses:
+
+
+***
+
+Archives:
+
+*
+  Files with Apache License headers will be marked AL
+  Binary files (which do not require AL headers) will be marked B
+  Compressed archives will be marked A
+  Notices, licenses etc will be marked N
+  AL/build/source/hadoop-client/pom.xml
+ 
+*
+ Printing headers for files without AL header...
+ 
+ 
+*
+Summary
+---
+Generated at: 2017-03-17T03:58:13+00:00
+Notes: 0
+Binaries: 0
+Archives: 0
+Standards: 10
+
+Apache Licensed: 10
+Generated Documents: 0
+
+JavaDocs are generated and so license header is optional
+Generated files do not require license headers
+
+0 Unknown Licenses
+
+***
+
+Unapproved licenses:
+
+
+***
+
+Archives:
+
+*
+  Files with Apache License headers will be marked AL
+  Binary files (which do not require AL headers) will be marked B
+  Compressed archives will be marked A
+  Notices, licenses etc will be marked N
+  AL/build/source/hadoop-common-project/hadoop-annotations/pom.xml
+  AL
/build/source/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceAudience.java
+  AL

svn commit: r18900 [2/5] - /release/hadoop/common/hadoop-2.8.0/

2017-03-24 Thread junping_du

Added: release/hadoop/common/hadoop-2.8.0/CHANGES.md
==
--- release/hadoop/common/hadoop-2.8.0/CHANGES.md (added)
+++ release/hadoop/common/hadoop-2.8.0/CHANGES.md Sat Mar 25 00:33:07 2017
@@ -0,0 +1,2986 @@
+
+
+# "Apache Hadoop" Changelog
+
+## Release 2.8.0 - 2017-03-17
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-3241](https://issues.apache.org/jira/browse/YARN-3241) | FairScheduler 
handles "invalid" queue names inconsistently |  Major | fairscheduler | zhihai 
xu | zhihai xu |
+| [HADOOP-11731](https://issues.apache.org/jira/browse/HADOOP-11731) | Rework 
the changelog and releasenotes |  Major | documentation | Allen Wittenauer | 
Allen Wittenauer |
+| [HADOOP-11746](https://issues.apache.org/jira/browse/HADOOP-11746) | rewrite 
test-patch.sh |  Major | build, test | Allen Wittenauer | Allen Wittenauer |
+| [HDFS-8226](https://issues.apache.org/jira/browse/HDFS-8226) | Non-HA 
rollback compatibility broken |  Blocker | . | J.Andreina | J.Andreina |
+| [HADOOP-11772](https://issues.apache.org/jira/browse/HADOOP-11772) | RPC 
Invoker relies on static ClientCache which has synchronized(this) blocks |  
Major | ipc, performance | Gopal V | Haohui Mai |
+| [YARN-2336](https://issues.apache.org/jira/browse/YARN-2336) | Fair 
scheduler REST api returns a missing '[' bracket JSON for deep queue tree |  
Major | fairscheduler | Kenji Kikushima | Akira Ajisaka |
+| [YARN-41](https://issues.apache.org/jira/browse/YARN-41) | The RM should 
handle the graceful shutdown of the NM. |  Major | nodemanager, resourcemanager 
| Ravi Teja Ch N V | Devaraj K |
+| [HDFS-6564](https://issues.apache.org/jira/browse/HDFS-6564) | Use slf4j 
instead of common-logging in hdfs-client |  Major | build | Haohui Mai | Rakesh 
R |
+| [MAPREDUCE-6427](https://issues.apache.org/jira/browse/MAPREDUCE-6427) | Fix 
typo in JobHistoryEventHandler |  Minor | . | Brahma Reddy Battula | Ray Chiang 
|
+| [HADOOP-12269](https://issues.apache.org/jira/browse/HADOOP-12269) | Update 
aws-sdk dependency to 1.10.6; move to aws-sdk-s3 |  Major | fs/s3 | Thomas 
Demoor | Thomas Demoor |
+| [HDFS-8900](https://issues.apache.org/jira/browse/HDFS-8900) | Compact 
XAttrs to optimize memory footprint. |  Major | namenode | Yi Liu | Yi Liu |
+| [YARN-4087](https://issues.apache.org/jira/browse/YARN-4087) | Followup 
fixes after YARN-2019 regarding RM behavior when state-store error occurs |  
Major | . | Jian He | Jian He |
+| [HADOOP-12416](https://issues.apache.org/jira/browse/HADOOP-12416) | Trash 
messages should be handled by Logger instead of being delivered on System.out | 
 Major | trash | Ashutosh Chauhan | Mingliang Liu |
+| [HDFS-9063](https://issues.apache.org/jira/browse/HDFS-9063) | Correctly 
handle snapshot path for getContentSummary |  Major | namenode | Jing Zhao | 
Jing Zhao |
+| [HDFS-9433](https://issues.apache.org/jira/browse/HDFS-9433) | DFS 
getEZForPath API on a non-existent file should throw FileNotFoundException |  
Major | encryption | Rakesh R | Rakesh R |
+| [HADOOP-11252](https://issues.apache.org/jira/browse/HADOOP-11252) | RPC 
client does not time out by default |  Critical | ipc | Wilfred Spiegelenburg | 
Masatake Iwasaki |
+| [HDFS-9047](https://issues.apache.org/jira/browse/HDFS-9047) | Retire 
libwebhdfs |  Major | webhdfs | Allen Wittenauer | Haohui Mai |
+| [HADOOP-12651](https://issues.apache.org/jira/browse/HADOOP-12651) | Replace 
dev-support with wrappers to Yetus |  Major | scripts | Allen Wittenauer | 
Allen Wittenauer |
+| [HADOOP-12552](https://issues.apache.org/jira/browse/HADOOP-12552) | Fix 
undeclared/unused dependency to httpclient |  Minor | build | Masatake Iwasaki 
| Masatake Iwasaki |
+| [HADOOP-11792](https://issues.apache.org/jira/browse/HADOOP-11792) | Remove 
all of the CHANGES.txt files |  Major | build | Allen Wittenauer | Andrew Wang |
+| [YARN-5035](https://issues.apache.org/jira/browse/YARN-5035) | 
FairScheduler: Adjust maxAssign dynamically when assignMultiple is turned on |  
Major | fairscheduler | Karthik Kambatla | Karthik Kambatla |
+| [HADOOP-12892](https://issues.apache.org/jira/browse/HADOOP-12892) | 
fix/rewrite create-release |  Blocker | build | Allen Wittenauer | Allen 
Wittenauer |
+| [HADOOP-13139](https://issues.apache.org/jira/browse/HADOOP-13139) | 
Branch-2: S3a to use thread pool that blocks clients |  Major | fs/s3 | Pieter 
Reuse | Pieter Reuse |
+| [HADOOP-13382](https://issues.apache.org/jira/browse/HADOOP-13382) | remove 
unneeded commons-httpclient dependencies from POM files in Hadoop and 
sub-projects |  Major | build | Matt Foley | Matt Foley |
+| [HDFS-7933](https://issues.apache.org/jira/browse/HDFS-7933) | fsck should 
also report decommissioning replicas. |  Major | namenode | Jitendra Nath 
Pandey | Xiaoyu Yao |
+| 

hadoop git commit: HADOOP-14211. FilterFs and ChRootedFs are too aggressive about enforcing 'authorityNeeded'. Contributed by Erik Krogen.

2017-03-24 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 4cc53b51f -> 295aab8e3


HADOOP-14211. FilterFs and ChRootedFs are too aggressive about enforcing 
'authorityNeeded'. Contributed by Erik Krogen.

(cherry picked from commit 0e556a5ba645570d381beca60114a1239b27d49f)
(cherry picked from commit 96fe940e59127dc7c3e4182c3ed450c3cd8d858e)
(cherry picked from commit 5130128a31a75587f6ccca08c487e44f73685227)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/295aab8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/295aab8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/295aab8e

Branch: refs/heads/branch-2.7
Commit: 295aab8e3a877f64e1337cab5d4e0888a35336dd
Parents: 4cc53b5
Author: Andrew Wang 
Authored: Fri Mar 24 11:12:02 2017 -0700
Committer: Zhe Zhang 
Committed: Fri Mar 24 11:38:39 2017 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../src/main/java/org/apache/hadoop/fs/FilterFs.java|  3 +--
 .../java/org/apache/hadoop/fs/viewfs/ChRootedFs.java|  3 +--
 .../test/java/org/apache/hadoop/fs/TestFilterFs.java| 12 
 4 files changed, 17 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/295aab8e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5e1e725..9ded0e2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -123,6 +123,9 @@ Release 2.7.4 - UNRELEASED
 HADOOP-9631. ViewFs should use underlying FileSystem's server side 
defaults.
 (Lohit Vijayarenu and Erik Krogen via zhz)
 
+HADOOP-14211. FilterFs and ChRootedFs are too aggressive about enforcing
+"authorityNeeded". (Erik Krogen via wang)
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295aab8e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
index 636594e..b13bbe4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
@@ -56,8 +56,7 @@ public abstract class FilterFs extends AbstractFileSystem {
   }
   
   protected FilterFs(AbstractFileSystem fs) throws URISyntaxException {
-super(fs.getUri(), fs.getUri().getScheme(),
-fs.getUri().getAuthority() != null, fs.getUriDefaultPort());
+super(fs.getUri(), fs.getUri().getScheme(), false, fs.getUriDefaultPort());
 myFs = fs;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295aab8e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
index d0f156d..8d46210 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
@@ -99,8 +99,7 @@ class ChRootedFs extends AbstractFileSystem {
 
   public ChRootedFs(final AbstractFileSystem fs, final Path theRoot)
 throws URISyntaxException {
-super(fs.getUri(), fs.getUri().getScheme(),
-fs.getUri().getAuthority() != null, fs.getUriDefaultPort());
+super(fs.getUri(), fs.getUri().getScheme(), false, fs.getUriDefaultPort());
 myFs = fs;
 myFs.checkPath(theRoot);
 chRootPathPart = new Path(myFs.getUriPath(theRoot));
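
For context on the authorityNeeded flag set to false above: in
AbstractFileSystem's constructor that flag, when true, requires the supplied
URI to carry an authority (host[:port]). The wrappers FilterFs and ChRootedFs
now pass false and leave authority validation to the wrapped filesystem itself.
An illustrative sketch of what such a check enforces (not the Hadoop source):

import java.net.URI;
import java.net.URISyntaxException;

public class AuthorityCheckSketch {
  // Rough approximation of an authorityNeeded check, for illustration only.
  static URI checkUri(URI uri, boolean authorityNeeded) throws URISyntaxException {
    if (authorityNeeded && uri.getAuthority() == null) {
      throw new URISyntaxException(uri.toString(), "authority (host[:port]) is required");
    }
    return uri;
  }

  public static void main(String[] args) throws URISyntaxException {
    URI noAuthority = URI.create("file:///tmp");      // e.g. the local filesystem
    System.out.println(checkUri(noAuthority, false)); // acceptable for a wrapper
    try {
      checkUri(noAuthority, true);                    // the enforcement the wrappers no longer request
    } catch (URISyntaxException e) {
      System.out.println("rejected: " + e.getReason());
    }
  }
}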

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295aab8e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
index 27d093c..a2f0905 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
@@ -25,6 +25,8 @@ import 

[1/2] hadoop git commit: HDFS-11570. Unit test for NameNodeStatusMXBean. Contributed by Hanisha Koneru.

2017-03-24 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 757d9ebcc -> a79ad2d12
  refs/heads/trunk 1168ece59 -> 1f6652443


HDFS-11570. Unit test for NameNodeStatusMXBean. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f665244
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f665244
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f665244

Branch: refs/heads/trunk
Commit: 1f66524432edf37e28ec03aba8d12b22ce0ef60c
Parents: 1168ece
Author: Hanisha Koneru 
Authored: Fri Mar 24 14:44:25 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Mar 24 14:44:25 2017 -0700

--
 .../namenode/TestNameNodeStatusMXBean.java  | 93 
 1 file changed, 93 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f665244/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
new file mode 100644
index 000..c03dc20
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.TestDataNodeMXBean;
+import org.junit.Assert;
+import org.junit.Test;
+
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+import java.lang.management.ManagementFactory;
+
+/**
+ * Class for testing {@link NameNodeStatusMXBean} implementation.
+ */
+public class TestNameNodeStatusMXBean {
+
+  public static final Log LOG = LogFactory.getLog(
+  TestNameNodeStatusMXBean.class);
+
+  @Test(timeout = 12L)
+  public void testDataNodeMXBean() throws Exception {
+Configuration conf = new Configuration();
+MiniDFSCluster cluster = null;
+
+try {
+  cluster = new MiniDFSCluster.Builder(conf).build();
+  cluster.waitActive();
+
+  NameNode nn = cluster.getNameNode();
+
+  MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+  ObjectName mxbeanName = new ObjectName(
+  "Hadoop:service=NameNode,name=NameNodeStatus");
+
+  // Get attribute "NNRole"
+  String nnRole = (String)mbs.getAttribute(mxbeanName, "NNRole");
+  Assert.assertEquals(nn.getNNRole(), nnRole);
+
+  // Get attribute "State"
+  String state = (String)mbs.getAttribute(mxbeanName, "State");
+  Assert.assertEquals(nn.getState(), state);
+
+  // Get attribute "HostAndPort"
+  String hostAndPort = (String)mbs.getAttribute(mxbeanName, "HostAndPort");
+  Assert.assertEquals(nn.getHostAndPort(), hostAndPort);
+
+  // Get attribute "SecurityEnabled"
+  boolean securityEnabled = (boolean)mbs.getAttribute(mxbeanName,
+  "SecurityEnabled");
+  Assert.assertEquals(nn.isSecurityEnabled(), securityEnabled);
+
+  // Get attribute "LastHATransitionTime"
+  long lastHATransitionTime = (long)mbs.getAttribute(mxbeanName,
+  "LastHATransitionTime");
+  Assert.assertEquals(nn.getLastHATransitionTime(), lastHATransitionTime);
+
+  // Get attribute "BytesWithFutureGenerationStamps"
+  long bytesWithFutureGenerationStamps = (long)mbs.getAttribute(
+  mxbeanName, "BytesWithFutureGenerationStamps");
+  Assert.assertEquals(nn.getBytesWithFutureGenerationStamps(),
+  bytesWithFutureGenerationStamps);
+
+  // Get attribute "SlowPeersReport"
+  String 

[2/2] hadoop git commit: HDFS-11570. Unit test for NameNodeStatusMXBean. Contributed by Hanisha Koneru.

2017-03-24 Thread arp
HDFS-11570. Unit test for NameNodeStatusMXBean. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a79ad2d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a79ad2d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a79ad2d1

Branch: refs/heads/branch-2
Commit: a79ad2d12296859ee9af828f62332b0eacfd1bab
Parents: 757d9eb
Author: Hanisha Koneru 
Authored: Fri Mar 24 14:44:25 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Mar 24 14:44:33 2017 -0700

--
 .../namenode/TestNameNodeStatusMXBean.java  | 93 
 1 file changed, 93 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a79ad2d1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
new file mode 100644
index 000..c03dc20
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.TestDataNodeMXBean;
+import org.junit.Assert;
+import org.junit.Test;
+
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+import java.lang.management.ManagementFactory;
+
+/**
+ * Class for testing {@link NameNodeStatusMXBean} implementation.
+ */
+public class TestNameNodeStatusMXBean {
+
+  public static final Log LOG = LogFactory.getLog(
+  TestNameNodeStatusMXBean.class);
+
+  @Test(timeout = 12L)
+  public void testDataNodeMXBean() throws Exception {
+Configuration conf = new Configuration();
+MiniDFSCluster cluster = null;
+
+try {
+  cluster = new MiniDFSCluster.Builder(conf).build();
+  cluster.waitActive();
+
+  NameNode nn = cluster.getNameNode();
+
+  MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+  ObjectName mxbeanName = new ObjectName(
+  "Hadoop:service=NameNode,name=NameNodeStatus");
+
+  // Get attribute "NNRole"
+  String nnRole = (String)mbs.getAttribute(mxbeanName, "NNRole");
+  Assert.assertEquals(nn.getNNRole(), nnRole);
+
+  // Get attribute "State"
+  String state = (String)mbs.getAttribute(mxbeanName, "State");
+  Assert.assertEquals(nn.getState(), state);
+
+  // Get attribute "HostAndPort"
+  String hostAndPort = (String)mbs.getAttribute(mxbeanName, "HostAndPort");
+  Assert.assertEquals(nn.getHostAndPort(), hostAndPort);
+
+  // Get attribute "SecurityEnabled"
+  boolean securityEnabled = (boolean)mbs.getAttribute(mxbeanName,
+  "SecurityEnabled");
+  Assert.assertEquals(nn.isSecurityEnabled(), securityEnabled);
+
+  // Get attribute "LastHATransitionTime"
+  long lastHATransitionTime = (long)mbs.getAttribute(mxbeanName,
+  "LastHATransitionTime");
+  Assert.assertEquals(nn.getLastHATransitionTime(), lastHATransitionTime);
+
+  // Get attribute "BytesWithFutureGenerationStamps"
+  long bytesWithFutureGenerationStamps = (long)mbs.getAttribute(
+  mxbeanName, "BytesWithFutureGenerationStamps");
+  Assert.assertEquals(nn.getBytesWithFutureGenerationStamps(),
+  bytesWithFutureGenerationStamps);
+
+  // Get attribute "SlowPeersReport"
+  String slowPeersReport = (String)mbs.getAttribute(mxbeanName,
+  "SlowPeersReport");
+  

[1/2] hadoop git commit: HDFS-11534. Add counters for number of blocks in pending IBR. Contributed by Xiaobing Zhou.

2017-03-24 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6209e4c91 -> 757d9ebcc
  refs/heads/trunk d1b7439b4 -> 1168ece59


HDFS-11534. Add counters for number of blocks in pending IBR. Contributed by 
Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1168ece5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1168ece5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1168ece5

Branch: refs/heads/trunk
Commit: 1168ece59640d8ad3166e355d2e82deec2fbaf14
Parents: d1b7439
Author: Xiaobing Zhou 
Authored: Fri Mar 24 14:33:44 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Mar 24 14:33:44 2017 -0700

--
 .../hdfs/server/datanode/BPServiceActor.java|   8 +-
 .../datanode/IncrementalBlockReportManager.java |  41 +-
 .../datanode/metrics/DataNodeMetrics.java   |  35 +
 .../datanode/TestBlockCountersInPendingIBR.java | 146 +++
 4 files changed, 223 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1168ece5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index ee5eb72..ddc28b7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -126,7 +126,9 @@ class BPServiceActor implements Runnable {
 this.initialRegistrationComplete = lifelineNnAddr != null ?
 new CountDownLatch(1) : null;
 this.dnConf = dn.getDnConf();
-this.ibrManager = new IncrementalBlockReportManager(dnConf.ibrInterval);
+this.ibrManager = new IncrementalBlockReportManager(
+dnConf.ibrInterval,
+dn.getMetrics());
 prevBlockReportId = ThreadLocalRandom.current().nextLong();
 scheduler = new Scheduler(dnConf.heartBeatInterval,
 dnConf.getLifelineIntervalMs(), dnConf.blockReportInterval,
@@ -350,7 +352,7 @@ class BPServiceActor implements Runnable {
 // or we will report an RBW replica after the BlockReport already reports
 // a FINALIZED one.
 ibrManager.sendIBRs(bpNamenode, bpRegistration,
-bpos.getBlockPoolId(), dn.getMetrics());
+bpos.getBlockPoolId());
 
 long brCreateStartTime = monotonicNow();
 Map perVolumeBlockLists =
@@ -678,7 +680,7 @@ class BPServiceActor implements Runnable {
 }
 if (ibrManager.sendImmediately() || sendHeartbeat) {
   ibrManager.sendIBRs(bpNamenode, bpRegistration,
-  bpos.getBlockPoolId(), dn.getMetrics());
+  bpos.getBlockPoolId());
 }
 
 List cmds = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1168ece5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
index e95142d..1779374 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
@@ -52,6 +52,11 @@ class IncrementalBlockReportManager {
 /** The blocks in this IBR. */
 final Map<Block, ReceivedDeletedBlockInfo> blocks = Maps.newHashMap();
 
+private DataNodeMetrics dnMetrics;
+PerStorageIBR(final DataNodeMetrics dnMetrics) {
+  this.dnMetrics = dnMetrics;
+}
+
 /**
  * Remove the given block from this IBR
  * @return true if the block was removed; otherwise, return false.
@@ -76,6 +81,25 @@ class IncrementalBlockReportManager {
 /** Put the block to this IBR. */
 void put(ReceivedDeletedBlockInfo rdbi) {
   blocks.put(rdbi.getBlock(), rdbi);
+  increaseBlocksCounter(rdbi);
+}
+
+private void increaseBlocksCounter(
+final ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
+  switch (receivedDeletedBlockInfo.getStatus()) {
+  case RECEIVING_BLOCK:
+dnMetrics.incrBlocksReceivingInPendingIBR();
+break;
+  case RECEIVED_BLOCK:

[2/2] hadoop git commit: HDFS-11534. Add counters for number of blocks in pending IBR. Contributed by Xiaobing Zhou.

2017-03-24 Thread arp
HDFS-11534. Add counters for number of blocks in pending IBR. Contributed by 
Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/757d9ebc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/757d9ebc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/757d9ebc

Branch: refs/heads/branch-2
Commit: 757d9ebcc59ebdce32991005ec1f2d4565b235f0
Parents: 6209e4c
Author: Xiaobing Zhou 
Authored: Fri Mar 24 14:33:44 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Mar 24 14:40:00 2017 -0700

--
 .../hdfs/server/datanode/BPServiceActor.java|   8 +-
 .../datanode/IncrementalBlockReportManager.java |  41 +-
 .../datanode/metrics/DataNodeMetrics.java   |  36 +
 .../datanode/TestBlockCountersInPendingIBR.java | 146 +++
 4 files changed, 224 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/757d9ebc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index adbf025..ec8c79b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -125,7 +125,9 @@ class BPServiceActor implements Runnable {
 this.initialRegistrationComplete = lifelineNnAddr != null ?
 new CountDownLatch(1) : null;
 this.dnConf = dn.getDnConf();
-this.ibrManager = new IncrementalBlockReportManager(dnConf.ibrInterval);
+this.ibrManager = new IncrementalBlockReportManager(
+dnConf.ibrInterval,
+dn.getMetrics());
 prevBlockReportId = ThreadLocalRandom.current().nextLong();
 scheduler = new Scheduler(dnConf.heartBeatInterval,
 dnConf.getLifelineIntervalMs(), dnConf.blockReportInterval,
@@ -349,7 +351,7 @@ class BPServiceActor implements Runnable {
 // or we will report an RBW replica after the BlockReport already reports
 // a FINALIZED one.
 ibrManager.sendIBRs(bpNamenode, bpRegistration,
-bpos.getBlockPoolId(), dn.getMetrics());
+bpos.getBlockPoolId());
 
 long brCreateStartTime = monotonicNow();
 Map perVolumeBlockLists =
@@ -672,7 +674,7 @@ class BPServiceActor implements Runnable {
 }
 if (ibrManager.sendImmediately() || sendHeartbeat) {
   ibrManager.sendIBRs(bpNamenode, bpRegistration,
-  bpos.getBlockPoolId(), dn.getMetrics());
+  bpos.getBlockPoolId());
 }
 
 List cmds = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757d9ebc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
index e95142d..1779374 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
@@ -52,6 +52,11 @@ class IncrementalBlockReportManager {
 /** The blocks in this IBR. */
 final Map<Block, ReceivedDeletedBlockInfo> blocks = Maps.newHashMap();
 
+private DataNodeMetrics dnMetrics;
+PerStorageIBR(final DataNodeMetrics dnMetrics) {
+  this.dnMetrics = dnMetrics;
+}
+
 /**
  * Remove the given block from this IBR
  * @return true if the block was removed; otherwise, return false.
@@ -76,6 +81,25 @@ class IncrementalBlockReportManager {
 /** Put the block to this IBR. */
 void put(ReceivedDeletedBlockInfo rdbi) {
   blocks.put(rdbi.getBlock(), rdbi);
+  increaseBlocksCounter(rdbi);
+}
+
+private void increaseBlocksCounter(
+final ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
+  switch (receivedDeletedBlockInfo.getStatus()) {
+  case RECEIVING_BLOCK:
+dnMetrics.incrBlocksReceivingInPendingIBR();
+break;
+  case RECEIVED_BLOCK:
+dnMetrics.incrBlocksReceivedInPendingIBR();
+break;
+  case DELETED_BLOCK:
+

hadoop git commit: YARN-6334. TestRMFailover#testAutomaticFailover always passes even when it should fail (Contributed by Yufei Gu via Daniel Templeton)

2017-03-24 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 100b8ce05 -> 6209e4c91


YARN-6334. TestRMFailover#testAutomaticFailover always passes even when it 
should fail
(Contributed by Yufei Gu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6209e4c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6209e4c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6209e4c9

Branch: refs/heads/branch-2
Commit: 6209e4c913041b258aa2d4a08fd772f6ba31b789
Parents: 100b8ce
Author: Daniel Templeton 
Authored: Fri Mar 24 14:01:11 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Mar 24 14:02:39 2017 -0700

--
 .../hadoop/yarn/client/TestRMFailover.java  | 39 +++-
 1 file changed, 21 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6209e4c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index 4bf6a78..37ef017 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -30,6 +30,7 @@ import static org.mockito.Mockito.verify;
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.util.concurrent.TimeoutException;
 
 import javax.servlet.http.HttpServletResponse;
 
@@ -40,6 +41,7 @@ import org.apache.hadoop.ha.ClientBaseWithFixes;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.client.api.YarnClient;
@@ -59,6 +61,8 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.base.Supplier;
+
 public class TestRMFailover extends ClientBaseWithFixes {
   private static final Log LOG =
   LogFactory.getLog(TestRMFailover.class.getName());
@@ -159,6 +163,21 @@ public class TestRMFailover extends ClientBaseWithFixes {
 verifyConnections();
   }
 
+  private void verifyRMTransitionToStandby(final ResourceManager rm)
+  throws InterruptedException {
+try {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
+@Override
+public Boolean get() {
+  return rm.getRMContext().getHAServiceState() ==
+  HAServiceState.STANDBY;
+}
+  }, 100, 2);
+} catch (TimeoutException e) {
+  fail("RM didn't transition to Standby.");
+}
+  }
+
   @Test
   public void testAutomaticFailover()
   throws YarnException, InterruptedException, IOException {
@@ -182,15 +201,7 @@ public class TestRMFailover extends ClientBaseWithFixes {
 ResourceManager rm = cluster.getResourceManager(
 cluster.getActiveRMIndex());
 rm.handleTransitionToStandByInNewThread();
-int maxWaitingAttempts = 2000;
-while (maxWaitingAttempts-- > 0 ) {
-  if (rm.getRMContext().getHAServiceState() == HAServiceState.STANDBY) {
-break;
-  }
-  Thread.sleep(1);
-}
-Assert.assertFalse("RM didn't transition to Standby ",
-maxWaitingAttempts == 0);
+verifyRMTransitionToStandby(rm);
 verifyConnections();
   }
 
@@ -393,15 +404,7 @@ public class TestRMFailover extends ClientBaseWithFixes {
 testThread.start();
 testThread.join();
 
-int maxWaitingAttempts = 2000;
-while (maxWaitingAttempts-- > 0) {
-  if (resourceManager.getRMContext().getHAServiceState()
-  == HAServiceState.STANDBY) {
-break;
-  }
-  Thread.sleep(1);
-}
-assertFalse("RM didn't transition to Standby ", maxWaitingAttempts < 0);
+verifyRMTransitionToStandby(resourceManager);
   }
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14230. TestAdlFileSystemContractLive fails to clean up. Contributed by John Zhuge.

2017-03-24 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 5130128a3 -> d370d8ddb


HADOOP-14230. TestAdlFileSystemContractLive fails to clean up. Contributed by 
John Zhuge.

(cherry picked from commit d1b7439b48caa18d64a94be1ad5e4927ce573ab8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d370d8dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d370d8dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d370d8dd

Branch: refs/heads/branch-2.8
Commit: d370d8ddb8b283a635342e43bbe1b4712a9f0d14
Parents: 5130128
Author: John Zhuge 
Authored: Fri Mar 24 13:41:34 2017 -0700
Committer: John Zhuge 
Committed: Fri Mar 24 13:41:34 2017 -0700

--
 .../apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d370d8dd/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
index 88bacd9..98902cb 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
@@ -45,9 +45,8 @@ public class TestAdlFileSystemContractLive extends 
FileSystemContractBaseTest {
   protected void tearDown() throws Exception {
 if (AdlStorageConfiguration.isContractTestEnabled()) {
   cleanup();
-  adlStore = null;
-  fs = null;
 }
+super.tearDown();
   }
 
   private void cleanup() throws IOException {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14230. TestAdlFileSystemContractLive fails to clean up. Contributed by John Zhuge.

2017-03-24 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bbd08bb7a -> 100b8ce05


HADOOP-14230. TestAdlFileSystemContractLive fails to clean up. Contributed by 
John Zhuge.

(cherry picked from commit d1b7439b48caa18d64a94be1ad5e4927ce573ab8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/100b8ce0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/100b8ce0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/100b8ce0

Branch: refs/heads/branch-2
Commit: 100b8ce05ba76bcb43e598cdb32dec808572a73c
Parents: bbd08bb
Author: John Zhuge 
Authored: Fri Mar 24 13:41:09 2017 -0700
Committer: John Zhuge 
Committed: Fri Mar 24 13:41:09 2017 -0700

--
 .../apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/100b8ce0/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
index 88bacd9..98902cb 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
@@ -45,9 +45,8 @@ public class TestAdlFileSystemContractLive extends 
FileSystemContractBaseTest {
   protected void tearDown() throws Exception {
 if (AdlStorageConfiguration.isContractTestEnabled()) {
   cleanup();
-  adlStore = null;
-  fs = null;
 }
+super.tearDown();
   }
 
   private void cleanup() throws IOException {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14230. TestAdlFileSystemContractLive fails to clean up. Contributed by John Zhuge.

2017-03-24 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/trunk 332a997e1 -> d1b7439b4


HADOOP-14230. TestAdlFileSystemContractLive fails to clean up. Contributed by 
John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1b7439b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1b7439b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1b7439b

Branch: refs/heads/trunk
Commit: d1b7439b48caa18d64a94be1ad5e4927ce573ab8
Parents: 332a997
Author: John Zhuge 
Authored: Fri Mar 24 08:36:34 2017 -0700
Committer: John Zhuge 
Committed: Fri Mar 24 13:37:04 2017 -0700

--
 .../apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1b7439b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
index 657947e..9d055f1 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
@@ -45,9 +45,8 @@ public class TestAdlFileSystemContractLive extends 
FileSystemContractBaseTest {
   protected void tearDown() throws Exception {
 if (AdlStorageConfiguration.isContractTestEnabled()) {
   cleanup();
-  adlStore = null;
-  fs = null;
 }
+super.tearDown();
   }
 
   private void cleanup() throws IOException {
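
The test previously nulled its fields inside the isContractTestEnabled() guard and never invoked the parent teardown, so the base-class cleanup in FileSystemContractBaseTest (which deletes the working test paths) never ran. A sketch of the method as it reads after this hunk is applied (reconstructed from the diff above; the surrounding class is unchanged):

  protected void tearDown() throws Exception {
    if (AdlStorageConfiguration.isContractTestEnabled()) {
      cleanup();
    }
    // Always delegate to the base class so its cleanup runs even when the
    // live contract tests are disabled.
    super.tearDown();
  }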


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] Git Push Summary

2017-03-24 Thread junping_du
Repository: hadoop
Updated Tags:  refs/tags/rel/release-2.8.0 [created] 03c9a8652

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11170. Add builder-based create API to FileSystem. Contributed by SammiChen and Wei Zhou.

2017-03-24 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 52b00600d -> 332a997e1


HDFS-11170. Add builder-based create API to FileSystem. Contributed by 
SammiChen and Wei Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/332a997e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/332a997e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/332a997e

Branch: refs/heads/trunk
Commit: 332a997e10cca88d9ab3aa8252102366b628eaec
Parents: 52b0060
Author: Andrew Wang 
Authored: Fri Mar 24 12:56:46 2017 -0700
Committer: Andrew Wang 
Committed: Fri Mar 24 12:56:46 2017 -0700

--
 .../hadoop/fs/FSDataOutputStreamBuilder.java| 142 +++
 .../java/org/apache/hadoop/fs/FileSystem.java   |   9 ++
 .../org/apache/hadoop/fs/FilterFileSystem.java  |   5 +
 .../org/apache/hadoop/fs/HarFileSystem.java |   5 +
 .../apache/hadoop/fs/TestLocalFileSystem.java   |  54 +++
 .../hadoop/hdfs/DistributedFileSystem.java  |  81 +++
 .../hadoop/hdfs/TestDistributedFileSystem.java  |  35 -
 .../namenode/TestFavoredNodesEndToEnd.java  |  23 +++
 8 files changed, 353 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/332a997e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
new file mode 100644
index 000..2e885f3
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Progressable;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
+/** Base of specific file system FSDataOutputStreamBuilder. */
+public class FSDataOutputStreamBuilder{
+  private Path path = null;
+  private FsPermission permission = null;
+  private Integer bufferSize;
+  private Short replication;
+  private Long blockSize;
+  private Progressable progress = null;
+  private EnumSet<CreateFlag> flags = null;
+  private ChecksumOpt checksumOpt = null;
+
+  private final FileSystem fs;
+
+  public FSDataOutputStreamBuilder(FileSystem fileSystem, Path p) {
+fs = fileSystem;
+path = p;
+  }
+
+  protected Path getPath() {
+return path;
+  }
+
+  protected FsPermission getPermission() {
+if (permission == null) {
+  return FsPermission.getFileDefault();
+}
+return permission;
+  }
+
+  public FSDataOutputStreamBuilder setPermission(final FsPermission perm) {
+Preconditions.checkNotNull(perm);
+permission = perm;
+return this;
+  }
+
+  protected int getBufferSize() {
+if (bufferSize == null) {
+  return fs.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
+  IO_FILE_BUFFER_SIZE_DEFAULT);
+}
+return bufferSize;
+  }
+
+  public FSDataOutputStreamBuilder setBufferSize(int bufSize) {
+bufferSize = bufSize;
+return this;
+  }
+
+  protected short getReplication() {
+if (replication == null) {
+  return fs.getDefaultReplication(getPath());
+}
+return replication;
+  }
+
+  public FSDataOutputStreamBuilder setReplication(short replica) {
+replication = replica;
+return this;
+  }
+
+  protected long getBlockSize() {
+if (blockSize == null) {
+  return fs.getDefaultBlockSize(getPath());
+}
+return blockSize;
+  }
+
+  
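
For reference, a minimal usage sketch of the builder introduced above. The chained setters setPermission/setBufferSize/setReplication appear in the diff; the FileSystem entry point (here called newFSDataOutputStreamBuilder), setBlockSize and the terminal build() call are assumptions, since those hunks are not shown in this excerpt:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.permission.FsPermission;

  // Sketch only: every value left unset falls back to a filesystem default,
  // as shown by the getters in the builder above.
  public class BuilderCreateSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(conf);
      FSDataOutputStream out = fs.newFSDataOutputStreamBuilder(new Path("/tmp/demo.txt"))
          .setPermission(FsPermission.getFileDefault()) // same default used when unset
          .setBufferSize(4096)                 // otherwise io.file.buffer.size
          .setReplication((short) 2)           // otherwise fs.getDefaultReplication(path)
          .setBlockSize(128L * 1024 * 1024)    // otherwise fs.getDefaultBlockSize(path)
          .build();                            // assumed terminal call
      out.writeUTF("hello");
      out.close();
    }
  }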

hadoop git commit: HADOOP-13715. Add isErasureCoded() API to FileStatus class. Contributed by Manoj Govindassamy.

2017-03-24 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 33815af42 -> 52b00600d


HADOOP-13715. Add isErasureCoded() API to FileStatus class. Contributed by 
Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52b00600
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52b00600
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52b00600

Branch: refs/heads/trunk
Commit: 52b00600df921763725396ed92194d3338167655
Parents: 33815af
Author: Andrew Wang 
Authored: Fri Mar 24 11:44:37 2017 -0700
Committer: Andrew Wang 
Committed: Fri Mar 24 11:44:46 2017 -0700

--
 .../java/org/apache/hadoop/fs/FileStatus.java   | 11 +++-
 .../hadoop/fs/permission/FsPermission.java  |  7 +++
 .../src/site/markdown/filesystem/filesystem.md  |  9 +++
 .../hadoop/fs/contract/ContractTestUtils.java   | 27 +
 .../hadoop/fs/viewfs/TestViewfsFileStatus.java  |  7 ++-
 .../hdfs/protocol/FsPermissionExtension.java| 17 +-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 13 +++--
 .../hadoop/fs/http/client/HttpFSFileSystem.java |  7 ++-
 .../hadoop/fs/http/server/FSOperations.java |  3 +
 .../fs/http/client/BaseTestHttpFSWith.java  | 24 +++-
 .../org/apache/hadoop/test/TestHdfsHelper.java  | 18 +-
 .../server/namenode/FSDirStatAndListingOp.java  | 10 ++--
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|  6 ++
 .../TestErasureCodingPolicyWithSnapshot.java| 26 +
 .../org/apache/hadoop/hdfs/TestFileStatus.java  | 15 +++--
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java | 15 +++--
 .../hdfs/server/namenode/FSAclBaseTest.java |  2 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 60 
 .../hadoop/fs/s3a/TestS3AGetFileStatus.java |  3 +
 .../apache/hadoop/fs/adl/TestGetFileStatus.java |  6 +-
 20 files changed, 256 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52b00600/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 72ca24f..26d3962 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -215,7 +215,16 @@ public class FileStatus implements Writable, 
Comparable,
   public boolean isEncrypted() {
 return permission.getEncryptedBit();
   }
-  
+
+  /**
+   * Tell whether the underlying file or directory is erasure coded or not.
+   *
+   * @return true if the underlying file or directory is erasure coded.
+   */
+  public boolean isErasureCoded() {
+return permission.getErasureCodedBit();
+  }
+
   /**
* Get the owner of the file.
* @return owner of the file. The string could be empty if there is no

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52b00600/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index 56e19dc..ddb2724 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -312,6 +312,13 @@ public class FsPermission implements Writable, 
Serializable,
 return false;
   }
 
+  /**
+   * Returns true if the file or directory is erasure coded.
+   */
+  public boolean getErasureCodedBit() {
+return false;
+  }
+
   /** Set the user file creation mask (umask) */
   public static void setUMask(Configuration conf, FsPermission umask) {
 conf.set(UMASK_LABEL, String.format("%1$03o", umask.toShort()));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52b00600/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index 201d397..97bc7d1 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -90,7 
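
A short sketch of how a client consults the new flag; FileStatus#isErasureCoded() and FsPermission#getErasureCodedBit() are taken from the hunks above, while the path and the presence of an erasure-coded directory are assumptions:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  // Sketch only: assumes an HDFS cluster with an EC policy set on /ec-data.
  public class ErasureCodedCheck {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(new Configuration());
      FileStatus st = fs.getFileStatus(new Path("/ec-data/file"));
      // isErasureCoded() returns false unless the underlying FsPermission
      // reports the erasure-coded bit.
      System.out.println(st.getPath() + " erasure coded? " + st.isErasureCoded());
    }
  }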

hadoop git commit: YARN-6334. TestRMFailover#testAutomaticFailover always passes even when it should fail (Contributed by Yufei Gu via Daniel Templeton)

2017-03-24 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk d49d1862a -> 33815af42


YARN-6334. TestRMFailover#testAutomaticFailover always passes even when it 
should fail
(Contributed by Yufei Gu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33815af4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33815af4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33815af4

Branch: refs/heads/trunk
Commit: 33815af4242ac8c6b119128730e63f13164fd763
Parents: d49d186
Author: Daniel Templeton 
Authored: Fri Mar 24 11:42:54 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Mar 24 11:42:54 2017 -0700

--
 .../hadoop/yarn/client/TestRMFailover.java  | 39 +++-
 1 file changed, 21 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33815af4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index 4bf6a78..d568d6a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -30,6 +30,7 @@ import static org.mockito.Mockito.verify;
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.util.concurrent.TimeoutException;
 
 import javax.servlet.http.HttpServletResponse;
 
@@ -40,6 +41,7 @@ import org.apache.hadoop.ha.ClientBaseWithFixes;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.client.api.YarnClient;
@@ -59,6 +61,8 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.base.Supplier;
+
 public class TestRMFailover extends ClientBaseWithFixes {
   private static final Log LOG =
   LogFactory.getLog(TestRMFailover.class.getName());
@@ -159,6 +163,21 @@ public class TestRMFailover extends ClientBaseWithFixes {
 verifyConnections();
   }
 
+  private void verifyRMTransitionToStandby(ResourceManager rm)
+  throws InterruptedException {
+try {
+  GenericTestUtils.waitFor(new Supplier<Boolean>() {
+@Override
+public Boolean get() {
+  return rm.getRMContext().getHAServiceState() ==
+  HAServiceState.STANDBY;
+}
+  }, 100, 2);
+} catch (TimeoutException e) {
+  fail("RM didn't transition to Standby.");
+}
+  }
+
   @Test
   public void testAutomaticFailover()
   throws YarnException, InterruptedException, IOException {
@@ -182,15 +201,7 @@ public class TestRMFailover extends ClientBaseWithFixes {
 ResourceManager rm = cluster.getResourceManager(
 cluster.getActiveRMIndex());
 rm.handleTransitionToStandByInNewThread();
-int maxWaitingAttempts = 2000;
-while (maxWaitingAttempts-- > 0 ) {
-  if (rm.getRMContext().getHAServiceState() == HAServiceState.STANDBY) {
-break;
-  }
-  Thread.sleep(1);
-}
-Assert.assertFalse("RM didn't transition to Standby ",
-maxWaitingAttempts == 0);
+verifyRMTransitionToStandby(rm);
 verifyConnections();
   }
 
@@ -393,15 +404,7 @@ public class TestRMFailover extends ClientBaseWithFixes {
 testThread.start();
 testThread.join();
 
-int maxWaitingAttempts = 2000;
-while (maxWaitingAttempts-- > 0) {
-  if (resourceManager.getRMContext().getHAServiceState()
-  == HAServiceState.STANDBY) {
-break;
-  }
-  Thread.sleep(1);
-}
-assertFalse("RM didn't transition to Standby ", maxWaitingAttempts < 0);
+verifyRMTransitionToStandby(resourceManager);
   }
 
   /**
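
The removed hand-rolled loops used a post-decrement (maxWaitingAttempts-- > 0), so the counter ends at -1, not 0, when the wait is exhausted; the first assertion compared against 0 and therefore could never fail, which appears to be why the test always passed. GenericTestUtils.waitFor instead throws TimeoutException when the condition never holds. A self-contained sketch of that pattern (the condition, interval and timeout here are placeholders, not values from the patch):

  import java.util.concurrent.TimeoutException;
  import java.util.concurrent.atomic.AtomicBoolean;
  import org.apache.hadoop.test.GenericTestUtils;
  import com.google.common.base.Supplier;

  public class WaitForSketch {
    public static void main(String[] args) throws Exception {
      final AtomicBoolean ready = new AtomicBoolean(false);
      new Thread(new Runnable() {
        @Override
        public void run() {
          try { Thread.sleep(500); } catch (InterruptedException ignored) { }
          ready.set(true);   // stands in for the RM reaching STANDBY
        }
      }).start();
      try {
        // Poll every 100 ms, give up after 20 s; waitFor throws
        // TimeoutException if the supplier never returns true, so a timeout
        // cannot pass silently.
        GenericTestUtils.waitFor(new Supplier<Boolean>() {
          @Override
          public Boolean get() {
            return ready.get();
          }
        }, 100, 20000);
      } catch (TimeoutException e) {
        throw new AssertionError("condition was not reached in time", e);
      }
    }
  }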


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14211. FilterFs and ChRootedFs are too aggressive about enforcing 'authorityNeeded'. Contributed by Erik Krogen.

2017-03-24 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 a92f654aa -> 5130128a3


HADOOP-14211. FilterFs and ChRootedFs are too aggressive about enforcing 
'authorityNeeded'. Contributed by Erik Krogen.

(cherry picked from commit 0e556a5ba645570d381beca60114a1239b27d49f)
(cherry picked from commit 96fe940e59127dc7c3e4182c3ed450c3cd8d858e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5130128a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5130128a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5130128a

Branch: refs/heads/branch-2.8
Commit: 5130128a31a75587f6ccca08c487e44f73685227
Parents: a92f654
Author: Andrew Wang 
Authored: Fri Mar 24 11:12:02 2017 -0700
Committer: Zhe Zhang 
Committed: Fri Mar 24 11:25:58 2017 -0700

--
 .../src/main/java/org/apache/hadoop/fs/FilterFs.java|  3 +--
 .../java/org/apache/hadoop/fs/viewfs/ChRootedFs.java|  3 +--
 .../test/java/org/apache/hadoop/fs/TestFilterFs.java| 12 
 3 files changed, 14 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5130128a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
index 6b1093e..5c16a4e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
@@ -57,8 +57,7 @@ public abstract class FilterFs extends AbstractFileSystem {
   }
   
   protected FilterFs(AbstractFileSystem fs) throws URISyntaxException {
-super(fs.getUri(), fs.getUri().getScheme(),
-fs.getUri().getAuthority() != null, fs.getUriDefaultPort());
+super(fs.getUri(), fs.getUri().getScheme(), false, fs.getUriDefaultPort());
 myFs = fs;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5130128a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
index d77ad8b..ad1f5b5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
@@ -101,8 +101,7 @@ class ChRootedFs extends AbstractFileSystem {
 
   public ChRootedFs(final AbstractFileSystem fs, final Path theRoot)
 throws URISyntaxException {
-super(fs.getUri(), fs.getUri().getScheme(),
-fs.getUri().getAuthority() != null, fs.getUriDefaultPort());
+super(fs.getUri(), fs.getUri().getScheme(), false, fs.getUriDefaultPort());
 myFs = fs;
 myFs.checkPath(theRoot);
 chRootPathPart = new Path(myFs.getUriPath(theRoot));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5130128a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
index 27d093c..a2f0905 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
@@ -25,6 +25,8 @@ import java.util.Iterator;
 
 import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.viewfs.ConfigUtil;
 
 public class TestFilterFs extends TestCase {
 
@@ -65,4 +67,14 @@ public class TestFilterFs extends TestCase {
 }
   }
   
+  // Test that FilterFs will accept an AbstractFileSystem to be filtered which
+  // has an optional authority, such as ViewFs
+  public void testFilteringWithNonrequiredAuthority() throws Exception {
+Configuration conf = new Configuration();
+ConfigUtil.addLink(conf, "custom", "/mnt", URI.create("file:///"));
+FileContext fc =
+FileContext.getFileContext(URI.create("viewfs://custom/"), conf);
+new FilterFs(fc.getDefaultFileSystem()) {};
+  }
+
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

hadoop git commit: YARN-6360. Prevent FS state dump logger from cramming other log files (Contributed by Yufei Gu via Daniel Templeton)

2017-03-24 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 96fe940e5 -> bbd08bb7a


YARN-6360. Prevent FS state dump logger from cramming other log files
(Contributed by Yufei Gu via Daniel Templeton)

(cherry picked from commit d49d1862a645b8c5d3eae95bf633bc82816f0168)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbd08bb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbd08bb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbd08bb7

Branch: refs/heads/branch-2
Commit: bbd08bb7a4170d53557dfe4c26379c760b7ca12e
Parents: 96fe940
Author: Daniel Templeton 
Authored: Fri Mar 24 11:29:20 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Mar 24 11:31:58 2017 -0700

--
 hadoop-common-project/hadoop-common/src/main/conf/log4j.properties | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd08bb7/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 7c02b20..b83eb6f 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -324,6 +324,7 @@ 
log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
 
 # Fair scheduler requests log on state dump
 
log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=DEBUG,FSLOGGER
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=false
 log4j.appender.FSLOGGER=org.apache.log4j.RollingFileAppender
 log4j.appender.FSLOGGER.File=${hadoop.log.dir}/fairscheduler-statedump.log
 log4j.appender.FSLOGGER.layout=org.apache.log4j.PatternLayout


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6360. Prevent FS state dump logger from cramming other log files (Contributed by Yufei Gu via Daniel Templeton)

2017-03-24 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0e556a5ba -> d49d1862a


YARN-6360. Prevent FS state dump logger from cramming other log files
(Contributed by Yufei Gu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d49d1862
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d49d1862
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d49d1862

Branch: refs/heads/trunk
Commit: d49d1862a645b8c5d3eae95bf633bc82816f0168
Parents: 0e556a5
Author: Daniel Templeton 
Authored: Fri Mar 24 11:29:20 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Mar 24 11:31:04 2017 -0700

--
 hadoop-common-project/hadoop-common/src/main/conf/log4j.properties | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d49d1862/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 6026763..b4658ae 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -324,6 +324,7 @@ 
log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
 
 # Fair scheduler requests log on state dump
 
log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=DEBUG,FSLOGGER
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=false
 log4j.appender.FSLOGGER=org.apache.log4j.RollingFileAppender
 log4j.appender.FSLOGGER.File=${hadoop.log.dir}/fairscheduler-statedump.log
 log4j.appender.FSLOGGER.layout=org.apache.log4j.PatternLayout


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: HADOOP-13966. S3Guard: Add ability to start DDB local server in every test. Contributed by Mingliang Liu

2017-03-24 Thread liuml07
HADOOP-13966. S3Guard: Add ability to start DDB local server in every test. 
Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed15abaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed15abaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed15abaf

Branch: refs/heads/HADOOP-13345
Commit: ed15abaf377b868bd37eb0214651e214693404c7
Parents: 737804b
Author: Mingliang Liu 
Authored: Mon Mar 20 22:38:04 2017 -0700
Committer: Mingliang Liu 
Committed: Fri Mar 24 11:04:18 2017 -0700

--
 hadoop-tools/hadoop-aws/pom.xml |  13 ++
 .../fs/s3a/s3guard/DynamoDBClientFactory.java   |  35 +++--
 .../apache/hadoop/fs/s3a/s3guard/S3Guard.java   |   4 +-
 .../site/markdown/tools/hadoop-aws/s3guard.md   |  18 ++-
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  |   1 +
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |   7 +
 .../s3a/s3guard/DynamoDBLocalClientFactory.java | 148 +++
 .../s3a/s3guard/TestDynamoDBMetadataStore.java  |  73 ++---
 8 files changed, 218 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed15abaf/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index de9697e..0606a3d 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -312,6 +312,19 @@
   
 
 
+
+
+  dynamodblocal
+  
+
+  dynamodblocal
+
+  
+  
+
dynamodblocal
+  
+
+
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed15abaf/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
index c2fe6a0..ecdc3e1 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
@@ -30,6 +30,7 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -43,7 +44,8 @@ import static 
org.apache.hadoop.fs.s3a.S3AUtils.createAWSCredentialProviderSet;
  *
  * Implementation should be configured for setting and getting configuration.
  */
-interface DynamoDBClientFactory extends Configurable {
+@InterfaceAudience.Private
+public interface DynamoDBClientFactory extends Configurable {
   Logger LOG = LoggerFactory.getLogger(DynamoDBClientFactory.class);
 
   /**
@@ -77,6 +79,26 @@ interface DynamoDBClientFactory extends Configurable {
   final ClientConfiguration awsConf =
   DefaultS3ClientFactory.createAwsConf(conf);
 
+  final String region = getRegion(conf, defaultRegion);
+  LOG.debug("Creating DynamoDB client in region {}", region);
+
+  return AmazonDynamoDBClientBuilder.standard()
+  .withCredentials(credentials)
+  .withClientConfiguration(awsConf)
+  .withRegion(region)
+  .build();
+}
+
+/**
+ * Helper method to get and validate the AWS region for DynamoDBClient.
+ *
+ * @param conf configuration
+ * @param defaultRegion the default region
+ * @return configured region or else the provided default region
+ * @throws IOException if the region is not valid
+ */
+static String getRegion(Configuration conf, String defaultRegion)
+throws IOException {
   String region = conf.getTrimmed(S3GUARD_DDB_REGION_KEY);
   if (StringUtils.isEmpty(region)) {
 region = defaultRegion;
@@ -85,17 +107,10 @@ interface DynamoDBClientFactory extends Configurable {
 Regions.fromName(region);
   } catch (IllegalArgumentException | NullPointerException e) {
 throw new IOException("Invalid region specified: " + region + "; " +
-"Region can be configured with " + S3GUARD_DDB_REGION_KEY +": " +
+"Region can be configured with " + S3GUARD_DDB_REGION_KEY + ": " +
 validRegionsString());
   }
-
-  LOG.debug("Creating DynamoDB client in region {}", region);
-
-  return AmazonDynamoDBClientBuilder.standard()
-  .withCredentials(credentials)
-  
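
The getRegion helper added above validates the configured region name with the AWS SDK before any client is built. A standalone sketch of that check (the region strings are illustrative; in the patch the value comes from the S3GUARD_DDB_REGION_KEY setting):

  import java.io.IOException;
  import com.amazonaws.regions.Regions;

  public final class RegionCheck {
    // Mirrors the shape of getRegion(): fall back to a default, then verify
    // the name is a real AWS region.
    static String validateRegion(String region, String defaultRegion) throws IOException {
      if (region == null || region.isEmpty()) {
        region = defaultRegion;
      }
      try {
        Regions.fromName(region);  // throws IllegalArgumentException for unknown names
      } catch (IllegalArgumentException | NullPointerException e) {
        throw new IOException("Invalid region specified: " + region, e);
      }
      return region;
    }

    public static void main(String[] args) throws IOException {
      System.out.println(validateRegion("", "us-west-2"));  // falls back to the default
    }
  }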

[1/2] hadoop git commit: Revert "HADOOP-13966 Add ability to start DDB local server in every test."

2017-03-24 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 9521c96c6 -> ed15abaf3


Revert "HADOOP-13966 Add ability to start DDB local server in every test."

This reverts commit 9521c96c6a4f7227f51157336cba09156dea1cbc.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/737804b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/737804b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/737804b7

Branch: refs/heads/HADOOP-13345
Commit: 737804b742ddf8c86fd4f2789c4f0814583809a5
Parents: 9521c96
Author: Mingliang Liu 
Authored: Fri Mar 24 11:03:39 2017 -0700
Committer: Mingliang Liu 
Committed: Fri Mar 24 11:03:39 2017 -0700

--
 hadoop-tools/hadoop-aws/pom.xml |  13 --
 .../fs/s3a/s3guard/DynamoDBClientFactory.java   |   4 +-
 .../apache/hadoop/fs/s3a/s3guard/S3Guard.java   |   4 +-
 .../site/markdown/tools/hadoop-aws/s3guard.md   |  18 +--
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  |   1 -
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |   7 -
 .../s3a/s3guard/DynamoDBLocalClientFactory.java | 142 ---
 .../s3a/s3guard/TestDynamoDBMetadataStore.java  |  73 --
 8 files changed, 72 insertions(+), 190 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/737804b7/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 0606a3d..de9697e 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -312,19 +312,6 @@
   
 
 
-
-
-  dynamodblocal
-  
-
-  dynamodblocal
-
-  
-  
-
dynamodblocal
-  
-
-
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/737804b7/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
index c780189..c2fe6a0 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
@@ -30,7 +30,6 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -44,8 +43,7 @@ import static 
org.apache.hadoop.fs.s3a.S3AUtils.createAWSCredentialProviderSet;
  *
  * Implementation should be configured for setting and getting configuration.
  */
-@InterfaceAudience.Private
-public interface DynamoDBClientFactory extends Configurable {
+interface DynamoDBClientFactory extends Configurable {
   Logger LOG = LoggerFactory.getLogger(DynamoDBClientFactory.class);
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/737804b7/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
index cebc50e..9658ed9 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.fs.s3a.s3guard;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -51,8 +50,7 @@ public final class S3Guard {
 
   @InterfaceAudience.Private
   @InterfaceStability.Unstable
-  @VisibleForTesting
-  public static final String S3GUARD_DDB_CLIENT_FACTORY_IMPL =
+  static final String S3GUARD_DDB_CLIENT_FACTORY_IMPL =
   "fs.s3a.s3guard.ddb.client.factory.impl";
 
   static final Class

http://git-wip-us.apache.org/repos/asf/hadoop/blob/737804b7/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md
index d8c0768..7619b2a 100644
--- 

hadoop git commit: HADOOP-14211. FilterFs and ChRootedFs are too aggressive about enforcing 'authorityNeeded'. Contributed by Erik Krogen.

2017-03-24 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 14414705f -> 96fe940e5


HADOOP-14211. FilterFs and ChRootedFs are too aggressive about enforcing 
'authorityNeeded'. Contributed by Erik Krogen.

(cherry picked from commit 0e556a5ba645570d381beca60114a1239b27d49f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96fe940e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96fe940e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96fe940e

Branch: refs/heads/branch-2
Commit: 96fe940e59127dc7c3e4182c3ed450c3cd8d858e
Parents: 1441470
Author: Andrew Wang 
Authored: Fri Mar 24 11:12:02 2017 -0700
Committer: Andrew Wang 
Committed: Fri Mar 24 11:12:31 2017 -0700

--
 .../src/main/java/org/apache/hadoop/fs/FilterFs.java|  3 +--
 .../java/org/apache/hadoop/fs/viewfs/ChRootedFs.java|  3 +--
 .../test/java/org/apache/hadoop/fs/TestFilterFs.java| 12 
 3 files changed, 14 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fe940e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
index 6b1093e..5c16a4e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
@@ -57,8 +57,7 @@ public abstract class FilterFs extends AbstractFileSystem {
   }
   
   protected FilterFs(AbstractFileSystem fs) throws URISyntaxException {
-super(fs.getUri(), fs.getUri().getScheme(),
-fs.getUri().getAuthority() != null, fs.getUriDefaultPort());
+super(fs.getUri(), fs.getUri().getScheme(), false, fs.getUriDefaultPort());
 myFs = fs;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fe940e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
index d77ad8b..ad1f5b5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
@@ -101,8 +101,7 @@ class ChRootedFs extends AbstractFileSystem {
 
   public ChRootedFs(final AbstractFileSystem fs, final Path theRoot)
 throws URISyntaxException {
-super(fs.getUri(), fs.getUri().getScheme(),
-fs.getUri().getAuthority() != null, fs.getUriDefaultPort());
+super(fs.getUri(), fs.getUri().getScheme(), false, fs.getUriDefaultPort());
 myFs = fs;
 myFs.checkPath(theRoot);
 chRootPathPart = new Path(myFs.getUriPath(theRoot));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fe940e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
index 27d093c..a2f0905 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
@@ -25,6 +25,8 @@ import java.util.Iterator;
 
 import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.viewfs.ConfigUtil;
 
 public class TestFilterFs extends TestCase {
 
@@ -65,4 +67,14 @@ public class TestFilterFs extends TestCase {
 }
   }
   
+  // Test that FilterFs will accept an AbstractFileSystem to be filtered which
+  // has an optional authority, such as ViewFs
+  public void testFilteringWithNonrequiredAuthority() throws Exception {
+Configuration conf = new Configuration();
+ConfigUtil.addLink(conf, "custom", "/mnt", URI.create("file:///"));
+FileContext fc =
+FileContext.getFileContext(URI.create("viewfs://custom/"), conf);
+new FilterFs(fc.getDefaultFileSystem()) {};
+  }
+
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

hadoop git commit: HADOOP-14211. FilterFs and ChRootedFs are too aggressive about enforcing 'authorityNeeded'. Contributed by Erik Krogen.

2017-03-24 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk d4f73e7e2 -> 0e556a5ba


HADOOP-14211. FilterFs and ChRootedFs are too aggressive about enforcing 
'authorityNeeded'. Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e556a5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e556a5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e556a5b

Branch: refs/heads/trunk
Commit: 0e556a5ba645570d381beca60114a1239b27d49f
Parents: d4f73e7
Author: Andrew Wang 
Authored: Fri Mar 24 11:12:02 2017 -0700
Committer: Andrew Wang 
Committed: Fri Mar 24 11:12:02 2017 -0700

--
 .../src/main/java/org/apache/hadoop/fs/FilterFs.java|  3 +--
 .../java/org/apache/hadoop/fs/viewfs/ChRootedFs.java|  3 +--
 .../test/java/org/apache/hadoop/fs/TestFilterFs.java| 12 
 3 files changed, 14 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e556a5b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
index 6b1093e..5c16a4e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
@@ -57,8 +57,7 @@ public abstract class FilterFs extends AbstractFileSystem {
   }
   
   protected FilterFs(AbstractFileSystem fs) throws URISyntaxException {
-super(fs.getUri(), fs.getUri().getScheme(),
-fs.getUri().getAuthority() != null, fs.getUriDefaultPort());
+super(fs.getUri(), fs.getUri().getScheme(), false, fs.getUriDefaultPort());
 myFs = fs;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e556a5b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
index d77ad8b..ad1f5b5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
@@ -101,8 +101,7 @@ class ChRootedFs extends AbstractFileSystem {
 
   public ChRootedFs(final AbstractFileSystem fs, final Path theRoot)
 throws URISyntaxException {
-super(fs.getUri(), fs.getUri().getScheme(),
-fs.getUri().getAuthority() != null, fs.getUriDefaultPort());
+super(fs.getUri(), fs.getUri().getScheme(), false, fs.getUriDefaultPort());
 myFs = fs;
 myFs.checkPath(theRoot);
 chRootPathPart = new Path(myFs.getUriPath(theRoot));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e556a5b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
index 27d093c..a2f0905 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
@@ -25,6 +25,8 @@ import java.util.Iterator;
 
 import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.viewfs.ConfigUtil;
 
 public class TestFilterFs extends TestCase {
 
@@ -65,4 +67,14 @@ public class TestFilterFs extends TestCase {
 }
   }
   
+  // Test that FilterFs will accept an AbstractFileSystem to be filtered which
+  // has an optional authority, such as ViewFs
+  public void testFilteringWithNonrequiredAuthority() throws Exception {
+Configuration conf = new Configuration();
+ConfigUtil.addLink(conf, "custom", "/mnt", URI.create("file:///"));
+FileContext fc =
+FileContext.getFileContext(URI.create("viewfs://custom/"), conf);
+new FilterFs(fc.getDefaultFileSystem()) {};
+  }
+
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11574. Spelling mistakes in the Java source. Contributed by Hu Xiaodong.

2017-03-24 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/trunk ab759e91b -> d4f73e7e2


HDFS-11574. Spelling mistakes in the Java source. Contributed by Hu Xiaodong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4f73e7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4f73e7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4f73e7e

Branch: refs/heads/trunk
Commit: d4f73e7e27141ce0a88edb96fa304418a97a82a3
Parents: ab759e9
Author: Ravi Prakash 
Authored: Fri Mar 24 09:38:17 2017 -0700
Committer: Ravi Prakash 
Committed: Fri Mar 24 09:38:17 2017 -0700

--
 .../hdfs/server/namenode/ha/RequestHedgingProxyProvider.java | 2 +-
 .../java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java| 2 +-
 .../hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java | 2 +-
 .../hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java| 4 ++--
 .../hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java| 2 +-
 .../apache/hadoop/yarn/server/resourcemanager/Application.java   | 2 +-
 6 files changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4f73e7e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index a765e95..2f6c9bc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -44,7 +44,7 @@ import org.slf4j.LoggerFactory;
  * per-se. It constructs a wrapper proxy that sends the request to ALL
  * underlying proxies simultaneously. It assumes the in an HA setup, there will
  * be only one Active, and the active should respond faster than any configured
- * standbys. Once it recieve a response from any one of the configred proxies,
+ * standbys. Once it receive a response from any one of the configred proxies,
  * outstanding requests to other proxies are immediately cancelled.
  */
 public class RequestHedgingProxyProvider extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4f73e7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index aeff16d..3f4fe28 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -133,7 +133,7 @@ public class TestDataTransferProtocol {
   LOG.info("Expected: " + expected);
   
   if (eofExpected) {
-throw new IOException("Did not recieve IOException when an exception " 
+
+throw new IOException("Did not receive IOException when an exception " 
+
   "is expected while reading from " + datanode); 
   }
   assertEquals(expected, received);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4f73e7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
index 12fa211..e29d518 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
@@ -1282,7 +1282,7 @@ public class TestRetryCacheWithHA {
 
   /**
* When NN failover happens, if the client did not receive the response and
-   * send a retry request to the other NN, the same response should be recieved
+   * send a retry request to the other NN, the same response should be received
* based on the retry cache.
*/
   public void testClientRetryWithFailover(final AtMostOnceOp op)


hadoop git commit: HDFS-10506. OIV's ReverseXML processor cannot reconstruct some snapshot details. Contributed by Akira Ajisaka.

2017-03-24 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d946dfcbe -> 14414705f


HDFS-10506. OIV's ReverseXML processor cannot reconstruct some snapshot 
details. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14414705
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14414705
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14414705

Branch: refs/heads/branch-2
Commit: 14414705f79495eda11e302f38c792128fe0182b
Parents: d946dfc
Author: Wei-Chiu Chuang 
Authored: Fri Mar 24 08:43:14 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Mar 24 08:43:14 2017 -0700

--
 .../OfflineImageReconstructor.java  | 84 ++--
 .../offlineImageViewer/PBImageXmlWriter.java| 34 ++--
 .../TestOfflineImageViewer.java | 15 +++-
 3 files changed, 103 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14414705/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
index 137ceff..1f629b2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
@@ -568,6 +568,13 @@ class OfflineImageReconstructor {
   private void processFileXml(Node node, INodeSection.INode.Builder inodeBld)
   throws IOException {
 inodeBld.setType(INodeSection.INode.Type.FILE);
+INodeSection.INodeFile.Builder bld = createINodeFileBuilder(node);
+inodeBld.setFile(bld);
+// Will check remaining keys and serialize in processINodeXml
+  }
+
+  private INodeSection.INodeFile.Builder createINodeFileBuilder(Node node)
+  throws IOException {
 INodeSection.INodeFile.Builder bld = INodeSection.INodeFile.newBuilder();
 Integer ival = node.removeChildInt(SECTION_REPLICATION);
 if (ival != null) {
@@ -596,24 +603,7 @@ class OfflineImageReconstructor {
 if (block == null) {
   break;
 }
-HdfsProtos.BlockProto.Builder blockBld =
-HdfsProtos.BlockProto.newBuilder();
-Long id = block.removeChildLong(SECTION_ID);
-if (id == null) {
-  throw new IOException("<block> found without <id>");
-}
-blockBld.setBlockId(id);
-Long genstamp = block.removeChildLong(INODE_SECTION_GEMSTAMP);
-if (genstamp == null) {
-  throw new IOException("<block> found without <genstamp>");
-}
-blockBld.setGenStamp(genstamp);
-Long numBytes = block.removeChildLong(INODE_SECTION_NUM_BYTES);
-if (numBytes == null) {
-  throw new IOException("<block> found without <numBytes>");
-}
-blockBld.setNumBytes(numBytes);
-bld.addBlocks(blockBld);
+bld.addBlocks(createBlockBuilder(block));
   }
 }
 Node fileUnderConstruction =
@@ -650,14 +640,44 @@ class OfflineImageReconstructor {
 if (ival != null) {
   bld.setStoragePolicyID(ival);
 }
-inodeBld.setFile(bld);
+return bld;
 // Will check remaining keys and serialize in processINodeXml
   }
 
+  private HdfsProtos.BlockProto.Builder createBlockBuilder(Node block)
+  throws IOException {
+HdfsProtos.BlockProto.Builder blockBld =
+HdfsProtos.BlockProto.newBuilder();
+Long id = block.removeChildLong(SECTION_ID);
+if (id == null) {
+  throw new IOException("<block> found without <id>");
+}
+blockBld.setBlockId(id);
+Long genstamp = block.removeChildLong(INODE_SECTION_GENSTAMP);
+if (genstamp == null) {
+  throw new IOException("<block> found without <genstamp>");
+}
+blockBld.setGenStamp(genstamp);
+Long numBytes = block.removeChildLong(INODE_SECTION_NUM_BYTES);
+if (numBytes == null) {
+  throw new IOException("<block> found without <numBytes>");
+}
+blockBld.setNumBytes(numBytes);
+return blockBld;
+  }
+
   private void processDirectoryXml(Node node,
   INodeSection.INode.Builder inodeBld) throws IOException {
 inodeBld.setType(INodeSection.INode.Type.DIRECTORY);
 INodeSection.INodeDirectory.Builder bld =
+createINodeDirectoryBuilder(node);
+inodeBld.setDirectory(bld);
+// Will check remaining keys and serialize in processINodeXml
+  }
+
+  private INodeSection.INodeDirectory.Builder
+  

[Hadoop Wiki] Update of "PoweredBy" by RemySaissy

2017-03-24 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "PoweredBy" page has been changed by RemySaissy:
https://wiki.apache.org/hadoop/PoweredBy?action=diff&rev1=440&rev2=441

Comment:
Criteo company description updated.

  
   * ''[[http://criteo.com|Criteo]] - Criteo is a global leader in online 
performance advertising ''
* ''[[http://labs.criteo.com/blog|Criteo R&D]] uses Hadoop as a 
consolidated platform for storage, analytics and back-end processing, including 
Machine Learning algorithms ''
-   * ''We currently have a dedicated cluster of 1117 nodes, 39PB storage, 75TB 
RAM, 22000 cores running full steam 24/7, and growing by the day ''
-   * ''Each node has 24 HT cores, 96GB RAM, 42TB HDD ''
-   * ''Hardware and platform management is done through 
[[http://www.getchef.com/|Chef]], we run YARN ''
-   * ''We run a mix of ad-hoc Hive queries for BI, 
[[http://www.cascading.org/|Cascading]] jobs, raw mapreduce jobs, and streaming 
[[http://www.mono-project.com/|Mono]] jobs, as well as some Pig ''
-   * ''To be delivered in Q2 2015 a second cluster of 600 nodes, each 48HT 
cores, 256GB RAM, 96TB HDD ''
+   * ''We have 5 clusters in total, 2 of which are production, each with a 
corresponding pre-production and an experimental one ''
+   * ''More than 47,896 cores in ~2,560 machines running Hadoop (> 4,300 
machines by the end of 2017) ''
+   * ''Our main cluster: 1,353 machines (24 cores w 15*6TB disk & 256GB RAM) ''
+* ''Growth to ~3,000 machines by the end of 2017 ''
+   * ''We run a mix of ''
+* ''Ad-hoc Hive queries for BI ''
+* ''Cascading/Scalding jobs ''
+* ''Mapreduce jobs ''
+* ''Spark jobs ''
+* ''Streaming Mono jobs ''
  
   * ''[[http://www.crs4.it|CRS4]] ''
* ''Hadoop deployed dynamically on subsets of a 400-node cluster ''

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[Hadoop Wiki] Update of "Misty" by SteveLoughran

2017-03-24 Thread Apache Wiki
Dear wiki user,

You have subscribed to a wiki page "Hadoop Wiki" for change notification.

The page "Misty" has been deleted by SteveLoughran:

https://wiki.apache.org/hadoop/Misty?action=diff&rev1=1&rev2=2

Comment:
junk user page

- ##master-page:HomepageTemplate
- #format wiki
- #language en
- == @``ME@ ==
  
- Email: <>
- ## You can even more obfuscate your email address by adding more uppercase 
letters followed by a leading and trailing blank.
- 
- ...
- 
- 
- CategoryHomepage
- 

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-10649. Remove unused PermissionStatus#applyUMask. Contributed by Chen Liang.

2017-03-24 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/trunk 128015584 -> ab759e91b


HDFS-10649. Remove unused PermissionStatus#applyUMask. Contributed by Chen 
Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab759e91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab759e91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab759e91

Branch: refs/heads/trunk
Commit: ab759e91b746fbd1d8d70f45e896a9f4bd7abf7e
Parents: 1280155
Author: John Zhuge 
Authored: Thu Mar 23 23:31:27 2017 -0700
Committer: John Zhuge 
Committed: Thu Mar 23 23:31:27 2017 -0700

--
 .../apache/hadoop/fs/permission/PermissionStatus.java  | 13 -
 1 file changed, 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab759e91/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
index bc9e392..3c3693f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
@@ -44,10 +44,6 @@ public class PermissionStatus implements Writable {
   String user, String group, FsPermission permission) {
 return new PermissionStatus(user, group, permission) {
   @Override
-  public PermissionStatus applyUMask(FsPermission umask) {
-throw new UnsupportedOperationException();
-  }
-  @Override
   public void readFields(DataInput in) throws IOException {
 throw new UnsupportedOperationException();
   }
@@ -76,15 +72,6 @@ public class PermissionStatus implements Writable {
   /** Return permission */
   public FsPermission getPermission() {return permission;}
 
-  /**
-   * Apply umask.
-   * @see FsPermission#applyUMask(FsPermission)
-   */
-  public PermissionStatus applyUMask(FsPermission umask) {
-permission = permission.applyUMask(umask);
-return this;
-  }
-
   @Override
   public void readFields(DataInput in) throws IOException {
 username = Text.readString(in, Text.DEFAULT_MAX_LEN);
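
The removed PermissionStatus#applyUMask had no callers and simply delegated to FsPermission#applyUMask(FsPermission), which remains available. A small sketch of applying a umask directly on the permission object (values chosen for illustration):

  import org.apache.hadoop.fs.permission.FsPermission;

  // Sketch: 0666 masked by umask 022 yields 0644 (rw-r--r--).
  public class UmaskSketch {
    public static void main(String[] args) {
      FsPermission perm = new FsPermission((short) 0666);
      FsPermission umask = new FsPermission((short) 022);
      FsPermission masked = perm.applyUMask(umask);
      System.out.println(perm + " with umask " + umask + " -> " + masked);
    }
  }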


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org