hadoop git commit: HDFS-4383. Document the lease limits. Contributed by Arshad Mohammad.

2015-05-21 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk fb6b38d67 -> dc8434ab2


HDFS-4383. Document the lease limits. Contributed by Arshad Mohammad.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc8434ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc8434ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc8434ab

Branch: refs/heads/trunk
Commit: dc8434ab2b177ca9673bd8eecf7b185d4c4ffb31
Parents: fb6b38d
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 21 17:30:43 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 21 17:30:43 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hdfs/server/common/HdfsServerConstants.java | 23 +---
 2 files changed, 22 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8434ab/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5bcaddd..77d7369 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -575,6 +575,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-4185. Add a metric for number of active leases (Rakesh R via raviprak)
 
+HDFS-4383. Document the lease limits. (Arshad Mohammad via aajisaka)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8434ab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index c664b01..26a7ab3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -42,10 +42,27 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceAudience.Private
 public interface HdfsServerConstants {
   int MIN_BLOCKS_FOR_WRITE = 1;
-  //
-  // Timeouts, constants
-  //
+  /**
+   * For a HDFS client to write to a file, a lease is granted; During the lease
+   * period, no other client can write to the file. The writing client can
+   * periodically renew the lease. When the file is closed, the lease is
+   * revoked. The lease duration is bound by this soft limit and a
+   * {@link HdfsServerConstants#LEASE_HARDLIMIT_PERIOD hard limit}. Until the
+   * soft limit expires, the writer has sole write access to the file. If the
+   * soft limit expires and the client fails to close the file or renew the
+   * lease, another client can preempt the lease.
+   */
   long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
+  /**
+   * For a HDFS client to write to a file, a lease is granted; During the lease
+   * period, no other client can write to the file. The writing client can
+   * periodically renew the lease. When the file is closed, the lease is
+   * revoked. The lease duration is bound by a
+   * {@link HdfsServerConstants#LEASE_SOFTLIMIT_PERIOD soft limit} and this 
hard
+   * limit. If after the hard limit expires and the client has failed to renew
+   * the lease, HDFS assumes that the client has quit and will automatically
+   * close the file on behalf of the writer, and recover the lease.
+   */
   long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
   long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms
   // We need to limit the length and depth of a path in the filesystem.



hadoop git commit: HDFS-4383. Document the lease limits. Contributed by Arshad Mohammad.

2015-05-21 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e30f91644 -> 185cddfd6


HDFS-4383. Document the lease limits. Contributed by Arshad Mohammad.

(cherry picked from commit dc8434ab2b177ca9673bd8eecf7b185d4c4ffb31)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/185cddfd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/185cddfd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/185cddfd

Branch: refs/heads/branch-2
Commit: 185cddfd6a04b88aa867f52b34be63f371a33c6b
Parents: e30f916
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 21 17:30:43 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 21 17:31:47 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hdfs/server/common/HdfsServerConstants.java | 23 +---
 2 files changed, 22 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/185cddfd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5f98b72..414bba5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -238,6 +238,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-4185. Add a metric for number of active leases (Rakesh R via raviprak)
 
+HDFS-4383. Document the lease limits. (Arshad Mohammad via aajisaka)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/185cddfd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 63c0ac7..eeacd71 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -42,10 +42,27 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceAudience.Private
 public interface HdfsServerConstants {
   int MIN_BLOCKS_FOR_WRITE = 1;
-  //
-  // Timeouts, constants
-  //
+  /**
+   * For a HDFS client to write to a file, a lease is granted; During the lease
+   * period, no other client can write to the file. The writing client can
+   * periodically renew the lease. When the file is closed, the lease is
+   * revoked. The lease duration is bound by this soft limit and a
+   * {@link HdfsServerConstants#LEASE_HARDLIMIT_PERIOD hard limit}. Until the
+   * soft limit expires, the writer has sole write access to the file. If the
+   * soft limit expires and the client fails to close the file or renew the
+   * lease, another client can preempt the lease.
+   */
   long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
+  /**
+   * For a HDFS client to write to a file, a lease is granted; During the lease
+   * period, no other client can write to the file. The writing client can
+   * periodically renew the lease. When the file is closed, the lease is
+   * revoked. The lease duration is bound by a
+   * {@link HdfsServerConstants#LEASE_SOFTLIMIT_PERIOD soft limit} and this 
hard
+   * limit. If after the hard limit expires and the client has failed to renew
+   * the lease, HDFS assumes that the client has quit and will automatically
+   * close the file on behalf of the writer, and recover the lease.
+   */
   long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
   long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms
   // We need to limit the length and depth of a path in the filesystem.



hadoop git commit: HADOOP-10366. Add whitespaces between classes for values in core-default.xml to fit better in browser. Contributed by kanaka kumar avvaru.

2015-05-21 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 185cddfd6 -> b578f76be


HADOOP-10366. Add whitespaces between classes for values in core-default.xml to 
fit better in browser. Contributed by kanaka kumar avvaru.

(cherry picked from commit 0e4f1081c7a98e1c0c4f922f5e2afe467a0d763f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b578f76b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b578f76b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b578f76b

Branch: refs/heads/branch-2
Commit: b578f76bed535e4233f8694f87a7fb966a4a18e7
Parents: 185cddf
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 21 17:52:03 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 21 17:53:18 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 4 
 .../hadoop-common/src/main/resources/core-default.xml| 4 ++--
 .../hadoop-hdfs/src/site/markdown/TransparentEncryption.md   | 2 +-
 3 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b578f76b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 48719d0..e58e9cb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -125,6 +125,10 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11995. Make jetty version configurable from the maven command line.
 (Sriharsha Devineni via wheat9)
 
+HADOOP-10366. Add whitespaces between classes for values in
+core-default.xml to fit better in browser.
+(kanaka kumar avvaru via aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b578f76b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 4909a52..967c51c 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -467,7 +467,7 @@ for ldap providers in the same way as above does.
 
 property
   nameio.serializations/name
-  
valueorg.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization/value
+  valueorg.apache.hadoop.io.serializer.WritableSerialization, 
org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization, 
org.apache.hadoop.io.serializer.avro.AvroReflectSerialization/value
   descriptionA list of serialization classes that can be used for
   obtaining serializers and deserializers./description
 /property
@@ -1657,7 +1657,7 @@ for ldap providers in the same way as above does.
 
 property
   namehadoop.security.crypto.codec.classes.aes.ctr.nopadding/name
-  
valueorg.apache.hadoop.crypto.OpensslAesCtrCryptoCodec,org.apache.hadoop.crypto.JceAesCtrCryptoCodec/value
+  valueorg.apache.hadoop.crypto.OpensslAesCtrCryptoCodec, 
org.apache.hadoop.crypto.JceAesCtrCryptoCodec/value
   description
 Comma-separated list of crypto codec implementations for 
AES/CTR/NoPadding. 
 The first implementation will be used if available, others are fallbacks.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b578f76b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
index aa2acbd..05e4249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
@@ -126,7 +126,7 @@ The prefix for a given crypto codec, contains a 
comma-separated list of implemen
 
  hadoop.security.crypto.codec.classes.aes.ctr.nopadding
 
-Default: 
`org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec,org.apache.hadoop.crypto.JceAesCtrCryptoCodec`
+Default: `org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec, 
org.apache.hadoop.crypto.JceAesCtrCryptoCodec`
 
 Comma-separated list of crypto codec implementations for AES/CTR/NoPadding. 
The first implementation will be used if available, others are fallbacks.
 



hadoop git commit: HADOOP-10366. Add whitespaces between classes for values in core-default.xml to fit better in browser. Contributed by kanaka kumar avvaru.

2015-05-21 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk dc8434ab2 -> 0e4f1081c


HADOOP-10366. Add whitespaces between classes for values in core-default.xml to 
fit better in browser. Contributed by kanaka kumar avvaru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e4f1081
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e4f1081
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e4f1081

Branch: refs/heads/trunk
Commit: 0e4f1081c7a98e1c0c4f922f5e2afe467a0d763f
Parents: dc8434a
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 21 17:52:03 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 21 17:52:03 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 4 
 .../hadoop-common/src/main/resources/core-default.xml| 4 ++--
 .../hadoop-hdfs/src/site/markdown/TransparentEncryption.md   | 2 +-
 3 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e4f1081/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 416b819..aff9368 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -593,6 +593,10 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11995. Make jetty version configurable from the maven command line.
 (Sriharsha Devineni via wheat9)
 
+HADOOP-10366. Add whitespaces between classes for values in
+core-default.xml to fit better in browser.
+(kanaka kumar avvaru via aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e4f1081/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 97e01a8..a1bc780 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -466,7 +466,7 @@ for ldap providers in the same way as above does.
 
 property
   nameio.serializations/name
-  
valueorg.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization/value
+  valueorg.apache.hadoop.io.serializer.WritableSerialization, 
org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization, 
org.apache.hadoop.io.serializer.avro.AvroReflectSerialization/value
   descriptionA list of serialization classes that can be used for
   obtaining serializers and deserializers./description
 /property
@@ -1655,7 +1655,7 @@ for ldap providers in the same way as above does.
 
 property
   namehadoop.security.crypto.codec.classes.aes.ctr.nopadding/name
-  
valueorg.apache.hadoop.crypto.OpensslAesCtrCryptoCodec,org.apache.hadoop.crypto.JceAesCtrCryptoCodec/value
+  valueorg.apache.hadoop.crypto.OpensslAesCtrCryptoCodec, 
org.apache.hadoop.crypto.JceAesCtrCryptoCodec/value
   description
 Comma-separated list of crypto codec implementations for 
AES/CTR/NoPadding. 
 The first implementation will be used if available, others are fallbacks.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e4f1081/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
index aa2acbd..05e4249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
@@ -126,7 +126,7 @@ The prefix for a given crypto codec, contains a 
comma-separated list of implemen
 
  hadoop.security.crypto.codec.classes.aes.ctr.nopadding
 
-Default: 
`org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec,org.apache.hadoop.crypto.JceAesCtrCryptoCodec`
+Default: `org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec, 
org.apache.hadoop.crypto.JceAesCtrCryptoCodec`
 
 Comma-separated list of crypto codec implementations for AES/CTR/NoPadding. 
The first implementation will be used if available, others are fallbacks.
 



hadoop git commit: HDFS-8451. DFSClient probe for encryption testing interprets empty URI property for enabled. Contributed by Steve Loughran.

2015-05-21 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0b909d028 -> 61d7ccb27


HDFS-8451. DFSClient probe for encryption testing interprets empty URI property 
for enabled. Contributed by Steve Loughran.

(cherry picked from commit 05e04f34f27149537fdb89f46af26bee14531ca4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61d7ccb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61d7ccb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61d7ccb2

Branch: refs/heads/branch-2
Commit: 61d7ccb27e9834a42992c2a59637062fcf84a5a4
Parents: 0b909d0
Author: Xiaoyu Yao x...@apache.org
Authored: Thu May 21 11:58:00 2015 -0700
Committer: Xiaoyu Yao x...@apache.org
Committed: Thu May 21 11:59:11 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  9 ++--
 .../java/org/apache/hadoop/hdfs/DFSUtil.java| 22 
 .../apache/hadoop/hdfs/KeyProviderCache.java|  4 ++--
 .../org/apache/hadoop/hdfs/TestDFSUtil.java | 18 
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  2 +-
 6 files changed, 49 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61d7ccb2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9e5f51d..da95c60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -552,6 +552,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8404. Pending block replication can get stuck using older genstamp
 (Nathan Roberts via kihwal)
 
+HDFS-8451. DFSClient probe for encryption testing interprets empty URI
+property for enabled. (Steve Loughran via xyao)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61d7ccb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 522adda..6f7f197 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3179,10 +3179,15 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  /**
+   * Probe for encryption enabled on this filesystem.
+   * See {@link DFSUtil#isHDFSEncryptionEnabled(Configuration)}
+   * @return true if encryption is enabled
+   */
   public boolean isHDFSEncryptionEnabled() {
-return conf.get(
-DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, null) != null;
+return DFSUtil.isHDFSEncryptionEnabled(this.conf);
   }
+
   /**
* Returns the SaslDataTransferClient configured for this DFSClient.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61d7ccb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 69fc470..76ebaac 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -147,8 +147,8 @@ public class DFSUtil {
   a.isDecommissioned() ? 1 : -1;
   }
 };
-
-  
+
+
   /**
* Comparator for sorting DataNodeInfo[] based on decommissioned/stale 
states.
* Decommissioned/stale nodes are moved to the end of the array on sorting
@@ -1495,9 +1495,9 @@ public class DFSUtil {
   public static KeyProvider createKeyProvider(
   final Configuration conf) throws IOException {
 final String providerUriStr =
-conf.get(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, null);
+conf.getTrimmed(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, );
 // No provider set in conf
-if (providerUriStr == null) {
+if (providerUriStr.isEmpty()) {
   return null;
 }
 final URI providerUri;
@@ -1548,4 +1548,18 @@ public class DFSUtil {
   public static int getSmallBufferSize(Configuration conf) {
 return Math.min(getIoFileBufferSize(conf) / 2, 512);
   }
+
+  /**
+   * Probe for HDFS Encryption being enabled; this uses the value of
+   * the option {@link 

hadoop git commit: HDFS-8451. DFSClient probe for encryption testing interprets empty URI property for enabled. Contributed by Steve Loughran.

2015-05-21 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2b6bcfdaf -> 05e04f34f


HDFS-8451. DFSClient probe for encryption testing interprets empty URI property 
for enabled. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05e04f34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05e04f34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05e04f34

Branch: refs/heads/trunk
Commit: 05e04f34f27149537fdb89f46af26bee14531ca4
Parents: 2b6bcfd
Author: Xiaoyu Yao x...@apache.org
Authored: Thu May 21 11:58:00 2015 -0700
Committer: Xiaoyu Yao x...@apache.org
Committed: Thu May 21 11:58:00 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  9 ++--
 .../java/org/apache/hadoop/hdfs/DFSUtil.java| 22 
 .../apache/hadoop/hdfs/KeyProviderCache.java|  4 ++--
 .../org/apache/hadoop/hdfs/TestDFSUtil.java | 18 
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  2 +-
 6 files changed, 49 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05e04f34/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9cfad7d..e830421 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -886,6 +886,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8404. Pending block replication can get stuck using older genstamp
 (Nathan Roberts via kihwal)
 
+HDFS-8451. DFSClient probe for encryption testing interprets empty URI
+property for enabled. (Steve Loughran via xyao)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05e04f34/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index a2b9760..60e5577 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3205,10 +3205,15 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  /**
+   * Probe for encryption enabled on this filesystem.
+   * See {@link DFSUtil#isHDFSEncryptionEnabled(Configuration)}
+   * @return true if encryption is enabled
+   */
   public boolean isHDFSEncryptionEnabled() {
-return conf.get(
-DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, null) != null;
+return DFSUtil.isHDFSEncryptionEnabled(this.conf);
   }
+
   /**
* Returns the SaslDataTransferClient configured for this DFSClient.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05e04f34/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 5f501c1..cae56c0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -145,8 +145,8 @@ public class DFSUtil {
   a.isDecommissioned() ? 1 : -1;
   }
 };
-
-  
+
+
   /**
* Comparator for sorting DataNodeInfo[] based on decommissioned/stale 
states.
* Decommissioned/stale nodes are moved to the end of the array on sorting
@@ -1460,9 +1460,9 @@ public class DFSUtil {
   public static KeyProvider createKeyProvider(
   final Configuration conf) throws IOException {
 final String providerUriStr =
-conf.get(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, null);
+conf.getTrimmed(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, );
 // No provider set in conf
-if (providerUriStr == null) {
+if (providerUriStr.isEmpty()) {
   return null;
 }
 final URI providerUri;
@@ -1513,4 +1513,18 @@ public class DFSUtil {
   public static int getSmallBufferSize(Configuration conf) {
 return Math.min(getIoFileBufferSize(conf) / 2, 512);
   }
+
+  /**
+   * Probe for HDFS Encryption being enabled; this uses the value of
+   * the option {@link DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI},
+   * returning true if that property contains a 

[Hadoop Wiki] Update of dineshs/IsolatingYarnAppsInDockerContainers by Abin Shahab

2015-05-21 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The dineshs/IsolatingYarnAppsInDockerContainers page has been changed by Abin 
Shahab:
https://wiki.apache.org/hadoop/dineshs/IsolatingYarnAppsInDockerContainers?action=diff&rev1=3&rev2=4

  = Isolating YARN Applications in Docker Containers =
  
  The Docker executor for YARN involves work on YARN along with its counterpart 
in Docker to forge the necessary API end points.  The purpose of this page is 
to collect related tickets across both projects in one location.
+ 
+ == May 2015 Update ==
+ 
+ The initial implementation of Docker Container Executor required that
+ all YARN containers be launched in a Docker container.  While this
+ approach allowed us to more quickly get hands-on experiencing bringing
+ Docker to YARN, it's not practical for production clusters.  Also, we
+ noticed that a production-quality implementation of Docker Container
+ Executor would require borrowing a large amount of important -- and
+ security-sensitive -- code and configuration from Linux Container
+ Executor (and it's supporting binary).
+ 
+ As a result, we've concluded that the best way to bring Docker to a
+ production cluster is to add Docker as a feature of the Linux
+ Container Executor.  With this features, individual jobs -- and even
+ individual YARN containers -- can be configured to use Docker
+ containers, while other jobs can continue to use regular Linux
+ containers.
+ 
+ Based on this conclusion, we have developed the following plan for
+ moving forward:
+ 
+ * Add to the Linux Container Executor (LCE) the option to launch
+ containers using Docker.
+ 
+ * Add this functionality in a way that leverages LCE's existing
+ ability to create cgroups to obtain cgroups for Docker containers.
+ 
+ * Add the ability to load the Docker image from a localized tar
+ file (in addition to being able to load from a Docker registry).
+ 
+ * Extend our Docker work to behave correctly in Kerberized clusters.
+ 
+ * Verify that the distributed cache works correctly in Docker
+ containers (we think it does, but we haven't fully tested).
+ 
+ We are targeting a beta release of this functionality for Hadoop 2.8.
  
  == Motivation ==
  


hadoop git commit: YARN-3675. FairScheduler: RM quits when node removal races with continuous-scheduling on the same node. (Anubhav Dhoot via kasha)

2015-05-21 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 05e04f34f -> 451376186


YARN-3675. FairScheduler: RM quits when node removal races with 
continuous-scheduling on the same node. (Anubhav Dhoot via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45137618
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45137618
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45137618

Branch: refs/heads/trunk
Commit: 4513761869c732cf2f462763043067ebf8749df7
Parents: 05e04f3
Author: Karthik Kambatla ka...@apache.org
Authored: Thu May 21 13:38:30 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Thu May 21 13:44:42 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../scheduler/fair/FairScheduler.java   | 14 ++-
 .../scheduler/fair/TestFairScheduler.java   | 44 
 3 files changed, 59 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45137618/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e5a9ee9..0d1e067 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -541,6 +541,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3646. Applications are getting stuck some times in case of retry
 policy forever. (Raju Bairishetti via devaraj)
 
+YARN-3675. FairScheduler: RM quits when node removal races with 
+continuous-scheduling on the same node. (Anubhav Dhoot via kasha)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45137618/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index f481de5..07b3271 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -1039,13 +1039,23 @@ public class FairScheduler extends
   nodes.get(n1).getAvailableResource());
 }
   }
-  
-  private synchronized void attemptScheduling(FSSchedulerNode node) {
+
+  @VisibleForTesting
+  synchronized void attemptScheduling(FSSchedulerNode node) {
    if (rmContext.isWorkPreservingRecoveryEnabled()
        && !rmContext.isSchedulerReadyForAllocatingContainers()) {
   return;
 }
 
+final NodeId nodeID = node.getNodeID();
+if (!nodes.containsKey(nodeID)) {
+  // The node might have just been removed while this thread was waiting
+  // on the synchronized lock before it entered this synchronized method
+      LOG.info("Skipping scheduling as the node " + nodeID +
+          " has been removed");
+  return;
+}
+
 // Assign new containers...
 // 1. Check for reserved applications
 // 2. Schedule if there are no reservations

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45137618/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 69e0a8c..94fdc1a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -3890,6 +3890,50 @@ public 

hadoop git commit: YARN-3675. FairScheduler: RM quits when node removal races with continuous-scheduling on the same node. (Anubhav Dhoot via kasha)

2015-05-21 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 61d7ccb27 - e8ac88d4f


YARN-3675. FairScheduler: RM quits when node removal races with 
continuous-scheduling on the same node. (Anubhav Dhoot via kasha)

(cherry picked from commit a8b50e46737c11936ba72c427da69b2365a07aac)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8ac88d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8ac88d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8ac88d4

Branch: refs/heads/branch-2
Commit: e8ac88d4fe8aaef9d2e5fb76e6bc50223ff0e495
Parents: 61d7ccb
Author: Karthik Kambatla ka...@apache.org
Authored: Thu May 21 13:38:30 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Thu May 21 13:39:40 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  4 ++
 .../scheduler/fair/FairScheduler.java   | 14 ++-
 .../scheduler/fair/TestFairScheduler.java   | 44 
 3 files changed, 60 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8ac88d4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 12cf925..b7e66d5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -464,6 +464,7 @@ Release 2.7.1 - UNRELEASED
 YARN-3493. RM fails to come up with error Failed to load/recover state 
 when mem settings are changed. (Jian He via wangda)
 
+ HEAD
 YARN-3626. On Windows localized resources are not moved to the front
 of the classpath when they should be. (Craig Welch via xgong)
 
@@ -499,6 +500,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3646. Applications are getting stuck some times in case of retry
 policy forever. (Raju Bairishetti via devaraj)
 
+YARN-3675. FairScheduler: RM quits when node removal races with 
+continuous-scheduling on the same node. (Anubhav Dhoot via kasha)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8ac88d4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index f481de5..07b3271 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -1039,13 +1039,23 @@ public class FairScheduler extends
   nodes.get(n1).getAvailableResource());
 }
   }
-  
-  private synchronized void attemptScheduling(FSSchedulerNode node) {
+
+  @VisibleForTesting
+  synchronized void attemptScheduling(FSSchedulerNode node) {
    if (rmContext.isWorkPreservingRecoveryEnabled()
        && !rmContext.isSchedulerReadyForAllocatingContainers()) {
   return;
 }
 
+final NodeId nodeID = node.getNodeID();
+if (!nodes.containsKey(nodeID)) {
+  // The node might have just been removed while this thread was waiting
+  // on the synchronized lock before it entered this synchronized method
+      LOG.info("Skipping scheduling as the node " + nodeID +
+          " has been removed");
+  return;
+}
+
 // Assign new containers...
 // 1. Check for reserved applications
 // 2. Schedule if there are no reservations

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8ac88d4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index ee20863..0e6367d 100644
--- 

hadoop git commit: HADOOP-12016. Typo in FileSystem::listStatusIterator. Contributed by Arthur Vigil.

2015-05-21 Thread jghoman
Repository: hadoop
Updated Branches:
  refs/heads/trunk 451376186 - 4fc942a84


HADOOP-12016. Typo in FileSystem::listStatusIterator. Contributed by Arthur 
Vigil.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4fc942a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4fc942a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4fc942a8

Branch: refs/heads/trunk
Commit: 4fc942a84f492065bacfa30cf8b624dc6a5f062b
Parents: 4513761
Author: Jakob Homan jgho...@gmail.com
Authored: Thu May 21 14:50:03 2015 -0700
Committer: Jakob Homan jgho...@gmail.com
Committed: Thu May 21 14:50:03 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/fs/FileSystem.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fc942a8/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index aff9368..32f0630 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -218,6 +218,9 @@ Trunk (Unreleased)
 HADOOP-10993. Dump java command line to *.out file
 (Kengo Seki via vinayakumarb)
 
+HADOOP-12016. Typo in FileSystem::listStatusIterator 
+(Arthur Vigil via jghoman)
+
   BUG FIXES
 
 HADOOP-11473. test-patch says -1 overall even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fc942a8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 33d7c88..01d4b27 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1720,7 +1720,7 @@ public abstract class FileSystem extends Configured 
implements Closeable {
   @Override
   public LocatedFileStatus next() throws IOException {
 if (!hasNext()) {
-          throw new NoSuchElementException("No more entry in " + f);
+          throw new NoSuchElementException("No more entries in " + f);
 }
 FileStatus result = stats[i++];
 BlockLocation[] locs = result.isFile() ?



hadoop git commit: HDFS-8294. Erasure Coding: Fix Findbug warnings present in erasure coding. Contributed by Rakesh R.

2015-05-21 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 9fdb5be67 - 579677d02


HDFS-8294. Erasure Coding: Fix Findbug warnings present in erasure coding. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/579677d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/579677d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/579677d0

Branch: refs/heads/HDFS-7285
Commit: 579677d0222da0f5e6759c4f0ccd6c2ab2dbeef1
Parents: 9fdb5be
Author: Zhe Zhang zhezh...@cloudera.com
Authored: Thu May 21 14:40:14 2015 -0700
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Thu May 21 14:40:14 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 +++
 .../org/apache/hadoop/hdfs/DFSStripedOutputStream.java  | 12 ++--
 .../BlockInfoStripedUnderConstruction.java  |  3 +++
 .../datanode/erasurecode/ErasureCodingWorker.java   |  4 ++--
 .../hdfs/server/namenode/ErasureCodingZoneManager.java  |  4 ++--
 .../org/apache/hadoop/hdfs/util/StripedBlockUtil.java   |  6 +++---
 6 files changed, 19 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/579677d0/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 3bdff6f..c986f19 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -247,3 +247,6 @@
 
 HDFS-8186. Erasure coding: Make block placement policy for EC file 
configurable.
 (Walter Su via zhz)
+
+HDFS-8294. Erasure Coding: Fix Findbug warnings present in erasure coding.
+(Rakesh R via zhz)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/579677d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 8eed6ad..515ce0c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -276,11 +276,11 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
 return getCurrentStreamer().getIndex();
   }
 
-  StripedDataStreamer getCurrentStreamer() {
+  private synchronized StripedDataStreamer getCurrentStreamer() {
 return (StripedDataStreamer)streamer;
   }
 
-  private StripedDataStreamer setCurrentStreamer(int i) {
+  private synchronized StripedDataStreamer setCurrentStreamer(int i) {
 streamer = streamers.get(i);
 return getCurrentStreamer();
   }
@@ -344,8 +344,8 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
 int ckOff = 0;
    while (byteBuffer.remaining() > 0) {
   DFSPacket p = createPacket(packetSize, chunksPerPacket,
-  streamer.getBytesCurBlock(),
-  streamer.getAndIncCurrentSeqno(), false);
+  getCurrentStreamer().getBytesCurBlock(),
+  getCurrentStreamer().getAndIncCurrentSeqno(), false);
   int maxBytesToPacket = p.getMaxChunks() * bytesPerChecksum;
      int toWrite = byteBuffer.remaining() > maxBytesToPacket ?
          maxBytesToPacket : byteBuffer.remaining();
@@ -353,7 +353,7 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
   p.writeChecksum(checksumBuf, ckOff, ckLen);
   ckOff += ckLen;
   p.writeData(byteBuffer, toWrite);
-  streamer.incBytesCurBlock(toWrite);
+  getCurrentStreamer().incBytesCurBlock(toWrite);
   packets.add(p);
 }
 return packets;
@@ -529,7 +529,7 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
 if (!current.isFailed()) {
   try {
 for (DFSPacket p : generatePackets(buffer, checksumBuf)) {
-  streamer.waitAndQueuePacket(p);
+  getCurrentStreamer().waitAndQueuePacket(p);
 }
 endBlock();
   } catch(Exception e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/579677d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
 

hadoop git commit: YARN-3411. [Storage implementation] explore the native HBase write schema for storage (Vrushali C via sjlee)

2015-05-21 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 463e070a8 - 7a3068854


YARN-3411. [Storage implementation] explore the native HBase write schema for 
storage (Vrushali C via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a306885
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a306885
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a306885

Branch: refs/heads/YARN-2928
Commit: 7a3068854d27eadae1c57545988f5b2029bf119a
Parents: 463e070
Author: Sangjin Lee sj...@apache.org
Authored: Thu May 21 14:11:01 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu May 21 14:11:01 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../collector/TimelineCollectorManager.java |  19 +
 .../storage/EntityColumnDetails.java| 110 ++
 .../storage/EntityColumnFamily.java |  95 +
 .../storage/HBaseTimelineWriterImpl.java| 225 
 .../server/timelineservice/storage/Range.java   |  59 
 .../storage/TimelineEntitySchemaConstants.java  |  71 
 .../storage/TimelineSchemaCreator.java  | 231 +
 .../storage/TimelineWriterUtils.java| 344 +++
 .../storage/TestHBaseTimelineWriterImpl.java| 292 
 10 files changed, 1448 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a306885/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 808fdf7..975e3c6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -73,6 +73,8 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3634. TestMRTimelineEventHandling and TestApplication are broken. (
 Sangjin Lee via junping_du)
 
+YARN-3411. [Storage implementation] explore the native HBase write schema
+for storage (Vrushali C via sjlee)
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a306885/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 61fa1d7..7e2d4e0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -59,6 +59,13 @@ public class TimelineCollectorManager extends 
AbstractService {
 super.serviceInit(conf);
   }
 
+  @Override
+  protected void serviceStart() throws Exception {
+super.serviceStart();
+if (writer != null) {
+  writer.start();
+}
+  }
 
   // access to this map is synchronized with the map itself
   private final MapApplicationId, TimelineCollector collectors =
@@ -151,4 +158,16 @@ public class TimelineCollectorManager extends 
AbstractService {
 return collectors.containsKey(appId);
   }
 
+  @Override
+  protected void serviceStop() throws Exception {
+    if (collectors != null && collectors.size() > 1) {
+  for (TimelineCollector c : collectors.values()) {
+c.serviceStop();
+  }
+}
+if (writer != null) {
+  writer.close();
+}
+super.serviceStop();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a306885/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
new file mode 100644
index 000..2894c41
--- /dev/null
+++ 

hadoop git commit: HDFS-8421. Move startFile() and related functions into FSDirWriteFileOp. Contributed by Haohui Mai.

2015-05-21 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0305316d6 - 2b6bcfdaf


HDFS-8421. Move startFile() and related functions into FSDirWriteFileOp. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b6bcfda
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b6bcfda
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b6bcfda

Branch: refs/heads/trunk
Commit: 2b6bcfdafa91223a4116e3e9304579f5f91dccac
Parents: 0305316
Author: Haohui Mai whe...@apache.org
Authored: Thu May 21 08:05:10 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Thu May 21 08:08:28 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 324 ++-
 .../hdfs/server/namenode/FSDirectory.java   |  91 --
 .../hdfs/server/namenode/FSEditLogLoader.java   |  15 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 280 +++-
 5 files changed, 371 insertions(+), 342 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b6bcfda/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 77d7369..9cfad7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -577,6 +577,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-4383. Document the lease limits. (Arshad Mohammad via aajisaka)
 
+HDFS-8421. Move startFile() and related functions into FSDirWriteFileOp.
+(wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b6bcfda/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 1ff0899..307bd59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -18,11 +18,27 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
+import org.apache.commons.io.Charsets;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -34,15 +50,22 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.util.ChunkedArrayList;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
+import static 
org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
+import static org.apache.hadoop.util.Time.now;
+
 class FSDirWriteFileOp {
   private FSDirWriteFileOp() {}
   static boolean unprotectedRemoveBlock(
@@ -278,6 +301,210 @@ class FSDirWriteFileOp {
   }
 
   /**
+   * Create a new file or overwrite an existing file<br>
+   *
+   * 

hadoop git commit: YARN-3646. Applications are getting stuck some times in case of retry policy forever. Contributed by Raju Bairishetti.

2015-05-21 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/trunk a5def5808 - 0305316d6


YARN-3646. Applications are getting stuck some times in case of retry
policy forever. Contributed by Raju Bairishetti.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0305316d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0305316d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0305316d

Branch: refs/heads/trunk
Commit: 0305316d6932e6f1a05021354d77b6934e57e171
Parents: a5def58
Author: Devaraj K deva...@apache.org
Authored: Thu May 21 20:14:44 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Thu May 21 20:14:44 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../yarn/client/api/impl/TestYarnClient.java| 32 
 .../org/apache/hadoop/yarn/client/RMProxy.java  | 15 -
 3 files changed, 43 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0305316d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d1d2258..e5a9ee9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -538,6 +538,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3694. Fix dead link for TimelineServer REST API.
 (Jagadesh Kiran N via aajisaka)
 
+YARN-3646. Applications are getting stuck some times in case of retry
+policy forever. (Raju Bairishetti via devaraj)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0305316d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 511fa4a..bc40b9a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -1265,4 +1265,36 @@ public class TestYarnClient {
 ReservationSystemTestUtil.reservationQ);
 return request;
   }
+
+  @Test(timeout = 3, expected = ApplicationNotFoundException.class)
+  public void testShouldNotRetryForeverForNonNetworkExceptions() throws 
Exception {
+YarnConfiguration conf = new YarnConfiguration();
+conf.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, -1);
+
+ResourceManager rm = null;
+YarnClient yarnClient = null;
+try {
+  // start rm
+  rm = new ResourceManager();
+  rm.init(conf);
+  rm.start();
+
+  yarnClient = YarnClient.createYarnClient();
+  yarnClient.init(conf);
+  yarnClient.start();
+
+  // create invalid application id
+  ApplicationId appId = ApplicationId.newInstance(1430126768L, 10645);
+
+  // RM should throw ApplicationNotFoundException exception
+  yarnClient.getApplicationReport(appId);
+} finally {
+  if (yarnClient != null) {
+yarnClient.stop();
+  }
+  if (rm != null) {
+rm.stop();
+  }
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0305316d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
index fa8d642..28628f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
@@ -224,19 +224,20 @@ public class RMProxy<T> {
   failoverSleepBaseMs, failoverSleepMaxMs);
 }
 
-if (waitForEver) {
-  return RetryPolicies.RETRY_FOREVER;
-}
-
    if (rmConnectionRetryIntervalMS < 0) {
      throw new YarnRuntimeException("Invalid Configuration. " +
          YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS +
          " should not be negative.");
 }
 
-RetryPolicy retryPolicy =
-RetryPolicies.retryUpToMaximumTimeWithFixedSleep(rmConnectWaitMS,
-rmConnectionRetryIntervalMS, 

hadoop git commit: HDFS-8421. Move startFile() and related functions into FSDirWriteFileOp. Contributed by Haohui Mai.

2015-05-21 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 157ecb224 - 0b909d028


HDFS-8421. Move startFile() and related functions into FSDirWriteFileOp. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b909d02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b909d02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b909d02

Branch: refs/heads/branch-2
Commit: 0b909d028fd7279398808893c83ff6bad68f67b0
Parents: 157ecb2
Author: Haohui Mai whe...@apache.org
Authored: Thu May 21 08:05:10 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Thu May 21 08:06:13 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 324 ++-
 .../hdfs/server/namenode/FSDirectory.java   |  91 --
 .../hdfs/server/namenode/FSEditLogLoader.java   |  15 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 280 +++-
 5 files changed, 371 insertions(+), 342 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b909d02/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 414bba5..9e5f51d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -240,6 +240,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-4383. Document the lease limits. (Arshad Mohammad via aajisaka)
 
+HDFS-8421. Move startFile() and related functions into FSDirWriteFileOp.
+(wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b909d02/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 1ff0899..307bd59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -18,11 +18,27 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
+import org.apache.commons.io.Charsets;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -34,15 +50,22 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.util.ChunkedArrayList;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
+import static 
org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
+import static org.apache.hadoop.util.Time.now;
+
 class FSDirWriteFileOp {
   private FSDirWriteFileOp() {}
   static boolean unprotectedRemoveBlock(
@@ -278,6 +301,210 @@ class FSDirWriteFileOp {
   }
 
   /**
+   * Create a new file or overwrite an existing file<br>
+   *
+   

hadoop git commit: YARN-3646. Applications are getting stuck some times in case of retry policy forever. Contributed by Raju Bairishetti.

2015-05-21 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 7a8e076ff - b68c338b1


YARN-3646. Applications are getting stuck some times in case of retry
policy forever. Contributed by Raju Bairishetti.

(cherry picked from commit 0305316d6932e6f1a05021354d77b6934e57e171)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b68c338b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b68c338b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b68c338b

Branch: refs/heads/branch-2.7
Commit: b68c338b17ea8cf98af9b72f8f05b923ccdd2f26
Parents: 7a8e076
Author: Devaraj K deva...@apache.org
Authored: Thu May 21 20:14:44 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Thu May 21 20:16:53 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../yarn/client/api/impl/TestYarnClient.java| 32 
 .../org/apache/hadoop/yarn/client/RMProxy.java  | 15 -
 3 files changed, 43 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b68c338b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4b325ad..6b95c41 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -115,6 +115,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3694. Fix dead link for TimelineServer REST API.
 (Jagadesh Kiran N via aajisaka)
 
+YARN-3646. Applications are getting stuck some times in case of retry
+policy forever. (Raju Bairishetti via devaraj)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b68c338b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index de669f2..738b0a8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -1214,4 +1214,36 @@ public class TestYarnClient {
 ReservationSystemTestUtil.reservationQ);
 return request;
   }
+
+  @Test(timeout = 30000, expected = ApplicationNotFoundException.class)
+  public void testShouldNotRetryForeverForNonNetworkExceptions() throws 
Exception {
+YarnConfiguration conf = new YarnConfiguration();
+conf.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, -1);
+
+ResourceManager rm = null;
+YarnClient yarnClient = null;
+try {
+  // start rm
+  rm = new ResourceManager();
+  rm.init(conf);
+  rm.start();
+
+  yarnClient = YarnClient.createYarnClient();
+  yarnClient.init(conf);
+  yarnClient.start();
+
+  // create invalid application id
+  ApplicationId appId = ApplicationId.newInstance(1430126768L, 10645);
+
+  // RM should throw ApplicationNotFoundException exception
+  yarnClient.getApplicationReport(appId);
+} finally {
+  if (yarnClient != null) {
+yarnClient.stop();
+  }
+  if (rm != null) {
+rm.stop();
+  }
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b68c338b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
index fa8d642..28628f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
@@ -224,19 +224,20 @@ public class RMProxy<T> {
   failoverSleepBaseMs, failoverSleepMaxMs);
 }
 
-if (waitForEver) {
-  return RetryPolicies.RETRY_FOREVER;
-}
-
 if (rmConnectionRetryIntervalMS < 0) {
   throw new YarnRuntimeException("Invalid Configuration. " +
   YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS +
" should not be negative.");
 }
 
-RetryPolicy retryPolicy =
-

hadoop git commit: YARN-3646. Applications are getting stuck some times in case of retry policy forever. Contributed by Raju Bairishetti.

2015-05-21 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6d7e7ef1c - 157ecb224


YARN-3646. Applications are getting stuck some times in case of retry
policy forever. Contributed by Raju Bairishetti.

(cherry picked from commit 0305316d6932e6f1a05021354d77b6934e57e171)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/157ecb22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/157ecb22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/157ecb22

Branch: refs/heads/branch-2
Commit: 157ecb22418e22abf7078d0d0eb2e2249b8941b8
Parents: 6d7e7ef
Author: Devaraj K deva...@apache.org
Authored: Thu May 21 20:14:44 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Thu May 21 20:16:09 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../yarn/client/api/impl/TestYarnClient.java| 32 
 .../org/apache/hadoop/yarn/client/RMProxy.java  | 15 -
 3 files changed, 43 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/157ecb22/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2939373..12cf925 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -496,6 +496,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3694. Fix dead link for TimelineServer REST API.
 (Jagadesh Kiran N via aajisaka)
 
+YARN-3646. Applications are getting stuck some times in case of retry
+policy forever. (Raju Bairishetti via devaraj)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/157ecb22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 511fa4a..bc40b9a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -1265,4 +1265,36 @@ public class TestYarnClient {
 ReservationSystemTestUtil.reservationQ);
 return request;
   }
+
+  @Test(timeout = 30000, expected = ApplicationNotFoundException.class)
+  public void testShouldNotRetryForeverForNonNetworkExceptions() throws 
Exception {
+YarnConfiguration conf = new YarnConfiguration();
+conf.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, -1);
+
+ResourceManager rm = null;
+YarnClient yarnClient = null;
+try {
+  // start rm
+  rm = new ResourceManager();
+  rm.init(conf);
+  rm.start();
+
+  yarnClient = YarnClient.createYarnClient();
+  yarnClient.init(conf);
+  yarnClient.start();
+
+  // create invalid application id
+  ApplicationId appId = ApplicationId.newInstance(1430126768L, 10645);
+
+  // RM should throw ApplicationNotFoundException exception
+  yarnClient.getApplicationReport(appId);
+} finally {
+  if (yarnClient != null) {
+yarnClient.stop();
+  }
+  if (rm != null) {
+rm.stop();
+  }
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/157ecb22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
index fa8d642..28628f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
@@ -224,19 +224,20 @@ public class RMProxy<T> {
   failoverSleepBaseMs, failoverSleepMaxMs);
 }
 
-if (waitForEver) {
-  return RetryPolicies.RETRY_FOREVER;
-}
-
 if (rmConnectionRetryIntervalMS < 0) {
   throw new YarnRuntimeException("Invalid Configuration. " +
   YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS +
" should not be negative.");
 }
 
-RetryPolicy retryPolicy =
-

hadoop git commit: YARN-3694. Fix dead link for TimelineServer REST API. Contributed by Jagadesh Kiran N.

2015-05-21 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 ce45e4e82 - 7a8e076ff


YARN-3694. Fix dead link for TimelineServer REST API. Contributed by Jagadesh 
Kiran N.

(cherry picked from commit a5def580879428bc7af3c030ef33554e0519f072)
(cherry picked from commit 6d7e7ef1c4df613004f20911136db5be8b474265)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a8e076f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a8e076f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a8e076f

Branch: refs/heads/branch-2.7
Commit: 7a8e076ffb4e5e4a12514e3c95843dc89d377974
Parents: ce45e4e
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 21 23:14:44 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 21 23:15:59 2015 +0900

--
 hadoop-project/src/site/site.xml | 2 +-
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a8e076f/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index efe0d18..5bbce8a 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -134,7 +134,7 @@
   item name=Introduction 
href=hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html/
   item name=Resource Manager 
href=hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html/
   item name=Node Manager 
href=hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html/
-  <item name="Timeline Server" href="TimelineServer.html#Timeline_Server_REST_API_v1"/>
+  <item name="Timeline Server" href="hadoop-yarn/hadoop-yarn-site/TimelineServer.html#Timeline_Server_REST_API_v1"/>
 /menu
 
 menu name=Hadoop Compatible File Systems inherit=top

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a8e076f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6d97412..4b325ad 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -112,6 +112,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3609. Load node labels from storage inside RM serviceStart. (Wangda
 Tan via jianhe)
 
+YARN-3694. Fix dead link for TimelineServer REST API.
+(Jagadesh Kiran N via aajisaka)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



hadoop git commit: YARN-3694. Fix dead link for TimelineServer REST API. Contributed by Jagadesh Kiran N.

2015-05-21 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b578f76be - 6d7e7ef1c


YARN-3694. Fix dead link for TimelineServer REST API. Contributed by Jagadesh 
Kiran N.

(cherry picked from commit a5def580879428bc7af3c030ef33554e0519f072)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d7e7ef1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d7e7ef1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d7e7ef1

Branch: refs/heads/branch-2
Commit: 6d7e7ef1c4df613004f20911136db5be8b474265
Parents: b578f76
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 21 23:14:44 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 21 23:15:17 2015 +0900

--
 hadoop-project/src/site/site.xml | 2 +-
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d7e7ef1/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index b525920..7eff8d8 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -133,7 +133,7 @@
   item name=Introduction 
href=hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html/
   item name=Resource Manager 
href=hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html/
   item name=Node Manager 
href=hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html/
-  <item name="Timeline Server" href="TimelineServer.html#Timeline_Server_REST_API_v1"/>
+  <item name="Timeline Server" href="hadoop-yarn/hadoop-yarn-site/TimelineServer.html#Timeline_Server_REST_API_v1"/>
 /menu
 
 menu name=Hadoop Compatible File Systems inherit=top

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d7e7ef1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a5637da..2939373 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -493,6 +493,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3609. Load node labels from storage inside RM serviceStart. (Wangda
 Tan via jianhe)
 
+YARN-3694. Fix dead link for TimelineServer REST API.
+(Jagadesh Kiran N via aajisaka)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



hadoop git commit: YARN-3694. Fix dead link for TimelineServer REST API. Contributed by Jagadesh Kiran N.

2015-05-21 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0e4f1081c - a5def5808


YARN-3694. Fix dead link for TimelineServer REST API. Contributed by Jagadesh 
Kiran N.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5def580
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5def580
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5def580

Branch: refs/heads/trunk
Commit: a5def580879428bc7af3c030ef33554e0519f072
Parents: 0e4f108
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 21 23:14:44 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 21 23:14:44 2015 +0900

--
 hadoop-project/src/site/site.xml | 2 +-
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5def580/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 7234881..f3bb458 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -134,7 +134,7 @@
   item name=Introduction 
href=hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html/
   item name=Resource Manager 
href=hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html/
   item name=Node Manager 
href=hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html/
-  <item name="Timeline Server" href="TimelineServer.html#Timeline_Server_REST_API_v1"/>
+  <item name="Timeline Server" href="hadoop-yarn/hadoop-yarn-site/TimelineServer.html#Timeline_Server_REST_API_v1"/>
 /menu
 
 menu name=Hadoop Compatible File Systems inherit=top

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5def580/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3cba027..d1d2258 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -535,6 +535,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3609. Load node labels from storage inside RM serviceStart. (Wangda
 Tan via jianhe)
 
+YARN-3694. Fix dead link for TimelineServer REST API.
+(Jagadesh Kiran N via aajisaka)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



[2/2] hadoop git commit: YARN-3684. Changed ContainerExecutor's primary lifecycle methods to use a more extensible mechanism of context objects. Contributed by Sidharta Seethana.

2015-05-21 Thread vinodkv
YARN-3684. Changed ContainerExecutor's primary lifecycle methods to use a more 
extensible mechanism of context objects. Contributed by Sidharta Seethana.

(cherry picked from commit 53fafcf061616516c24e2e2007a66a93d23d3e25)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/673bd970
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/673bd970
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/673bd970

Branch: refs/heads/branch-2
Commit: 673bd9708fb85a5f1e07ee80b6367537ce3bb6cd
Parents: e8ac88d
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu May 21 15:50:23 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu May 21 15:51:19 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../server/nodemanager/ContainerExecutor.java   |  80 +
 .../nodemanager/DefaultContainerExecutor.java   |  46 +++--
 .../server/nodemanager/DeletionService.java |  13 +-
 .../nodemanager/DockerContainerExecutor.java|  45 +++--
 .../nodemanager/LinuxContainerExecutor.java |  62 +--
 .../WindowsSecureContainerExecutor.java | 175 ++-
 .../launcher/ContainerLaunch.java   |  22 ++-
 .../launcher/RecoveredContainerLaunch.java  |   7 +-
 .../localizer/ResourceLocalizationService.java  |  17 +-
 .../executor/ContainerLivenessContext.java  |  70 
 .../executor/ContainerReacquisitionContext.java |  71 
 .../executor/ContainerSignalContext.java|  83 +
 .../executor/ContainerStartContext.java | 147 
 .../executor/DeletionAsUserContext.java |  91 ++
 .../executor/LocalizerStartContext.java | 122 +
 .../TestDefaultContainerExecutor.java   |  51 --
 .../server/nodemanager/TestDeletionService.java |  19 +-
 .../TestDockerContainerExecutor.java|  14 +-
 .../TestDockerContainerExecutorWithMocks.java   |  42 -
 .../nodemanager/TestLinuxContainerExecutor.java |  72 ++--
 .../TestLinuxContainerExecutorWithMocks.java| 124 +
 .../BaseContainerManagerTest.java   |   8 +-
 .../TestResourceLocalizationService.java|  16 +-
 .../TestLogAggregationService.java  |   9 +-
 .../monitor/TestContainersMonitor.java  |   8 +-
 26 files changed, 1169 insertions(+), 248 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/673bd970/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b7e66d5..6924d8a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -208,6 +208,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3583. Support of NodeLabel object instead of plain String 
 in YarnClient side. (Sunil G via wangda)
 
+YARN-3684. Changed ContainerExecutor's primary lifecycle methods to use a 
more
+extensible mechanism of context objects. (Sidharta Seethana via vinodkv)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/673bd970/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 1c670a1..7029e46 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -22,7 +22,6 @@ import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
-import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -46,6 +45,12 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
+import 

[2/2] hadoop git commit: YARN-3684. Changed ContainerExecutor's primary lifecycle methods to use a more extensible mechanism of context objects. Contributed by Sidharta Seethana.

2015-05-21 Thread vinodkv
YARN-3684. Changed ContainerExecutor's primary lifecycle methods to use a more 
extensible mechanism of context objects. Contributed by Sidharta Seethana.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53fafcf0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53fafcf0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53fafcf0

Branch: refs/heads/trunk
Commit: 53fafcf061616516c24e2e2007a66a93d23d3e25
Parents: 4fc942a
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu May 21 15:50:23 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu May 21 15:50:23 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../server/nodemanager/ContainerExecutor.java   |  80 +
 .../nodemanager/DefaultContainerExecutor.java   |  46 +++--
 .../server/nodemanager/DeletionService.java |  13 +-
 .../nodemanager/DockerContainerExecutor.java|  45 +++--
 .../nodemanager/LinuxContainerExecutor.java |  62 +--
 .../WindowsSecureContainerExecutor.java | 175 ++-
 .../launcher/ContainerLaunch.java   |  22 ++-
 .../launcher/RecoveredContainerLaunch.java  |   7 +-
 .../localizer/ResourceLocalizationService.java  |  17 +-
 .../executor/ContainerLivenessContext.java  |  70 
 .../executor/ContainerReacquisitionContext.java |  71 
 .../executor/ContainerSignalContext.java|  83 +
 .../executor/ContainerStartContext.java | 147 
 .../executor/DeletionAsUserContext.java |  91 ++
 .../executor/LocalizerStartContext.java | 122 +
 .../TestDefaultContainerExecutor.java   |  51 --
 .../server/nodemanager/TestDeletionService.java |  19 +-
 .../TestDockerContainerExecutor.java|  14 +-
 .../TestDockerContainerExecutorWithMocks.java   |  42 -
 .../nodemanager/TestLinuxContainerExecutor.java |  72 ++--
 .../TestLinuxContainerExecutorWithMocks.java| 124 +
 .../BaseContainerManagerTest.java   |   8 +-
 .../TestResourceLocalizationService.java|  16 +-
 .../TestLogAggregationService.java  |   9 +-
 .../monitor/TestContainersMonitor.java  |   8 +-
 26 files changed, 1169 insertions(+), 248 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53fafcf0/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0d1e067..59d6ee6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -253,6 +253,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3583. Support of NodeLabel object instead of plain String 
 in YarnClient side. (Sunil G via wangda)
 
+YARN-3684. Changed ContainerExecutor's primary lifecycle methods to use a 
more
+extensible mechanism of context objects. (Sidharta Seethana via vinodkv)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53fafcf0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 1c670a1..7029e46 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -22,7 +22,6 @@ import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
-import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -46,6 +45,12 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext;
+import 

[1/2] hadoop git commit: YARN-3684. Changed ContainerExecutor's primary lifecycle methods to use a more extensible mechanism of context objects. Contributed by Sidharta Seethana.

2015-05-21 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4fc942a84 - 53fafcf06


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53fafcf0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
index 723ac92..58debc9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
@@ -60,6 +60,11 @@ import 
org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext;
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
 import org.apache.hadoop.yarn.server.nodemanager.util.LCEResourcesHandler;
 import org.junit.After;
 import org.junit.Assert;
@@ -208,7 +213,10 @@ public class TestLinuxContainerExecutor {
   Path usercachedir = new Path(dir, ContainerLocalizer.USERCACHE);
   Path userdir = new Path(usercachedir, user);
   Path appcachedir = new Path(userdir, ContainerLocalizer.APPCACHE);
-  exec.deleteAsUser(user, appcachedir);
+  exec.deleteAsUser(new DeletionAsUserContext.Builder()
+  .setUser(user)
+  .setSubDir(appcachedir)
+  .build());
   FileContext.getLocalFSFileContext().delete(usercachedir, true);
 }
   }
@@ -218,7 +226,10 @@ public class TestLinuxContainerExecutor {
 for (String dir : localDirs) {
   Path filecache = new Path(dir, ContainerLocalizer.FILECACHE);
   Path filedir = new Path(filecache, user);
-  exec.deleteAsUser(user, filedir);
+  exec.deleteAsUser(new DeletionAsUserContext.Builder()
+  .setUser(user)
+  .setSubDir(filedir)
+  .build());
 }
   }
 
@@ -229,7 +240,10 @@ public class TestLinuxContainerExecutor {
   String containerId = CONTAINER_ + (id - 1);
   Path appdir = new Path(dir, appId);
   Path containerdir = new Path(appdir, containerId);
-  exec.deleteAsUser(user, containerdir);
+  exec.deleteAsUser(new DeletionAsUserContext.Builder()
+  .setUser(user)
+  .setSubDir(containerdir)
+  .build());
 }
   }
 
@@ -244,7 +258,11 @@ public class TestLinuxContainerExecutor {
 for (String file : files) {
   File f = new File(workSpace, file);
   if (f.exists()) {
-exec.deleteAsUser(user, new Path(file), ws);
+exec.deleteAsUser(new DeletionAsUserContext.Builder()
+.setUser(user)
+.setSubDir(new Path(file))
+.setBasedirs(ws)
+.build());
   }
 }
   }
@@ -310,9 +328,16 @@ public class TestLinuxContainerExecutor {
 Path pidFile = new Path(workDir, pid.txt);
 
 exec.activateContainer(cId, pidFile);
-return exec.launchContainer(container, scriptPath, tokensPath,
-  appSubmitter, appId, workDir, dirsHandler.getLocalDirs(),
-  dirsHandler.getLogDirs());
+return exec.launchContainer(new ContainerStartContext.Builder()
+.setContainer(container)
+.setNmPrivateContainerScriptPath(scriptPath)
+.setNmPrivateTokensPath(tokensPath)
+.setUser(appSubmitter)
+.setAppId(appId)
+.setContainerWorkDir(workDir)
+.setLocalDirs(dirsHandler.getLocalDirs())
+.setLogDirs(dirsHandler.getLogDirs())
+.build());
   }
 
   @Test
@@ -345,8 +370,14 @@ public class TestLinuxContainerExecutor {
 };
 exec.setConf(conf);
 
-exec.startLocalizer(nmPrivateContainerTokensPath, nmAddr, appSubmitter,
-  appId, locId, dirsHandler);
+exec.startLocalizer(new LocalizerStartContext.Builder()
+.setNmPrivateContainerTokens(nmPrivateContainerTokensPath)
+.setNmAddr(nmAddr)
+

[1/2] hadoop git commit: YARN-3684. Changed ContainerExecutor's primary lifecycle methods to use a more extensible mechanism of context objects. Contributed by Sidharta Seethana.

2015-05-21 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e8ac88d4f - 673bd9708


http://git-wip-us.apache.org/repos/asf/hadoop/blob/673bd970/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
index da47ddc..c3a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
@@ -60,6 +60,11 @@ import 
org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext;
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
+import 
org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
 import org.apache.hadoop.yarn.server.nodemanager.util.LCEResourcesHandler;
 import org.junit.After;
 import org.junit.Assert;
@@ -208,7 +213,10 @@ public class TestLinuxContainerExecutor {
   Path usercachedir = new Path(dir, ContainerLocalizer.USERCACHE);
   Path userdir = new Path(usercachedir, user);
   Path appcachedir = new Path(userdir, ContainerLocalizer.APPCACHE);
-  exec.deleteAsUser(user, appcachedir);
+  exec.deleteAsUser(new DeletionAsUserContext.Builder()
+  .setUser(user)
+  .setSubDir(appcachedir)
+  .build());
   FileContext.getLocalFSFileContext().delete(usercachedir, true);
 }
   }
@@ -218,7 +226,10 @@ public class TestLinuxContainerExecutor {
 for (String dir : localDirs) {
   Path filecache = new Path(dir, ContainerLocalizer.FILECACHE);
   Path filedir = new Path(filecache, user);
-  exec.deleteAsUser(user, filedir);
+  exec.deleteAsUser(new DeletionAsUserContext.Builder()
+  .setUser(user)
+  .setSubDir(filedir)
+  .build());
 }
   }
 
@@ -229,7 +240,10 @@ public class TestLinuxContainerExecutor {
  String containerId = "CONTAINER_" + (id - 1);
   Path appdir = new Path(dir, appId);
   Path containerdir = new Path(appdir, containerId);
-  exec.deleteAsUser(user, containerdir);
+  exec.deleteAsUser(new DeletionAsUserContext.Builder()
+  .setUser(user)
+  .setSubDir(containerdir)
+  .build());
 }
   }
 
@@ -244,7 +258,11 @@ public class TestLinuxContainerExecutor {
 for (String file : files) {
   File f = new File(workSpace, file);
   if (f.exists()) {
-exec.deleteAsUser(user, new Path(file), ws);
+exec.deleteAsUser(new DeletionAsUserContext.Builder()
+.setUser(user)
+.setSubDir(new Path(file))
+.setBasedirs(ws)
+.build());
   }
 }
   }
@@ -310,9 +328,16 @@ public class TestLinuxContainerExecutor {
Path pidFile = new Path(workDir, "pid.txt");
 
 exec.activateContainer(cId, pidFile);
-return exec.launchContainer(container, scriptPath, tokensPath,
-  appSubmitter, appId, workDir, dirsHandler.getLocalDirs(),
-  dirsHandler.getLogDirs());
+return exec.launchContainer(new ContainerStartContext.Builder()
+.setContainer(container)
+.setNmPrivateContainerScriptPath(scriptPath)
+.setNmPrivateTokensPath(tokensPath)
+.setUser(appSubmitter)
+.setAppId(appId)
+.setContainerWorkDir(workDir)
+.setLocalDirs(dirsHandler.getLocalDirs())
+.setLogDirs(dirsHandler.getLogDirs())
+.build());
   }
 
   @Test
@@ -345,8 +370,14 @@ public class TestLinuxContainerExecutor {
 };
 exec.setConf(conf);
 
-exec.startLocalizer(nmPrivateContainerTokensPath, nmAddr, appSubmitter,
-  appId, locId, dirsHandler);
+exec.startLocalizer(new LocalizerStartContext.Builder()
+.setNmPrivateContainerTokens(nmPrivateContainerTokensPath)
+.setNmAddr(nmAddr)
+

hadoop git commit: HDFS-8441. Erasure Coding: make condition check earlier for setReplication. (waltersu4549)

2015-05-21 Thread waltersu4549
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 579677d02 -> 24d0fbe60


HDFS-8441. Erasure Coding: make condition check earlier for setReplication. 
(waltersu4549)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24d0fbe6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24d0fbe6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24d0fbe6

Branch: refs/heads/HDFS-7285
Commit: 24d0fbe60df0cb20cc612d4a7f61baecfe7f2e5e
Parents: 579677d
Author: Walter Su waltersu4...@apache.org
Authored: Fri May 22 10:25:53 2015 +0800
Committer: Walter Su waltersu4...@apache.org
Committed: Fri May 22 10:25:53 2015 +0800

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 +++
 .../hdfs/server/namenode/FSDirAttrOp.java   |  7 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 19 +++-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  2 --
 .../hadoop/hdfs/TestErasureCodingZones.java | 24 +++-
 5 files changed, 50 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24d0fbe6/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index c986f19..d71b9c3 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -250,3 +250,6 @@
 
 HDFS-8294. Erasure Coding: Fix Findbug warnings present in erasure coding.
 (Rakesh R via zhz)
+
+HDFS-8441. Erasure Coding: make condition check earlier for setReplication.
+(waltersu4549)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24d0fbe6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 9abb9fa..d34a0fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -380,7 +380,7 @@ public class FSDirAttrOp {
   static BlockInfoContiguous[] unprotectedSetReplication(
   FSDirectory fsd, String src, short replication, short[] blockRepls)
   throws QuotaExceededException, UnresolvedLinkException,
- SnapshotAccessControlException {
+  SnapshotAccessControlException, UnsupportedActionException {
 assert fsd.hasWriteLock();
 
 final INodesInPath iip = fsd.getINodesInPath4Write(src, true);
@@ -389,6 +389,11 @@ public class FSDirAttrOp {
   return null;
 }
 INodeFile file = inode.asFile();
+if (file.isStriped()) {
+  throw new UnsupportedActionException(
+  "Cannot set replication to a file with striped blocks");
+}
+
 final short oldBR = file.getPreferredBlockReplication();
 
 // before setFileReplication, check for increasing block replication.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24d0fbe6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 497a7d8..c4549c4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2413,7 +2413,17 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 if (!DFSUtil.isValidName(src)) {
   throw new InvalidPathException(src);
 }
-blockManager.verifyReplication(src, replication, clientMachine);
+
+checkOperation(OperationCategory.READ);
+readLock();
+try {
+  checkOperation(OperationCategory.READ);
+  if (!isInECZone(src)) {
+blockManager.verifyReplication(src, replication, clientMachine);
+  }
+} finally {
+  readUnlock();
+}
 
 boolean skipSync = false;
 HdfsFileStatus stat = null;
@@ -7805,6 +7815,13 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 logAuditEvent(success, "createErasureCodingZone", srcArg, null, 
resultingStat);
   }
 
+  private 

[2/2] hadoop git commit: HDFS-8454. Remove unnecessary throttling in TestDatanodeDeath. (Arpit Agarwal)

2015-05-21 Thread arp
HDFS-8454. Remove unnecessary throttling in TestDatanodeDeath. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a073e5bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a073e5bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a073e5bd

Branch: refs/heads/branch-2
Commit: a073e5bd31291ae090594122f961b1d949a875e0
Parents: 673bd97
Author: Arpit Agarwal a...@apache.org
Authored: Thu May 21 20:43:57 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Thu May 21 20:44:07 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java   | 1 -
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a073e5bd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index da95c60..e657036 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -465,6 +465,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6348. SecondaryNameNode not terminating properly on runtime exceptions
 (Rakesh R via vinayakumarb)
 
+HDFS-8454. Remove unnecessary throttling in TestDatanodeDeath.
+(Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a073e5bd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
index 5a9b0e9..5cbc216 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
@@ -363,7 +363,6 @@ public class TestDatanodeDeath {
 
   // these are test settings
   dfstream.setChunksPerPacket(5);
-  dfstream.setArtificialSlowdown(3000);
 
   final long myseed = AppendTestUtil.nextLong();
   byte[] buffer = AppendTestUtil.randomBytes(myseed, fileSize);



[1/2] hadoop git commit: HDFS-8454. Remove unnecessary throttling in TestDatanodeDeath. (Arpit Agarwal)

2015-05-21 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 673bd9708 -> a073e5bd3
  refs/heads/trunk 53fafcf06 -> cf2b5694d


HDFS-8454. Remove unnecessary throttling in TestDatanodeDeath. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf2b5694
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf2b5694
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf2b5694

Branch: refs/heads/trunk
Commit: cf2b5694d656f5807011b3d8c97ee999ad070d35
Parents: 53fafcf
Author: Arpit Agarwal a...@apache.org
Authored: Thu May 21 20:43:57 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Thu May 21 20:43:57 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java   | 1 -
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf2b5694/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e830421..50fccd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -799,6 +799,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6348. SecondaryNameNode not terminating properly on runtime exceptions
 (Rakesh R via vinayakumarb)
 
+HDFS-8454. Remove unnecessary throttling in TestDatanodeDeath.
+(Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf2b5694/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
index bf26295..f276f55 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
@@ -362,7 +362,6 @@ public class TestDatanodeDeath {
 
   // these are test settings
   dfstream.setChunksPerPacket(5);
-  dfstream.setArtificialSlowdown(3000);
 
   final long myseed = AppendTestUtil.nextLong();
   byte[] buffer = AppendTestUtil.randomBytes(myseed, fileSize);



hadoop git commit: HADOOP-12014. hadoop-config.cmd displays a wrong error message. Contributed by Kengo Seki.

2015-05-21 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk cf2b5694d -> c7fea088f


HADOOP-12014. hadoop-config.cmd displays a wrong error message. Contributed by 
Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7fea088
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7fea088
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7fea088

Branch: refs/heads/trunk
Commit: c7fea088f7b6c44e4e04bde19dc839975d8ac8ba
Parents: cf2b569
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri May 22 14:13:51 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Fri May 22 14:15:08 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/main/bin/hadoop-config.cmd  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7fea088/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 32f0630..c02b44d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -737,6 +737,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12000. cannot use --java-home in test-patch (aw)
 
+HADOOP-12014. hadoop-config.cmd displays a wrong error message.
+(Kengo Seki via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7fea088/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
index bf86f1b..6670aca 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
@@ -115,7 +115,7 @@ if not defined JAVA_HOME (
 
 if not exist %JAVA_HOME%\bin\java.exe (
   echo Error: JAVA_HOME is incorrectly set.
-  echo   Please update %HADOOP_HOME%\conf\hadoop-env.cmd
+  echo   Please update %HADOOP_CONF_DIR%\hadoop-env.cmd
   goto :eof
 )
 



hadoop git commit: HADOOP-12014. hadoop-config.cmd displays a wrong error message. Contributed by Kengo Seki.

2015-05-21 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a073e5bd3 -> 6cd767287


HADOOP-12014. hadoop-config.cmd displays a wrong error message. Contributed by 
Kengo Seki.

(cherry picked from commit 07007bdbc8900cfe8421f4829b1444fc9e7aceb7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6cd76728
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6cd76728
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6cd76728

Branch: refs/heads/branch-2
Commit: 6cd767287845e1314964e78df2999b05c3cd6c38
Parents: a073e5b
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri May 22 14:13:51 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Fri May 22 14:15:37 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/main/bin/hadoop-config.cmd  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cd76728/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e58e9cb..681b4b8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -272,6 +272,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12000. cannot use --java-home in test-patch (aw)
 
+HADOOP-12014. hadoop-config.cmd displays a wrong error message.
+(Kengo Seki via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cd76728/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
index 13fb64c..f8d71ba 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
@@ -115,7 +115,7 @@ if not defined JAVA_HOME (
 
 if not exist %JAVA_HOME%\bin\java.exe (
   echo Error: JAVA_HOME is incorrectly set.
-  echo   Please update %HADOOP_HOME%\conf\hadoop-env.cmd
+  echo   Please update %HADOOP_CONF_DIR%\hadoop-env.cmd
   goto :eof
 )
 



hadoop git commit: HDFS-8451. DFSClient probe for encryption testing interprets empty URI property for enabled. Contributed by Steve Loughran.

2015-05-21 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 b68c338b1 -> c55082567


HDFS-8451. DFSClient probe for encryption testing interprets empty URI property 
for enabled. Contributed by Steve Loughran.

(cherry picked from commit 05e04f34f27149537fdb89f46af26bee14531ca4)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5508256
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5508256
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5508256

Branch: refs/heads/branch-2.7
Commit: c55082567311377d3d010e18d886cef82d2ca141
Parents: b68c338
Author: Xiaoyu Yao x...@apache.org
Authored: Thu May 21 11:58:00 2015 -0700
Committer: Xiaoyu Yao x...@apache.org
Committed: Thu May 21 12:49:49 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  9 ++--
 .../java/org/apache/hadoop/hdfs/DFSUtil.java| 22 
 .../apache/hadoop/hdfs/KeyProviderCache.java|  4 ++--
 .../org/apache/hadoop/hdfs/TestDFSUtil.java | 18 
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  2 +-
 6 files changed, 49 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5508256/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 654c1b3..224d14b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -87,6 +87,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8404. Pending block replication can get stuck using older genstamp
 (Nathan Roberts via kihwal)
 
+HDFS-8451. DFSClient probe for encryption testing interprets empty URI
+property for enabled. (Steve Loughran via xyao)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5508256/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 93d705a..87e34cd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3530,10 +3530,15 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  /**
+   * Probe for encryption enabled on this filesystem.
+   * See {@link DFSUtil#isHDFSEncryptionEnabled(Configuration)}
+   * @return true if encryption is enabled
+   */
   public boolean isHDFSEncryptionEnabled() {
-return conf.get(
-DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, null) != null;
+return DFSUtil.isHDFSEncryptionEnabled(this.conf);
   }
+
   /**
* Returns the SaslDataTransferClient configured for this DFSClient.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5508256/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 846231a..eceea64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -183,8 +183,8 @@ public class DFSUtil {
   a.isDecommissioned() ? 1 : -1;
   }
 };
-
-  
+
+
   /**
* Comparator for sorting DataNodeInfo[] based on decommissioned/stale 
states.
* Decommissioned/stale nodes are moved to the end of the array on sorting
@@ -1844,9 +1844,9 @@ public class DFSUtil {
   public static KeyProvider createKeyProvider(
   final Configuration conf) throws IOException {
 final String providerUriStr =
-conf.get(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, null);
+conf.getTrimmed(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, );
 // No provider set in conf
-if (providerUriStr == null) {
+if (providerUriStr.isEmpty()) {
   return null;
 }
 final URI providerUri;
@@ -1887,4 +1887,18 @@ public class DFSUtil {
 .createKeyProviderCryptoExtension(keyProvider);
 return cryptoProvider;
   }
+
+  /**
+   * Probe for HDFS Encryption being enabled; this 

hadoop git commit: HDFS-8210. Ozone: Implement storage container manager.

2015-05-21 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 2b6bcfdaf -> 770ed9262


HDFS-8210. Ozone: Implement storage container manager.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/770ed926
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/770ed926
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/770ed926

Branch: refs/heads/HDFS-7240
Commit: 770ed92623869a2ecd7630d5786902fb58fa0232
Parents: 2b6bcfd
Author: Jitendra Pandey jiten...@apache.org
Authored: Thu May 21 09:04:37 2015 -0700
Committer: Jitendra Pandey jiten...@apache.org
Committed: Thu May 21 09:04:37 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../blockmanagement/BlockInfoContiguous.java|   6 +-
 .../server/blockmanagement/BlockManager.java|  72 ++---
 .../hdfs/server/blockmanagement/BlocksMap.java  |  16 +-
 .../blockmanagement/DatanodeDescriptor.java |  18 +-
 .../blockmanagement/DatanodeStorageInfo.java|  14 +-
 .../BitWiseTrieContainerMap.java| 128 
 .../StorageContainerConfiguration.java  |  32 ++
 .../StorageContainerManager.java| 318 +++
 .../storagecontainer/StorageContainerMap.java   | 123 +++
 .../StorageContainerNameService.java| 155 +
 .../protocol/ContainerLocationProtocol.java |  27 ++
 .../protocol/StorageContainer.java  |  34 ++
 .../TestStorageContainerMap.java|  92 ++
 14 files changed, 957 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9cfad7d..71790eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -33,6 +33,9 @@ Trunk (Unreleased)
 
 HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
 
+HDFS-7240. Ozone
+  HDFS-8210. Ozone: Implement storage container manager. (jitendra)
+
   IMPROVEMENTS
 
 HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 769046b..d286784 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.LinkedList;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.util.LightWeightGSet;
 
+import java.util.LinkedList;
+
 /**
  * BlockInfo class maintains for a given block
  * the {@link BlockCollection} it is part of and datanodes where the replicas 
of 
@@ -96,7 +96,7 @@ public class BlockInfoContiguous extends Block
 return storage == null ? null : storage.getDatanodeDescriptor();
   }
 
-  DatanodeStorageInfo getStorageInfo(int index) {
+  public DatanodeStorageInfo getStorageInfo(int index) {
 assert this.triplets != null : "BlockInfo is not initialized";
 assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
 return (DatanodeStorageInfo)triplets[index*3];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 54981fb..19a20bd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -17,48 +17,22 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 

[4/7] hadoop git commit: HADOOP-10366. Add whitespaces between classes for values in core-default.xml to fit better in browser. Contributed by kanaka kumar avvaru.

2015-05-21 Thread jitendra
HADOOP-10366. Add whitespaces between classes for values in core-default.xml to 
fit better in browser. Contributed by kanaka kumar avvaru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e4f1081
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e4f1081
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e4f1081

Branch: refs/heads/HDFS-7240
Commit: 0e4f1081c7a98e1c0c4f922f5e2afe467a0d763f
Parents: dc8434a
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 21 17:52:03 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 21 17:52:03 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 4 
 .../hadoop-common/src/main/resources/core-default.xml| 4 ++--
 .../hadoop-hdfs/src/site/markdown/TransparentEncryption.md   | 2 +-
 3 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e4f1081/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 416b819..aff9368 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -593,6 +593,10 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11995. Make jetty version configurable from the maven command line.
 (Sriharsha Devineni via wheat9)
 
+HADOOP-10366. Add whitespaces between classes for values in
+core-default.xml to fit better in browser.
+(kanaka kumar avvaru via aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e4f1081/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 97e01a8..a1bc780 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -466,7 +466,7 @@ for ldap providers in the same way as above does.
 
 <property>
   <name>io.serializations</name>
-  <value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value>
+  <value>org.apache.hadoop.io.serializer.WritableSerialization, 
org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization, 
org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value>
   <description>A list of serialization classes that can be used for
   obtaining serializers and deserializers.</description>
 </property>
@@ -1655,7 +1655,7 @@ for ldap providers in the same way as above does.
 
 <property>
   <name>hadoop.security.crypto.codec.classes.aes.ctr.nopadding</name>
-  <value>org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec,org.apache.hadoop.crypto.JceAesCtrCryptoCodec</value>
+  <value>org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec, 
org.apache.hadoop.crypto.JceAesCtrCryptoCodec</value>
   description
 Comma-separated list of crypto codec implementations for 
AES/CTR/NoPadding. 
 The first implementation will be used if available, others are fallbacks.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e4f1081/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
index aa2acbd..05e4249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
@@ -126,7 +126,7 @@ The prefix for a given crypto codec, contains a 
comma-separated list of implemen
 
  hadoop.security.crypto.codec.classes.aes.ctr.nopadding
 
-Default: 
`org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec,org.apache.hadoop.crypto.JceAesCtrCryptoCodec`
+Default: `org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec, 
org.apache.hadoop.crypto.JceAesCtrCryptoCodec`
 
 Comma-separated list of crypto codec implementations for AES/CTR/NoPadding. 
The first implementation will be used if available, others are fallbacks.
 



[2/7] hadoop git commit: HADOOP-11772. RPC Invoker relies on static ClientCache which has synchronized(this) blocks. Contributed by Haohui Mai.

2015-05-21 Thread jitendra
HADOOP-11772. RPC Invoker relies on static ClientCache which has 
synchronized(this) blocks. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb6b38d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb6b38d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb6b38d6

Branch: refs/heads/HDFS-7240
Commit: fb6b38d67d8b997eca498fc5010b037e3081ace7
Parents: 6329bd0
Author: Haohui Mai whe...@apache.org
Authored: Wed May 20 20:10:50 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed May 20 20:10:50 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../main/java/org/apache/hadoop/ipc/Client.java | 106 ++-
 2 files changed, 35 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb6b38d6/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1624ce2..416b819 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -604,6 +604,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11970. Replace uses of ThreadLocalRandom with JDK7
 ThreadLocalRandom.  (Sean Busbey via Colin P. McCabe)
 
+HADOOP-11772. RPC Invoker relies on static ClientCache which has
+synchronized(this) blocks. (wheat9)
+
   BUG FIXES
 HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
 is an I/O error during requestShortCircuitShm (cmccabe)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb6b38d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index f28d8a2..feb811e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -43,6 +43,7 @@ import java.util.Iterator;
 import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -56,6 +57,8 @@ import java.util.concurrent.atomic.AtomicLong;
 import javax.net.SocketFactory;
 import javax.security.sasl.Sasl;
 
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -124,8 +127,8 @@ public class Client {
 retryCount.set(rc);
   }
 
-  private Hashtable<ConnectionId, Connection> connections =
-    new Hashtable<ConnectionId, Connection>();
+  private final Cache<ConnectionId, Connection> connections =
+  CacheBuilder.newBuilder().build();

   private Class<? extends Writable> valueClass;   // class of call values
   private AtomicBoolean running = new AtomicBoolean(true); // if client runs
@@ -1167,13 +1170,7 @@ public class Client {
 return;
   }
 
-  // release the resources
-  // first thing to do;take the connection out of the connection list
-  synchronized (connections) {
-if (connections.get(remoteId) == this) {
-  connections.remove(remoteId);
-}
-  }
+  connections.invalidate(remoteId);
 
   // close the streams and therefore the socket
   IOUtils.closeStream(out);
@@ -1260,14 +1257,12 @@ public class Client {
 }
 
 // wake up all connections
-synchronized (connections) {
-  for (Connection conn : connections.values()) {
-conn.interrupt();
-  }
+for (Connection conn : connections.asMap().values()) {
+  conn.interrupt();
 }
 
 // wait until all connections are closed
-while (!connections.isEmpty()) {
+while (connections.size() > 0) {
   try {
 Thread.sleep(100);
   } catch (InterruptedException e) {
@@ -1283,56 +1278,12 @@ public class Client {
*/
   public Writable call(Writable param, InetSocketAddress address)
   throws IOException {
-return call(RPC.RpcKind.RPC_BUILTIN, param, address);
-
-  }
-  /** Make a call, passing <code>param</code>, to the IPC server running at
-   * <code>address</code>, returning the value.  Throws exceptions if there are
-   * network problems or if the remote code threw an exception.
-   * @deprecated Use {@link 

[7/7] hadoop git commit: HDFS-8421. Move startFile() and related functions into FSDirWriteFileOp. Contributed by Haohui Mai.

2015-05-21 Thread jitendra
HDFS-8421. Move startFile() and related functions into FSDirWriteFileOp. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b6bcfda
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b6bcfda
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b6bcfda

Branch: refs/heads/HDFS-7240
Commit: 2b6bcfdafa91223a4116e3e9304579f5f91dccac
Parents: 0305316
Author: Haohui Mai whe...@apache.org
Authored: Thu May 21 08:05:10 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Thu May 21 08:08:28 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 324 ++-
 .../hdfs/server/namenode/FSDirectory.java   |  91 --
 .../hdfs/server/namenode/FSEditLogLoader.java   |  15 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 280 +++-
 5 files changed, 371 insertions(+), 342 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b6bcfda/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 77d7369..9cfad7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -577,6 +577,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-4383. Document the lease limits. (Arshad Mohammad via aajisaka)
 
+HDFS-8421. Move startFile() and related functions into FSDirWriteFileOp.
+(wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b6bcfda/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 1ff0899..307bd59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -18,11 +18,27 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
+import org.apache.commons.io.Charsets;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -34,15 +50,22 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.util.ChunkedArrayList;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
+import static 
org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
+import static org.apache.hadoop.util.Time.now;
+
 class FSDirWriteFileOp {
   private FSDirWriteFileOp() {}
   static boolean unprotectedRemoveBlock(
@@ -278,6 +301,210 @@ class FSDirWriteFileOp {
   }
 
   /**
+   * Create a new file or overwrite an existing file<br>
+   *
+   * Once the file is create the client then allocates a new block with the 
next
+  

[1/7] hadoop git commit: YARN-3654. ContainerLogsPage web UI should not have meta-refresh. Contributed by Xuan Gong

2015-05-21 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 8966d4217 -> 2b6bcfdaf


YARN-3654. ContainerLogsPage web UI should not have meta-refresh. Contributed 
by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6329bd00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6329bd00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6329bd00

Branch: refs/heads/HDFS-7240
Commit: 6329bd00fa1f17cc9555efa496ea7607ad93e0ce
Parents: 8966d42
Author: Jian He jia...@apache.org
Authored: Wed May 20 17:20:21 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Wed May 20 17:20:21 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../nodemanager/webapp/ContainerLogsPage.java   |   3 -
 .../server/nodemanager/webapp/NMController.java |  40 +--
 .../nodemanager/webapp/NMWebAppFilter.java  | 118 +++
 .../server/nodemanager/webapp/WebServer.java|   7 +-
 5 files changed, 128 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6329bd00/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index dfbc06e..3cba027 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -426,6 +426,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2821. Fixed a problem that DistributedShell AM may hang if restarted.
 (Varun Vasudev via jianhe)
 
+YARN-3654. ContainerLogsPage web UI should not have meta-refresh. (Xuan 
Gong
+via jianhe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6329bd00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
index 48e0c87..2fd6b2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
@@ -59,9 +59,6 @@ public class ContainerLogsPage extends NMView {
   if (redirectUrl.equals("false")) {
 set(TITLE, join("Failed redirect for ", $(CONTAINER_ID)));
 //Error getting redirect url. Fall through.
-  } else {
-set(TITLE, join("Redirecting to log server for ", $(CONTAINER_ID)));
-html.meta_http("refresh", "1; url=" + redirectUrl);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6329bd00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java
index 097532f..5be5b35 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java
@@ -20,29 +20,16 @@ package org.apache.hadoop.yarn.server.nodemanager.webapp;
 
 import static org.apache.hadoop.yarn.util.StringHelper.join;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.nodemanager.Context;
-import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.Controller;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 
 import 

[5/7] hadoop git commit: YARN-3694. Fix dead link for TimelineServer REST API. Contributed by Jagadesh Kiran N.

2015-05-21 Thread jitendra
YARN-3694. Fix dead link for TimelineServer REST API. Contributed by Jagadesh 
Kiran N.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5def580
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5def580
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5def580

Branch: refs/heads/HDFS-7240
Commit: a5def580879428bc7af3c030ef33554e0519f072
Parents: 0e4f108
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 21 23:14:44 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 21 23:14:44 2015 +0900

--
 hadoop-project/src/site/site.xml | 2 +-
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5def580/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 7234881..f3bb458 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -134,7 +134,7 @@
   <item name="Introduction" href="hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html"/>
   <item name="Resource Manager" href="hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html"/>
   <item name="Node Manager" href="hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html"/>
-  <item name="Timeline Server" href="TimelineServer.html#Timeline_Server_REST_API_v1"/>
+  <item name="Timeline Server" href="hadoop-yarn/hadoop-yarn-site/TimelineServer.html#Timeline_Server_REST_API_v1"/>
 </menu>

 <menu name="Hadoop Compatible File Systems" inherit="top">

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5def580/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3cba027..d1d2258 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -535,6 +535,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3609. Load node labels from storage inside RM serviceStart. (Wangda
 Tan via jianhe)
 
+YARN-3694. Fix dead link for TimelineServer REST API.
+(Jagadesh Kiran N via aajisaka)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



[3/7] hadoop git commit: HDFS-4383. Document the lease limits. Contributed by Arshad Mohammad.

2015-05-21 Thread jitendra
HDFS-4383. Document the lease limits. Contributed by Arshad Mohammad.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc8434ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc8434ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc8434ab

Branch: refs/heads/HDFS-7240
Commit: dc8434ab2b177ca9673bd8eecf7b185d4c4ffb31
Parents: fb6b38d
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu May 21 17:30:43 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Thu May 21 17:30:43 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hdfs/server/common/HdfsServerConstants.java | 23 +---
 2 files changed, 22 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8434ab/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5bcaddd..77d7369 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -575,6 +575,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-4185. Add a metric for number of active leases (Rakesh R via raviprak)
 
+HDFS-4383. Document the lease limits. (Arshad Mohammad via aajisaka)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8434ab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index c664b01..26a7ab3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -42,10 +42,27 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceAudience.Private
 public interface HdfsServerConstants {
   int MIN_BLOCKS_FOR_WRITE = 1;
-  //
-  // Timeouts, constants
-  //
+  /**
+   * For a HDFS client to write to a file, a lease is granted; During the lease
+   * period, no other client can write to the file. The writing client can
+   * periodically renew the lease. When the file is closed, the lease is
+   * revoked. The lease duration is bound by this soft limit and a
+   * {@link HdfsServerConstants#LEASE_HARDLIMIT_PERIOD hard limit}. Until the
+   * soft limit expires, the writer has sole write access to the file. If the
+   * soft limit expires and the client fails to close the file or renew the
+   * lease, another client can preempt the lease.
+   */
   long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
+  /**
+   * For a HDFS client to write to a file, a lease is granted; During the lease
+   * period, no other client can write to the file. The writing client can
+   * periodically renew the lease. When the file is closed, the lease is
+   * revoked. The lease duration is bound by a
+   * {@link HdfsServerConstants#LEASE_SOFTLIMIT_PERIOD soft limit} and this 
hard
+   * limit. If after the hard limit expires and the client has failed to renew
+   * the lease, HDFS assumes that the client has quit and will automatically
+   * close the file on behalf of the writer, and recover the lease.
+   */
   long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
   long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms
   // We need to limit the length and depth of a path in the filesystem.



[6/7] hadoop git commit: YARN-3646. Applications are getting stuck some times in case of retry policy forever. Contributed by Raju Bairishetti.

2015-05-21 Thread jitendra
YARN-3646. Applications are getting stuck some times in case of retry
policy forever. Contributed by Raju Bairishetti.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0305316d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0305316d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0305316d

Branch: refs/heads/HDFS-7240
Commit: 0305316d6932e6f1a05021354d77b6934e57e171
Parents: a5def58
Author: Devaraj K deva...@apache.org
Authored: Thu May 21 20:14:44 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Thu May 21 20:14:44 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../yarn/client/api/impl/TestYarnClient.java| 32 
 .../org/apache/hadoop/yarn/client/RMProxy.java  | 15 -
 3 files changed, 43 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0305316d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d1d2258..e5a9ee9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -538,6 +538,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3694. Fix dead link for TimelineServer REST API.
 (Jagadesh Kiran N via aajisaka)
 
+YARN-3646. Applications are getting stuck some times in case of retry
+policy forever. (Raju Bairishetti via devaraj)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0305316d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 511fa4a..bc40b9a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -1265,4 +1265,36 @@ public class TestYarnClient {
 ReservationSystemTestUtil.reservationQ);
 return request;
   }
+
+  @Test(timeout = 3, expected = ApplicationNotFoundException.class)
+  public void testShouldNotRetryForeverForNonNetworkExceptions() throws 
Exception {
+YarnConfiguration conf = new YarnConfiguration();
+conf.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, -1);
+
+ResourceManager rm = null;
+YarnClient yarnClient = null;
+try {
+  // start rm
+  rm = new ResourceManager();
+  rm.init(conf);
+  rm.start();
+
+  yarnClient = YarnClient.createYarnClient();
+  yarnClient.init(conf);
+  yarnClient.start();
+
+  // create invalid application id
+  ApplicationId appId = ApplicationId.newInstance(1430126768L, 10645);
+
+  // RM should throw ApplicationNotFoundException exception
+  yarnClient.getApplicationReport(appId);
+} finally {
+  if (yarnClient != null) {
+yarnClient.stop();
+  }
+  if (rm != null) {
+rm.stop();
+  }
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0305316d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
index fa8d642..28628f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
@@ -224,19 +224,20 @@ public class RMProxy<T> {
   failoverSleepBaseMs, failoverSleepMaxMs);
 }
 
-if (waitForEver) {
-  return RetryPolicies.RETRY_FOREVER;
-}
-
 if (rmConnectionRetryIntervalMS < 0) {
   throw new YarnRuntimeException("Invalid Configuration. " +
   YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS +
   " should not be negative.");
 }
 
-RetryPolicy retryPolicy =
-RetryPolicies.retryUpToMaximumTimeWithFixedSleep(rmConnectWaitMS,
-rmConnectionRetryIntervalMS, TimeUnit.MILLISECONDS);
+RetryPolicy retryPolicy = null;
+if (waitForEver) {