hadoop git commit: HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast. Contributed by Tsz Wo Nicholas Sze.

2015-03-09 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5578e22ce -> e43882e84


HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast. Contributed by Tsz 
Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e43882e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e43882e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e43882e8

Branch: refs/heads/trunk
Commit: e43882e84ae44301eabd0122b5e5492da5fe9f66
Parents: 5578e22
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 9 10:52:17 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 10:52:17 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java | 61 +---
 .../hdfs/server/namenode/TestFileTruncate.java  | 11 +++-
 3 files changed, 51 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e43882e8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e106b1a..094abfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -734,6 +734,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7411. Change decommission logic to throttle by blocks rather than
 nodes in each interval. (Andrew Wang via cdouglas)
 
+HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast.
+(Tsz Wo Nicholas Sze via jing9)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e43882e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
index 5c4c7b4..e80e14f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -41,10 +41,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -69,6 +65,9 @@ public class TestAppendSnapshotTruncate {
   private static final int BLOCK_SIZE = 1024;
   private static final int DATANODE_NUM = 3;
   private static final short REPLICATION = 3;
+  private static final int FILE_WORKER_NUM = 3;
+  private static final long TEST_TIME_SECOND = 10;
+  private static final long TEST_TIMEOUT_SECOND = TEST_TIME_SECOND + 60;
 
   static final int SHORT_HEARTBEAT = 1;
   static final String[] EMPTY_STRINGS = {};
@@ -106,7 +105,7 @@ public class TestAppendSnapshotTruncate {
 
 
   /** Test randomly mixing append, snapshot and truncate operations. */
-  @Test
+  @Test(timeout=TEST_TIMEOUT_SECOND*1000)
   public void testAST() throws Exception {
final String dirPathString = "/dir";
 final Path dir = new Path(dirPathString);
@@ -121,12 +120,12 @@ public class TestAppendSnapshotTruncate {
 }
 localDir.mkdirs();
 
-final DirWorker w = new DirWorker(dir, localDir, 3);
+final DirWorker w = new DirWorker(dir, localDir, FILE_WORKER_NUM);
 w.startAllFiles();
 w.start();
-Worker.sleep(10L*1000);
+Worker.sleep(TEST_TIME_SECOND * 1000);
 w.stop();
-w.stoptAllFiles();
+w.stopAllFiles();
 w.checkEverything();
   }
 
@@ -259,7 +258,7 @@ public class TestAppendSnapshotTruncate {
   }
 }
 
-void stoptAllFiles() throws InterruptedException {
+void stopAllFiles() throws InterruptedException {
   for(FileWorker f : files) { 
 f.stop();
   }
@@ -269,12 +268,12 @@ public class TestAppendSnapshotTruncate {
   LOG.info("checkEverything");
   for(FileWorker f : files) { 
 f.checkFullFile();
-Preconditions.checkState(f.state.get() != State.ERROR);
+f.checkErrorState();
   }
   for(String snapshot : snapshotPaths.keySet()) {
 checkSnapshot(snapshot);
   }
-  Preconditions.checkState(state.get() != State.ERROR);
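
The fail-fast recipe in this patch is small enough to restate: a JUnit timeout on the test plus an immediate error-state check per worker, instead of a single Preconditions check at the very end. A minimal sketch, assuming JUnit 4 and Guava (TEST_TIMEOUT_SECOND and checkErrorState mirror the diff above; the enclosing class and state handling are illustrative):

    import java.util.concurrent.atomic.AtomicReference;

    import org.junit.Test;

    import com.google.common.base.Preconditions;

    public class FailFastSketch {
      enum State { IDLE, RUNNING, ERROR }

      static final long TEST_TIME_SECOND = 10;
      static final long TEST_TIMEOUT_SECOND = TEST_TIME_SECOND + 60;

      private final AtomicReference<State> state =
          new AtomicReference<State>(State.IDLE);

      // The timeout turns a hung run into a test failure instead of a stuck build.
      @Test(timeout = TEST_TIMEOUT_SECOND * 1000)
      public void testMixedOperations() throws Exception {
        // ... start workers, let them run for TEST_TIME_SECOND seconds, stop them ...
        checkErrorState();  // surface a worker error immediately
      }

      void checkErrorState() {
        Preconditions.checkState(state.get() != State.ERROR, "a worker hit an error");
      }
    }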

[Hadoop Wiki] Trivial Update of HowToContribute by QwertyManiac

2015-03-09 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The HowToContribute page has been changed by QwertyManiac:
https://wiki.apache.org/hadoop/HowToContribute?action=diff&rev1=103&rev2=104

Comment:
Casey Brotherton noticed that we're missing FUSE dependencies in the native 
library build pre-requisites command. Added them now.

  
  For RHEL (and hence also CentOS):
  {{{
- yum -y install  lzo-devel  zlib-devel  gcc autoconf automake libtool 
openssl-devel
+ yum -y install  lzo-devel  zlib-devel  gcc autoconf automake libtool 
openssl-devel fuse-devel
  }}}
  
  For Debian and Ubuntu:
  {{{
- apt-get -y install maven build-essential autoconf automake libtool cmake 
zlib1g-dev pkg-config libssl-dev
+ apt-get -y install maven build-essential autoconf automake libtool cmake 
zlib1g-dev pkg-config libssl-dev libfuse-dev
  }}}
  
  Native libraries are mandatory for Windows. For instructions see 
Hadoop2OnWindows.


hadoop git commit: HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN. Contributed by Duo Zhang.

2015-03-09 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk e43882e84 -> 42e3a8051


HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN. Contributed by Duo 
Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42e3a805
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42e3a805
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42e3a805

Branch: refs/heads/trunk
Commit: 42e3a805117ff7cb054c2442f7b0e0cc54be63ad
Parents: e43882e
Author: Haohui Mai whe...@apache.org
Authored: Mon Mar 9 11:07:40 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Mon Mar 9 11:07:40 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../java/org/apache/hadoop/minikdc/MiniKdc.java | 17 ++-
 .../minikdc/TestChangeOrgNameAndDomain.java | 32 
 3 files changed, 45 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42e3a805/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6f2c8c3..37604c4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1067,6 +1067,9 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)
 
+HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN.
+(Duo Zhang via wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42e3a805/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
--
diff --git 
a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
 
b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
index a649bd2..9388360 100644
--- 
a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
+++ 
b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
@@ -36,6 +36,7 @@ import 
org.apache.directory.server.core.kerberos.KeyDerivationInterceptor;
 import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmIndex;
 import 
org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmPartition;
 import org.apache.directory.server.core.partition.ldif.LdifPartition;
+import org.apache.directory.server.kerberos.KerberosConfig;
 import org.apache.directory.server.kerberos.kdc.KdcServer;
 import 
org.apache.directory.server.kerberos.shared.crypto.encryption.KerberosKeyFactory;
 import org.apache.directory.server.kerberos.shared.keytab.Keytab;
@@ -418,7 +419,15 @@ public class MiniKdc {
   IOUtils.closeQuietly(is1);
 }
 
-kdc = new KdcServer();
+KerberosConfig kerberosConfig = new KerberosConfig();
+kerberosConfig.setMaximumRenewableLifetime(Long.parseLong(conf
+.getProperty(MAX_RENEWABLE_LIFETIME)));
+kerberosConfig.setMaximumTicketLifetime(Long.parseLong(conf
+.getProperty(MAX_TICKET_LIFETIME)));
+kerberosConfig.setSearchBaseDn(String.format("dc=%s,dc=%s", orgName,
+orgDomain));
+kerberosConfig.setPaEncTimestampRequired(false);
+kdc = new KdcServer(kerberosConfig);
 kdc.setDirectoryService(ds);
 
 // transport
@@ -431,12 +440,6 @@ public class MiniKdc {
   throw new IllegalArgumentException("Invalid transport: " + transport);
 }
 kdc.setServiceName(conf.getProperty(INSTANCE));
-kdc.getConfig().setMaximumRenewableLifetime(
-Long.parseLong(conf.getProperty(MAX_RENEWABLE_LIFETIME)));
-kdc.getConfig().setMaximumTicketLifetime(
-Long.parseLong(conf.getProperty(MAX_TICKET_LIFETIME)));
-
-kdc.getConfig().setPaEncTimestampRequired(false);
 kdc.start();
 
 StringBuilder sb = new StringBuilder();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42e3a805/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
--
diff --git 
a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
 
b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
new file mode 100644
index 000..3843130
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. 
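
What the new test exercises, sketched against the public MiniKdc API (createConf(), the ORG_NAME/ORG_DOMAIN property keys, the MiniKdc(Properties, File) constructor and getRealm() are the hadoop-minikdc entry points; the realm values and work directory here are illustrative):

    import java.io.File;
    import java.util.Properties;

    import org.apache.hadoop.minikdc.MiniKdc;

    public class CustomOrgSketch {
      public static void main(String[] args) throws Exception {
        Properties conf = MiniKdc.createConf();
        conf.setProperty(MiniKdc.ORG_NAME, "EXAMPLE");  // becomes dc=EXAMPLE in the base DN
        conf.setProperty(MiniKdc.ORG_DOMAIN, "ORG");    // becomes dc=ORG

        File workDir = new File("target/minikdc-work");
        workDir.mkdirs();  // MiniKdc expects an existing scratch directory

        MiniKdc kdc = new MiniKdc(conf, workDir);
        kdc.start();  // before this patch, the custom DN was not applied to the KdcServer
        try {
          System.out.println("Realm: " + kdc.getRealm());  // EXAMPLE.ORG
        } finally {
          kdc.stop();
        }
      }
    }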

hadoop git commit: HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN. Contributed by Duo Zhang.

2015-03-09 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 28f4e6b22 -> bbaa1344a


HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN. Contributed by Duo 
Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbaa1344
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbaa1344
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbaa1344

Branch: refs/heads/branch-2.7
Commit: bbaa1344a18c6986ca3aebbf7505730b15872c27
Parents: 28f4e6b
Author: Haohui Mai whe...@apache.org
Authored: Mon Mar 9 11:07:40 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Mon Mar 9 11:08:57 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../java/org/apache/hadoop/minikdc/MiniKdc.java | 17 ++-
 .../minikdc/TestChangeOrgNameAndDomain.java | 32 
 3 files changed, 45 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbaa1344/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index f1272ff..07cb14a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -644,6 +644,9 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)
 
+HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN.
+(Duo Zhang via wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbaa1344/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
--
diff --git 
a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
 
b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
index a649bd2..9388360 100644
--- 
a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
+++ 
b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
@@ -36,6 +36,7 @@ import 
org.apache.directory.server.core.kerberos.KeyDerivationInterceptor;
 import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmIndex;
 import 
org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmPartition;
 import org.apache.directory.server.core.partition.ldif.LdifPartition;
+import org.apache.directory.server.kerberos.KerberosConfig;
 import org.apache.directory.server.kerberos.kdc.KdcServer;
 import 
org.apache.directory.server.kerberos.shared.crypto.encryption.KerberosKeyFactory;
 import org.apache.directory.server.kerberos.shared.keytab.Keytab;
@@ -418,7 +419,15 @@ public class MiniKdc {
   IOUtils.closeQuietly(is1);
 }
 
-kdc = new KdcServer();
+KerberosConfig kerberosConfig = new KerberosConfig();
+kerberosConfig.setMaximumRenewableLifetime(Long.parseLong(conf
+.getProperty(MAX_RENEWABLE_LIFETIME)));
+kerberosConfig.setMaximumTicketLifetime(Long.parseLong(conf
+.getProperty(MAX_TICKET_LIFETIME)));
+kerberosConfig.setSearchBaseDn(String.format("dc=%s,dc=%s", orgName,
+orgDomain));
+kerberosConfig.setPaEncTimestampRequired(false);
+kdc = new KdcServer(kerberosConfig);
 kdc.setDirectoryService(ds);
 
 // transport
@@ -431,12 +440,6 @@ public class MiniKdc {
   throw new IllegalArgumentException("Invalid transport: " + transport);
 }
 kdc.setServiceName(conf.getProperty(INSTANCE));
-kdc.getConfig().setMaximumRenewableLifetime(
-Long.parseLong(conf.getProperty(MAX_RENEWABLE_LIFETIME)));
-kdc.getConfig().setMaximumTicketLifetime(
-Long.parseLong(conf.getProperty(MAX_TICKET_LIFETIME)));
-
-kdc.getConfig().setPaEncTimestampRequired(false);
 kdc.start();
 
 StringBuilder sb = new StringBuilder();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbaa1344/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
--
diff --git 
a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
 
b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
new file mode 100644
index 000..3843130
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license 

hadoop git commit: HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast. Contributed by Tsz Wo Nicholas Sze.

2015-03-09 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b46f9e72d -> c7105fcff


HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast. Contributed by Tsz 
Wo Nicholas Sze.

(cherry picked from commit e43882e84ae44301eabd0122b5e5492da5fe9f66)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7105fcf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7105fcf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7105fcf

Branch: refs/heads/branch-2
Commit: c7105fcff0ac65c5f85d7cc8ca7c24b984217c2c
Parents: b46f9e7
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 9 10:52:17 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 10:52:39 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java | 61 +---
 .../hdfs/server/namenode/TestFileTruncate.java  | 11 +++-
 3 files changed, 51 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7105fcf/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1576088..cc8a69e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -429,6 +429,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7411. Change decommission logic to throttle by blocks rather than
 nodes in each interval. (Andrew Wang via cdouglas)
 
+HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast.
+(Tsz Wo Nicholas Sze via jing9)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7105fcf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
index 5c4c7b4..e80e14f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -41,10 +41,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -69,6 +65,9 @@ public class TestAppendSnapshotTruncate {
   private static final int BLOCK_SIZE = 1024;
   private static final int DATANODE_NUM = 3;
   private static final short REPLICATION = 3;
+  private static final int FILE_WORKER_NUM = 3;
+  private static final long TEST_TIME_SECOND = 10;
+  private static final long TEST_TIMEOUT_SECOND = TEST_TIME_SECOND + 60;
 
   static final int SHORT_HEARTBEAT = 1;
   static final String[] EMPTY_STRINGS = {};
@@ -106,7 +105,7 @@ public class TestAppendSnapshotTruncate {
 
 
   /** Test randomly mixing append, snapshot and truncate operations. */
-  @Test
+  @Test(timeout=TEST_TIMEOUT_SECOND*1000)
   public void testAST() throws Exception {
final String dirPathString = "/dir";
 final Path dir = new Path(dirPathString);
@@ -121,12 +120,12 @@ public class TestAppendSnapshotTruncate {
 }
 localDir.mkdirs();
 
-final DirWorker w = new DirWorker(dir, localDir, 3);
+final DirWorker w = new DirWorker(dir, localDir, FILE_WORKER_NUM);
 w.startAllFiles();
 w.start();
-Worker.sleep(10L*1000);
+Worker.sleep(TEST_TIME_SECOND * 1000);
 w.stop();
-w.stoptAllFiles();
+w.stopAllFiles();
 w.checkEverything();
   }
 
@@ -259,7 +258,7 @@ public class TestAppendSnapshotTruncate {
   }
 }
 
-void stoptAllFiles() throws InterruptedException {
+void stopAllFiles() throws InterruptedException {
   for(FileWorker f : files) { 
 f.stop();
   }
@@ -269,12 +268,12 @@ public class TestAppendSnapshotTruncate {
   LOG.info("checkEverything");
   for(FileWorker f : files) { 
 f.checkFullFile();
-Preconditions.checkState(f.state.get() != State.ERROR);
+f.checkErrorState();
   }
   for(String snapshot : snapshotPaths.keySet()) {
 

hadoop git commit: HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN. Contributed by Duo Zhang.

2015-03-09 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c7105fcff -> d8d8ed35f


HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN. Contributed by Duo 
Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8d8ed35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8d8ed35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8d8ed35

Branch: refs/heads/branch-2
Commit: d8d8ed35f00b15ee0f2f8aaf3fe7f7b42141286b
Parents: c7105fc
Author: Haohui Mai whe...@apache.org
Authored: Mon Mar 9 11:07:40 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Mon Mar 9 11:08:35 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../java/org/apache/hadoop/minikdc/MiniKdc.java | 17 ++-
 .../minikdc/TestChangeOrgNameAndDomain.java | 32 
 3 files changed, 45 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8d8ed35/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 11ad906..e095bbd 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -656,6 +656,9 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)
 
+HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN.
+(Duo Zhang via wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8d8ed35/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
--
diff --git 
a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
 
b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
index a649bd2..9388360 100644
--- 
a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
+++ 
b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
@@ -36,6 +36,7 @@ import 
org.apache.directory.server.core.kerberos.KeyDerivationInterceptor;
 import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmIndex;
 import 
org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmPartition;
 import org.apache.directory.server.core.partition.ldif.LdifPartition;
+import org.apache.directory.server.kerberos.KerberosConfig;
 import org.apache.directory.server.kerberos.kdc.KdcServer;
 import 
org.apache.directory.server.kerberos.shared.crypto.encryption.KerberosKeyFactory;
 import org.apache.directory.server.kerberos.shared.keytab.Keytab;
@@ -418,7 +419,15 @@ public class MiniKdc {
   IOUtils.closeQuietly(is1);
 }
 
-kdc = new KdcServer();
+KerberosConfig kerberosConfig = new KerberosConfig();
+kerberosConfig.setMaximumRenewableLifetime(Long.parseLong(conf
+.getProperty(MAX_RENEWABLE_LIFETIME)));
+kerberosConfig.setMaximumTicketLifetime(Long.parseLong(conf
+.getProperty(MAX_TICKET_LIFETIME)));
+kerberosConfig.setSearchBaseDn(String.format("dc=%s,dc=%s", orgName,
+orgDomain));
+kerberosConfig.setPaEncTimestampRequired(false);
+kdc = new KdcServer(kerberosConfig);
 kdc.setDirectoryService(ds);
 
 // transport
@@ -431,12 +440,6 @@ public class MiniKdc {
   throw new IllegalArgumentException("Invalid transport: " + transport);
 }
 kdc.setServiceName(conf.getProperty(INSTANCE));
-kdc.getConfig().setMaximumRenewableLifetime(
-Long.parseLong(conf.getProperty(MAX_RENEWABLE_LIFETIME)));
-kdc.getConfig().setMaximumTicketLifetime(
-Long.parseLong(conf.getProperty(MAX_TICKET_LIFETIME)));
-
-kdc.getConfig().setPaEncTimestampRequired(false);
 kdc.start();
 
 StringBuilder sb = new StringBuilder();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8d8ed35/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
--
diff --git 
a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
 
b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
new file mode 100644
index 000..3843130
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license 

hadoop git commit: HADOOP-11692. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang.

2015-03-09 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d8d8ed35f -> 45cc7514f


HADOOP-11692. Improve authentication failure WARN message to avoid user 
confusion. Contributed by Yongjun Zhang.

(cherry picked from commit de1101cb5be2d8efd0ef4945f64ccfe7cbd01049)

Conflicts:

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45cc7514
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45cc7514
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45cc7514

Branch: refs/heads/branch-2
Commit: 45cc7514f5c71526d0c6bedf5b1b3cb74511b941
Parents: d8d8ed3
Author: Yongjun Zhang yzh...@cloudera.com
Authored: Mon Mar 9 11:55:32 2015 -0700
Committer: Yongjun Zhang yzh...@cloudera.com
Committed: Mon Mar 9 12:12:29 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 .../src/main/java/org/apache/hadoop/ipc/Server.java | 9 +++--
 2 files changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45cc7514/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e095bbd..a8f5313 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -659,6 +659,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN.
 (Duo Zhang via wheat9)
 
+HADOOP-11692. Improve authentication failure WARN message to avoid user
+confusion. (Yongjun Zhang)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45cc7514/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index f63b54f..66fefdd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1297,10 +1297,15 @@ public abstract class Server {
   saslResponse = processSaslMessage(saslMessage);
 } catch (IOException e) {
   rpcMetrics.incrAuthenticationFailures();
+  if (LOG.isDebugEnabled()) {
+LOG.debug(StringUtils.stringifyException(e));
+  }
   // attempting user could be null
+  IOException tce = (IOException) getCauseForInvalidToken(e);
   AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
-  + attemptingUser + " (" + e.getLocalizedMessage() + ")");
-  throw (IOException) getCauseForInvalidToken(e);
+  + attemptingUser + " (" + e.getLocalizedMessage()
+  + ") with true cause: (" + tce.getLocalizedMessage() + ")");
+  throw tce;
 }
 
 if (saslServer != null && saslServer.isComplete()) {



[10/50] [abbrv] hadoop git commit: HDFS-6565. Use jackson instead jetty json in hdfs-client. Contributed by Akira AJISAKA.

2015-03-09 Thread jing9
HDFS-6565. Use jackson instead jetty json in hdfs-client. Contributed by Akira 
AJISAKA.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/046dc672
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/046dc672
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/046dc672

Branch: refs/heads/HDFS-7285
Commit: 046dc672e6206224f2b5ca3e7577540ef22febbf
Parents: bab6209c
Author: Haohui Mai whe...@apache.org
Authored: Tue Mar 3 17:54:13 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:23 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 217 +--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  21 +-
 .../apache/hadoop/hdfs/web/TestJsonUtil.java|  22 +-
 4 files changed, 127 insertions(+), 136 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/046dc672/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 42430ef..4e7b919 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1077,6 +1077,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7757. Misleading error messages in FSImage.java. (Brahma Reddy Battula
 via Arpit Agarwal)
 
+HDFS-6565. Use jackson instead jetty json in hdfs-client.
+(Akira Ajisaka via wheat9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/046dc672/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index aa6100c..2e67848 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.*;
@@ -35,7 +34,8 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
-import org.mortbay.util.ajax.JSON;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -95,59 +95,6 @@ public class JsonUtil {
     return (Token<BlockTokenIdentifier>)toToken(m);
   }
 
-  /** Convert a Token[] to a JSON array. */
-  private static Object[] toJsonArray(final Token<? extends TokenIdentifier>[] array
-  ) throws IOException {
-    if (array == null) {
-      return null;
-    } else if (array.length == 0) {
-      return EMPTY_OBJECT_ARRAY;
-    } else {
-      final Object[] a = new Object[array.length];
-      for(int i = 0; i < array.length; i++) {
-        a[i] = toJsonMap(array[i]);
-      }
-      return a;
-    }
-  }
-
-  /** Convert a token object to a JSON string. */
-  public static String toJsonString(final Token<? extends TokenIdentifier>[] tokens
-  ) throws IOException {
-    if (tokens == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put(Token.class.getSimpleName(), toJsonArray(tokens));
-    return toJsonString(Token.class.getSimpleName() + "s", m);
-  }
-
-  /** Convert an Object[] to a List<Token<?>>.  */
-  private static List<Token<?>> toTokenList(final Object[] objects) throws IOException {
-    if (objects == null) {
-      return null;
-    } else if (objects.length == 0) {
-      return Collections.emptyList();
-    } else {
-      final List<Token<?>> list = new ArrayList<Token<?>>(objects.length);
-      for(int i = 0; i < objects.length; i++) {
-        list.add(toToken((Map<?, ?>)objects[i]));
-      }
-      return list;
-    }
-  }
-
-  /** Convert a JSON map to a List<Token<?>>. */
-  public static List<Token<?>> toTokenList(final Map<?, ?> json) throws IOException {
-    if (json == null) {
-      return null;
-    }
-
-    final Map<?, ?> m = (Map<?,
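
For context, the replacement API is Jackson's ObjectMapper (the org.codehaus.jackson 1.x imports shown in the diff above); a minimal sketch of the old-vs-new serialization, with illustrative map contents:

    import java.util.Map;
    import java.util.TreeMap;

    import org.codehaus.jackson.map.ObjectMapper;

    public class JacksonSketch {
      public static void main(String[] args) throws Exception {
        Map<String, Object> m = new TreeMap<String, Object>();
        m.put("length", 1024L);
        m.put("owner", "hdfs");

        // Old style (removed): org.mortbay.util.ajax.JSON.toString(m)
        // New style: a single ObjectMapper call; the mapper is thread-safe and reusable.
        ObjectMapper mapper = new ObjectMapper();
        String json = mapper.writeValueAsString(m);
        System.out.println(json);  // {"length":1024,"owner":"hdfs"}

        // Parsing goes through the same mapper.
        Map<?, ?> parsed = mapper.readValue(json, Map.class);
        System.out.println(parsed.get("owner"));  // hdfs
      }
    }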

[30/50] [abbrv] hadoop git commit: Update CHANGES.txt for YARN-2616 to fix indentation.

2015-03-09 Thread jing9
Update CHANGES.txt for YARN-2616 to fix indentation.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22b1f538
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22b1f538
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22b1f538

Branch: refs/heads/HDFS-7285
Commit: 22b1f538fcf5d2e470e87845cf0b217a1289e873
Parents: fed8745
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Fri Mar 6 00:53:03 2015 +0900
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:25 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22b1f538/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3ea5501..5f61462 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -89,8 +89,8 @@ Release 2.7.0 - UNRELEASED
 YARN-2217. [YARN-1492] Shared cache client side changes. 
 (Chris Trezzo via kasha)
 
- YARN-2616 [YARN-913] Add CLI client to the registry to list, view
- and manipulate entries. (Akshay Radia via stevel)
+YARN-2616 [YARN-913] Add CLI client to the registry to list, view
+and manipulate entries. (Akshay Radia via stevel)
 
 YARN-2994. Document work-preserving RM restart. (Jian He via ozawa)
 



[17/50] [abbrv] hadoop git commit: HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one. Contributed by Dongming Liang.

2015-03-09 Thread jing9
HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one. Contributed by 
Dongming Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/521a196d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/521a196d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/521a196d

Branch: refs/heads/HDFS-7285
Commit: 521a196db7c509e2738ce2e4d712cb347bfa2dca
Parents: 97adb9a
Author: Dongming Liang dongming.li...@capitalone.com
Authored: Wed Mar 4 17:47:05 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:24 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java  | 4 +++-
 .../java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java | 3 +--
 .../org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java | 2 +-
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java | 3 ++-
 .../org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java | 4 ++--
 .../src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java  | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestReplication.java| 3 ++-
 .../hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java   | 2 +-
 11 files changed, 19 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2be1a4c..d9008d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -706,6 +706,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7535. Utilize Snapshot diff report for distcp. (jing9)
 
+HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one.
+(Dongming Liang via shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
index 628c610..ce96ac9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
@@ -351,7 +352,8 @@ public class RemoteBlockReader extends FSInputChecker 
implements BlockReader {
   long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
   DatanodeID datanodeID, PeerCache peerCache) {
 // Path is used only for printing block and file information in debug
-super(new Path("/blk_" + blockId + ":" + bpid + ":of:"+ file)/*too non path-like?*/,
+super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
+":" + bpid + ":of:"+ file)/*too non path-like?*/,
   1, verifyChecksum,
   checksum.getChecksumSize() > 0? checksum : null, 
   checksum.getBytesPerChecksum(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 754df2c..001f684 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -83,7 +83,6 @@ import java.util.concurrent.Future;
 public class DataStorage extends Storage {
 
   public final static String BLOCK_SUBDIR_PREFIX = "subdir";
-  final static String BLOCK_FILE_PREFIX = "blk_";
   final static String 

[02/50] [abbrv] hadoop git commit: HADOOP-11183. Memory-based S3AOutputstream. (Thomas Demoor via stevel)

2015-03-09 Thread jing9
HADOOP-11183. Memory-based S3AOutputstream. (Thomas Demoor via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24478c0a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24478c0a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24478c0a

Branch: refs/heads/HDFS-7285
Commit: 24478c0a40fe769d56daac52b8413b187dea8df2
Parents: 4006739
Author: Steve Loughran ste...@apache.org
Authored: Tue Mar 3 16:18:39 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:22 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 .../src/main/resources/core-default.xml |  20 +-
 .../org/apache/hadoop/fs/s3a/Constants.java |   8 +
 .../hadoop/fs/s3a/S3AFastOutputStream.java  | 413 +++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  24 +-
 .../src/site/markdown/tools/hadoop-aws/index.md |  46 ++-
 .../hadoop/fs/s3a/TestS3AFastOutputStream.java  |  74 
 7 files changed, 570 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24478c0a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 11785f2..cb5cd4d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -667,6 +667,8 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11620. Add support for load balancing across a group of KMS for HA.
 (Arun Suresh via wang)
 
+HADOOP-11183. Memory-based S3AOutputstream. (Thomas Demoor via stevel)
+
   BUG FIXES
 
 HADOOP-11512. Use getTrimmedStrings when reading serialization keys

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24478c0a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 80dd15b..74390d8 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -763,13 +763,13 @@ for ldap providers in the same way as above does.
 <property>
   <name>fs.s3a.connection.establish.timeout</name>
   <value>5000</value>
-  <description>Socket connection setup timeout in seconds.</description>
+  <description>Socket connection setup timeout in milliseconds.</description>
 </property>
 
 <property>
   <name>fs.s3a.connection.timeout</name>
   <value>50000</value>
-  <description>Socket connection timeout in seconds.</description>
+  <description>Socket connection timeout in milliseconds.</description>
 </property>
 
 <property>
@@ -846,6 +846,22 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.s3a.fast.upload</name>
+  <value>false</value>
+  <description>Upload directly from memory instead of buffering to
+    disk first. Memory usage and parallelism can be controlled as up to
+    fs.s3a.multipart.size memory is consumed for each (part)upload actively
+    uploading (fs.s3a.threads.max) or queueing (fs.s3a.max.total.tasks)</description>
+</property>
+
+  <property>
+  <name>fs.s3a.fast.buffer.size</name>
+  <value>1048576</value>
+  <description>Size of initial memory buffer in bytes allocated for an
+    upload. No effect if fs.s3a.fast.upload is false.</description>
+</property>
+
+<property>
   <name>fs.s3a.impl</name>
   <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
   <description>The implementation class of the S3A Filesystem</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24478c0a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index 1d4f67b..e7462dc 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -83,6 +83,14 @@ public class Constants {
   // comma separated list of directories
   public static final String BUFFER_DIR = "fs.s3a.buffer.dir";
 
+  // should we upload directly from memory rather than using a file buffer
+  public static final String FAST_UPLOAD = "fs.s3a.fast.upload";
+  public static final boolean DEFAULT_FAST_UPLOAD = false;
+
+  //initial size of memory buffer for a fast upload
+  public static final String FAST_BUFFER_SIZE = "fs.s3a.fast.buffer.size";
+  public static final int 
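
The two new keys above are ordinary Configuration properties; a hedged sketch of switching them on from code (bucket name and buffer size are placeholders):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FastUploadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setBoolean("fs.s3a.fast.upload", true);      // buffer parts in memory, not on disk
        conf.setInt("fs.s3a.fast.buffer.size", 1048576);  // initial 1 MB buffer per stream
        // Peak memory is bounded roughly by fs.s3a.multipart.size times the number of
        // parts actively uploading (fs.s3a.threads.max) or queued (fs.s3a.max.total.tasks).

        FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
        fs.create(new Path("s3a://example-bucket/tmp/demo.bin")).close();
      }
    }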

[08/50] [abbrv] hadoop git commit: HDFS-7535. Utilize Snapshot diff report for distcp. Contributed by Jing Zhao.

2015-03-09 Thread jing9
HDFS-7535. Utilize Snapshot diff report for distcp. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39535ec7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39535ec7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39535ec7

Branch: refs/heads/HDFS-7285
Commit: 39535ec788aee029e31b42ae666ecf516215d10c
Parents: bf3604b
Author: Jing Zhao ji...@apache.org
Authored: Wed Mar 4 10:30:53 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:23 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/tools/CopyListing.java|   4 +-
 .../java/org/apache/hadoop/tools/DiffInfo.java  |  90 +
 .../java/org/apache/hadoop/tools/DistCp.java|  16 +-
 .../apache/hadoop/tools/DistCpConstants.java|   3 +
 .../apache/hadoop/tools/DistCpOptionSwitch.java |  12 +-
 .../org/apache/hadoop/tools/DistCpOptions.java  |  34 ++
 .../org/apache/hadoop/tools/DistCpSync.java | 192 ++
 .../org/apache/hadoop/tools/OptionsParser.java  |  24 +-
 .../hadoop/tools/mapred/CopyCommitter.java  |   3 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java | 349 +++
 .../apache/hadoop/tools/TestOptionsParser.java  |  75 +++-
 12 files changed, 790 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 62006d3..3c6d447 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -704,6 +704,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7789. DFSck should resolve the path to support cross-FS symlinks.
 (gera)
 
+HDFS-7535. Utilize Snapshot diff report for distcp. (jing9)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
index a7b68a9..e3c58e9 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
@@ -224,7 +224,9 @@ public abstract class CopyListing extends Configured {
Credentials credentials,
DistCpOptions options)
   throws IOException {
-
+if (options.shouldUseDiff()) {
+  return new GlobbedCopyListing(configuration, credentials);
+}
 String copyListingClassName = configuration.get(DistCpConstants.
 CONF_LABEL_COPY_LISTING_CLASS, "");
 Class<? extends CopyListing> copyListingClass;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
new file mode 100644
index 000..b617de7
--- /dev/null
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tools;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+
+/**
+ * Information presenting a 
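
In usage terms, the patch lets DistCp copy only the delta between two snapshots of the source tree. A hedged invocation sketch through the Tool entry point — the -diff flag, its pairing with -update, and the DistCp(Configuration, DistCpOptions) constructor follow this patch's OptionsParser/DistCp changes; snapshot names and cluster paths are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.util.ToolRunner;

    public class DistCpSyncSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Copy only what changed between source snapshots s1 and s2, instead of
        // re-listing and re-copying the whole tree.
        String[] distcpArgs = {
            "-update", "-diff", "s1", "s2",
            "hdfs://nn1/src", "hdfs://nn2/dst"
        };
        // Passing null options lets run() parse the argument array itself.
        System.exit(ToolRunner.run(conf, new DistCp(conf, null), distcpArgs));
      }
    }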

[20/50] [abbrv] hadoop git commit: YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving events for old client. (Zhihai Xu via kasha)

2015-03-09 Thread jing9
YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving 
events for old client. (Zhihai Xu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31d3efe6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31d3efe6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31d3efe6

Branch: refs/heads/HDFS-7285
Commit: 31d3efe6fa0a643d0935ccb7780482dc3f4789b2
Parents: 27f8981
Author: Karthik Kambatla ka...@apache.org
Authored: Wed Mar 4 19:47:02 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:24 2015 -0700

--
 .../apache/hadoop/ha/ClientBaseWithFixes.java   | 11 +++-
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../recovery/ZKRMStateStore.java| 53 
 .../TestZKRMStateStoreZKClientConnections.java  | 33 +---
 4 files changed, 70 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31d3efe6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
index 7d0727a..5f03133 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
@@ -90,6 +90,14 @@ public abstract class ClientBaseWithFixes extends ZKTestCase 
{
 // XXX this doesn't need to be volatile! (Should probably be final)
 volatile CountDownLatch clientConnected;
 volatile boolean connected;
+protected ZooKeeper client;
+
+public void initializeWatchedClient(ZooKeeper zk) {
+if (client != null) {
+throw new RuntimeException("Watched Client was already set");
+}
+client = zk;
+}
 
 public CountdownWatcher() {
 reset();
@@ -191,8 +199,7 @@ public abstract class ClientBaseWithFixes extends 
ZKTestCase {
 zk.close();
 }
 }
-
-
+watcher.initializeWatchedClient(zk);
 return zk;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31d3efe6/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9a52325..4dd61eb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -701,6 +701,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with 
pending 
 jobs. (Siqi Li via kasha)
 
+YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher 
receiving 
+events for old client. (Zhihai Xu via kasha)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31d3efe6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
index 591a551..614ef15 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
@@ -153,7 +153,13 @@ public class ZKRMStateStore extends RMStateStore {
 
   @VisibleForTesting
   protected ZooKeeper zkClient;
-  private ZooKeeper oldZkClient;
+
+  /* activeZkClient is not used to do actual operations,
+   * it is only used to verify client session for watched events and
+   * it gets activated into zkClient on connection event.
+   */
+  @VisibleForTesting
+  ZooKeeper activeZkClient;
 
   /** Fencing related variables */
   private static final String FENCING_LOCK = "RM_ZK_FENCING_LOCK";
@@ -355,21 +361,14 @@ public class ZKRMStateStore extends RMStateStore {
   }
 
   private synchronized void closeZkClients() throws IOException 
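
The heart of the fix, reduced to a sketch: watcher callbacks identify which ZooKeeper client they came from, and any event not originating from activeZkClient is dropped (the field name and the two-argument event-processing shape follow the diff; the surrounding class is simplified):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class StaleWatcherSketch {
      private ZooKeeper activeZkClient;  // the only session whose events we trust

      // Invoked by the Watcher with the client the watch was registered on.
      synchronized void processWatchEvent(ZooKeeper zk, WatchedEvent event) {
        if (zk != activeZkClient) {
          // ZooKeeper#close() is asynchronous, so an already-closed client can
          // still deliver Disconnected/Expired events; ignore them.
          System.out.println("Ignoring event from stale session: " + event);
          return;
        }
        // ... handle connection and session events for the live client ...
      }
    }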

[39/50] [abbrv] hadoop git commit: HADOOP-11653. shellprofiles should require .sh extension (Brahma Reddy Battula via aw)

2015-03-09 Thread jing9
HADOOP-11653. shellprofiles should require .sh extension (Brahma Reddy Battula 
via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/667c3fce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/667c3fce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/667c3fce

Branch: refs/heads/HDFS-7285
Commit: 667c3fce2d3404c4daf5cdf2a034e7a53d3754c7
Parents: c6199e7
Author: Allen Wittenauer a...@apache.org
Authored: Fri Mar 6 13:54:11 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:26 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../src/main/bin/hadoop-functions.sh|   4 +-
 .../src/main/conf/shellprofile.d/example| 106 ---
 .../src/main/conf/shellprofile.d/example.sh | 106 +++
 .../hadoop-hdfs/src/main/shellprofile.d/hdfs|  36 ---
 .../hadoop-hdfs/src/main/shellprofile.d/hdfs.sh |  36 +++
 .../shellprofile.d/mapreduce|  41 ---
 .../shellprofile.d/mapreduce.sh |  41 +++
 .../hadoop-yarn/shellprofile.d/yarn |  62 ---
 .../hadoop-yarn/shellprofile.d/yarn.sh  |  62 +++
 10 files changed, 250 insertions(+), 247 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/667c3fce/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 65c6d85..628faa3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -414,6 +414,9 @@ Trunk (Unreleased)
 
 HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)
 
+HADOOP-11653. shellprofiles should require .sh extension
+(Brahma Reddy Battula via aw)
+
   OPTIMIZATIONS
 
 HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/667c3fce/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index bccbe25..9488e3c 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -162,13 +162,13 @@ function hadoop_import_shellprofiles
   local files2
 
   if [[ -d ${HADOOP_LIBEXEC_DIR}/shellprofile.d ]]; then
-files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*)
+files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*.sh)
   else
 hadoop_error "WARNING: ${HADOOP_LIBEXEC_DIR}/shellprofile.d doesn't exist. Functionality may not work."
   fi
 
   if [[ -d ${HADOOP_CONF_DIR}/shellprofile.d ]]; then
-files2=(${HADOOP_CONF_DIR}/shellprofile.d/*)
+files2=(${HADOOP_CONF_DIR}/shellprofile.d/*.sh)
   fi
 
   for i in "${files1[@]}" "${files2[@]}"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/667c3fce/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example 
b/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example
deleted file mode 100644
index dc50821..000
--- a/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example
+++ /dev/null
@@ -1,106 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the License); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an AS IS BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# This is an example shell profile.  It does not do anything other than
-# show an example of what the general structure and API of the pluggable
-# shell profile code looks like.
-#
-#
-
-#
-#  First, register the profile:
-#
-# hadoop_add_profile example
-#
-#
-# This profile name determines what the name of the functions will
-# be. The general pattern is 

[15/50] [abbrv] hadoop git commit: HDFS-7682. {{DistributedFileSystem#getFileChecksum}} of a snapshotted file includes non-snapshotted content. Contributed by Charles Lamb.

2015-03-09 Thread jing9
HDFS-7682. {{DistributedFileSystem#getFileChecksum}} of a snapshotted file 
includes non-snapshotted content. Contributed by Charles Lamb.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ac995e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ac995e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ac995e5

Branch: refs/heads/HDFS-7285
Commit: 0ac995e50c9c6073a51fbcacdb05da53a844f59b
Parents: 046dc67
Author: Aaron T. Myers a...@apache.org
Authored: Tue Mar 3 18:08:59 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:23 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  3 +++
 .../snapshot/TestSnapshotFileLength.java| 25 +---
 3 files changed, 28 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac995e5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4e7b919..7ff3c78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1080,6 +1080,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-6565. Use jackson instead jetty json in hdfs-client.
 (Akira Ajisaka via wheat9)
 
+HDFS-7682. {{DistributedFileSystem#getFileChecksum}} of a snapshotted file
+includes non-snapshotted content. (Charles Lamb via atm)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac995e5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index abcd847..aac7b51 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2220,6 +2220,9 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 
 // get block checksum for each block
 long remaining = length;
+    if (src.contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR)) {
+      remaining = Math.min(length, blockLocations.getFileLength());
+    }
     for(int i = 0; i < locatedblocks.size() && remaining > 0; i++) {
   if (refetchBlocks) {  // refetch to get fresh tokens
 blockLocations = callGetBlockLocations(namenode, src, 0, length);
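The three added lines cap remaining at the file length recorded in the block
locations whenever the source path contains a .snapshot component, so the
per-block checksum loop stops at the snapshotted length instead of walking
blocks appended after the snapshot was taken. A minimal behavioral sketch;
the paths and the snapshot name "s1" below are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SnapshotChecksumSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/dir/file");
    Path snapFile = new Path("/dir/.snapshot/s1/file");

    // Checksum of the snapshot path before any further writes.
    FileChecksum before = fs.getFileChecksum(snapFile);

    // Append to the live file; the snapshot contents are immutable.
    FSDataOutputStream out = fs.append(file);
    out.write(new byte[] { 1, 2, 3 });
    out.close();

    // With this fix the checksum covers only the snapshotted length,
    // so it is stable across appends to the live file.
    FileChecksum after = fs.getFileChecksum(snapFile);
    System.out.println(before.equals(after)); // expected: true
  }
}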

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac995e5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
index 98aafc1..d53140f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
 
-
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -29,8 +29,9 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.*;
-
+import static org.hamcrest.CoreMatchers.not;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -103,17 +104,35 @@ public class TestSnapshotFileLength {
 Path file1snap1
 = SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
 
+final FileChecksum snapChksum1 = hdfs.getFileChecksum(file1snap1);
+    assertThat("file and snapshot file checksums are not equal",
+        hdfs.getFileChecksum(file1), is(snapChksum1));
+
 // Append to the file.
 FSDataOutputStream out = 

[13/50] [abbrv] hadoop git commit: HDFS-7869. Inconsistency in the return information while performing rolling upgrade ( Contributed by J.Andreina )

2015-03-09 Thread jing9
HDFS-7869. Inconsistency in the return information while performing rolling 
upgrade ( Contributed by J.Andreina )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/871bd4e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/871bd4e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/871bd4e6

Branch: refs/heads/HDFS-7285
Commit: 871bd4e688b83d63296b604d92d99c781a8977ff
Parents: 7814f50
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Mar 4 14:38:38 2015 +0530
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:23 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java   | 6 +++---
 .../apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java  | 3 +--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java   | 2 +-
 .../test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java   | 4 ++--
 5 files changed, 10 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/871bd4e6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7ff3c78..2037973 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1083,6 +1083,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7682. {{DistributedFileSystem#getFileChecksum}} of a snapshotted file
 includes non-snapshotted content. (Charles Lamb via atm)
 
+HDFS-7869. Inconsistency in the return information while performing rolling
+upgrade ( J.Andreina via vinayakumarb )
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/871bd4e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index bd2a203..621ebef 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7500,7 +7500,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 }
   }
 
-  void finalizeRollingUpgrade() throws IOException {
+  RollingUpgradeInfo finalizeRollingUpgrade() throws IOException {
 checkSuperuserPrivilege();
 checkOperation(OperationCategory.WRITE);
 writeLock();
@@ -7508,7 +7508,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 try {
   checkOperation(OperationCategory.WRITE);
   if (!isRollingUpgrade()) {
-return;
+return null;
   }
      checkNameNodeSafeMode("Failed to finalize rolling upgrade");
 
@@ -7533,7 +7533,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
       logAuditEvent(true, "finalizeRollingUpgrade", null, null, null);
 }
-return;
+return returnInfo;
   }
 
   RollingUpgradeInfo finalizeRollingUpgradeInternal(long finalizeTime)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/871bd4e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 9ccdb40..f20fb35 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1145,8 +1145,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
 case PREPARE:
   return namesystem.startRollingUpgrade();
 case FINALIZE:
-  namesystem.finalizeRollingUpgrade();
-  return null;
+  return namesystem.finalizeRollingUpgrade();
 default:
      throw new UnsupportedActionException(action + " is not yet supported.");
 }
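Since finalizeRollingUpgrade() now returns RollingUpgradeInfo, the FINALIZE
action reports what was finalized, and null still indicates that no rolling
upgrade was in progress. A hedged caller-side sketch of the new contract
(illustrative, not the actual DFSAdmin code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;

public class FinalizeRollingUpgradeSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
    // The RPC now surfaces the finalized upgrade's info to the client.
    System.out.println(info == null
        ? "There is no rolling upgrade in progress."
        : "Rolling upgrade is finalized: " + info);
  }
}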

http://git-wip-us.apache.org/repos/asf/hadoop/blob/871bd4e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

[25/50] [abbrv] hadoop git commit: YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong

2015-03-09 Thread jing9
YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie 
Shen and Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/70703472
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/70703472
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/70703472

Branch: refs/heads/HDFS-7285
Commit: 70703472f46b2c722616e6af8e654c0798c04814
Parents: d8bb732
Author: Jian He jia...@apache.org
Authored: Thu Mar 5 21:14:41 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:25 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../dev-support/findbugs-exclude.xml|   5 +-
 .../yarn/api/ApplicationBaseProtocol.java   | 355 +++
 .../yarn/api/ApplicationClientProtocol.java | 290 +--
 .../yarn/api/ApplicationHistoryProtocol.java| 303 +---
 .../apache/hadoop/yarn/webapp/ResponseInfo.java |   6 +-
 .../hadoop/yarn/webapp/YarnWebParams.java   |   4 +
 .../hadoop/yarn/webapp/view/HtmlBlock.java  |   2 +
 .../ApplicationHistoryClientService.java| 185 +-
 .../ApplicationHistoryManager.java  | 126 ++-
 .../ApplicationHistoryServer.java   |   2 +-
 .../webapp/AHSView.java |  28 +-
 .../webapp/AHSWebApp.java   |  16 +-
 .../webapp/AHSWebServices.java  |   6 +-
 .../webapp/AppAttemptPage.java  |  15 +-
 .../webapp/AppPage.java |  21 +-
 .../TestApplicationHistoryClientService.java|  12 +-
 .../webapp/TestAHSWebApp.java   |  27 +-
 .../webapp/TestAHSWebServices.java  |  26 +-
 .../yarn/server/api/ApplicationContext.java | 122 ---
 .../yarn/server/webapp/AppAttemptBlock.java | 119 ---
 .../hadoop/yarn/server/webapp/AppBlock.java | 274 --
 .../hadoop/yarn/server/webapp/AppsBlock.java|  53 ++-
 .../yarn/server/webapp/ContainerBlock.java  |  29 +-
 .../hadoop/yarn/server/webapp/WebPageUtils.java |  86 +
 .../hadoop/yarn/server/webapp/WebServices.java  |  68 +++-
 .../hadoop/yarn/server/webapp/dao/AppInfo.java  |  11 +-
 .../resourcemanager/webapp/AppAttemptPage.java  |  55 +++
 .../server/resourcemanager/webapp/AppBlock.java | 344 --
 .../server/resourcemanager/webapp/AppPage.java  |  25 +-
 .../resourcemanager/webapp/AppsBlock.java   | 132 ---
 .../webapp/AppsBlockWithMetrics.java|   1 +
 .../webapp/CapacitySchedulerPage.java   |   1 +
 .../resourcemanager/webapp/ContainerPage.java   |  44 +++
 .../webapp/DefaultSchedulerPage.java|   1 +
 .../webapp/FairSchedulerPage.java   |  21 +-
 .../server/resourcemanager/webapp/RMWebApp.java |   5 +
 .../resourcemanager/webapp/RmController.java|   8 +
 .../server/resourcemanager/webapp/RmView.java   |  31 +-
 .../resourcemanager/webapp/TestAppPage.java |   8 +-
 .../resourcemanager/webapp/TestRMWebApp.java|  48 ++-
 .../webapp/TestRMWebAppFairScheduler.java   |  14 +-
 42 files changed, 1376 insertions(+), 1556 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index dcf328f..accde78 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -360,6 +360,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3122. Metrics for container's actual CPU usage. 
 (Anubhav Dhoot via kasha)
 
+YARN-1809. Synchronize RM and TimeLineServer Web-UIs. (Zhijie Shen and
+Xuan Gong via jianhe)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 1c3f201..a89884a 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -63,9 +63,12 @@
     <Bug pattern="BC_UNCONFIRMED_CAST" />
   </Match>
   <Match>
-    <Class name="~org\.apache\.hadoop\.yarn\.server\.resourcemanager\.rmapp\.attempt\.RMAppAttemptMetrics" />
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
     <Method name="getLocalityStatistics" />
     <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class 
[48/50] [abbrv] hadoop git commit: HDFS-7411. Change decommission logic to throttle by blocks rather than nodes in each interval. Contributed by Andrew Wang

2015-03-09 Thread jing9
HDFS-7411. Change decommission logic to throttle by blocks rather
than nodes in each interval. Contributed by Andrew Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1e4dfe2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1e4dfe2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1e4dfe2

Branch: refs/heads/HDFS-7285
Commit: a1e4dfe211b5153697d2375680314c2295fc9e05
Parents: 7b91223
Author: Chris Douglas cdoug...@apache.org
Authored: Sun Mar 8 18:31:04 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:17:55 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +-
 .../apache/hadoop/hdfs/HdfsConfiguration.java   |   2 +-
 .../server/blockmanagement/BlockManager.java| 123 +---
 .../server/blockmanagement/DatanodeManager.java | 109 +---
 .../blockmanagement/DecommissionManager.java| 619 +--
 .../src/main/resources/hdfs-default.xml |  23 +-
 .../apache/hadoop/hdfs/TestDecommission.java| 412 
 .../blockmanagement/BlockManagerTestUtil.java   |   8 +-
 .../TestReplicationPolicyConsiderLoad.java  |   2 +-
 .../namenode/TestDecommissioningStatus.java |  59 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   2 +-
 .../namenode/TestNamenodeCapacityReport.java|   4 +-
 13 files changed, 996 insertions(+), 376 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 29717e1..3cd6372 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -719,6 +719,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7855. Separate class Packet from DFSOutputStream. (Li Bo via jing9)
 
+HDFS-7411. Change decommission logic to throttle by blocks rather than
+nodes in each interval. (Andrew Wang via cdouglas)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9e9cd40..2dded68 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -455,8 +455,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final long
DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS_DEFAULT = 3L;
   public static final String  DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY = "dfs.namenode.decommission.interval";
   public static final int     DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT = 30;
-  public static final String  DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY = "dfs.namenode.decommission.nodes.per.interval";
-  public static final int     DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT = 5;
+  public static final String  DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY = "dfs.namenode.decommission.blocks.per.interval";
+  public static final int     DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT = 50;
+  public static final String  DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES = "dfs.namenode.decommission.max.concurrent.tracked.nodes";
+  public static final int     DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT = 100;
   public static final String  DFS_NAMENODE_HANDLER_COUNT_KEY = "dfs.namenode.handler.count";
   public static final int     DFS_NAMENODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY = "dfs.namenode.service.handler.count";
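The old nodes-per-interval knob is replaced by two throttles: blocks scanned
per interval, and the number of concurrently tracked decommissioning nodes.
A hedged sketch of setting them programmatically; the values are purely
illustrative, not recommendations:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class DecommissionThrottleSketch {
  public static Configuration tunedConf() {
    Configuration conf = new HdfsConfiguration();
    // Scan fewer blocks per interval to reduce NameNode load.
    conf.setInt(
        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
        100000);
    // Bound how many decommissioning datanodes are tracked at once.
    conf.setInt(
        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
        25);
    return conf;
  }
}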

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
index 8f2966a..29a2667 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
+++ 

[35/50] [abbrv] hadoop git commit: YARN-3275. CapacityScheduler: Preemption happening on non-preemptable queues. Contributed by Eric Payne

2015-03-09 Thread jing9
YARN-3275. CapacityScheduler: Preemption happening on non-preemptable queues. 
Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dfc015f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dfc015f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dfc015f2

Branch: refs/heads/HDFS-7285
Commit: dfc015f295d92286ad570667556e837bd1d30134
Parents: fcae120
Author: Jason Lowe jl...@apache.org
Authored: Fri Mar 6 22:36:18 2015 +
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:26 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../hadoop/yarn/util/resource/Resources.java|  5 
 .../ProportionalCapacityPreemptionPolicy.java   | 27 
 ...estProportionalCapacityPreemptionPolicy.java | 24 +
 4 files changed, 54 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfc015f2/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c2aa2ef..250fc1c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -719,6 +719,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3227. Timeline renew delegation token fails when RM user's TGT is 
expired
 (Zhijie Shen via xgong)
 
+YARN-3275. CapacityScheduler: Preemption happening on non-preemptable
+queues (Eric Payne via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfc015f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index a205bd1..bcb0421 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -260,4 +260,9 @@ public class Resources {
 return createResource(Math.min(lhs.getMemory(), rhs.getMemory()),
 Math.min(lhs.getVirtualCores(), rhs.getVirtualCores()));
   }
+  
+  public static Resource componentwiseMax(Resource lhs, Resource rhs) {
+return createResource(Math.max(lhs.getMemory(), rhs.getMemory()),
+Math.max(lhs.getVirtualCores(), rhs.getVirtualCores()));
+  }
 }
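componentwiseMax mirrors the existing componentwiseMin: each dimension is
taken independently, so the result need not equal either argument. A small
usage sketch:

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class ComponentwiseMaxSketch {
  public static void main(String[] args) {
    Resource a = Resource.newInstance(4096, 2); // 4096 MB, 2 vcores
    Resource b = Resource.newInstance(2048, 8); // 2048 MB, 8 vcores
    // Max is taken per dimension: <memory: 4096, vCores: 8>.
    System.out.println(Resources.componentwiseMax(a, b));
  }
}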

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfc015f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 738f527..87a2a00 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -527,6 +527,17 @@ public class ProportionalCapacityPreemptionPolicy 
implements SchedulingEditPolic
     List<RMContainer> skippedAMContainerlist = new ArrayList<RMContainer>();
 
     for (TempQueue qT : queues) {
+      if (qT.preemptionDisabled && qT.leafQueue != null) {
+        if (LOG.isDebugEnabled()) {
+          if (Resources.greaterThan(rc, clusterResource,
+              qT.toBePreempted, Resource.newInstance(0, 0))) {
+            LOG.debug("Tried to preempt the following "
+                + "resources from non-preemptable queue: "
+                + qT.queueName + " - Resources: " + qT.toBePreempted);
+          }
+        }
+        continue;
+      }
   // we act only if we are violating balance by more than
   // maxIgnoredOverCapacity
   if (Resources.greaterThan(rc, clusterResource, 

[23/50] [abbrv] hadoop git commit: YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong

2015-03-09 Thread jing9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
deleted file mode 100644
index 935be61..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp;
-
-import static org.apache.hadoop.yarn.util.StringHelper.join;
-import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_STATE;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR_VALUE;
-
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.concurrent.ConcurrentMap;
-
-import org.apache.commons.lang.StringEscapeUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
-import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
-
-import com.google.inject.Inject;
-
-class AppsBlock extends HtmlBlock {
-  final ConcurrentMap<ApplicationId, RMApp> apps;
-  private final Configuration conf;
-  final ResourceManager rm;
-  @Inject
-  AppsBlock(ResourceManager rm, ViewContext ctx, Configuration conf) {
-    super(ctx);
-    apps = rm.getRMContext().getRMApps();
-    this.conf = conf;
-    this.rm = rm;
-  }
-
-  @Override public void render(Block html) {
-    TBODY<TABLE<Hamlet>> tbody = html.
-      table("#apps").
-        thead().
-          tr().
-            th(".id", "ID").
-            th(".user", "User").
-            th(".name", "Name").
-            th(".type", "Application Type").
-            th(".queue", "Queue").
-            th(".starttime", "StartTime").
-            th(".finishtime", "FinishTime").
-            th(".state", "YarnApplicationState").
-            th(".finalstatus", "FinalStatus").
-            th(".progress", "Progress").
-            th(".ui", "Tracking UI")._()._().
-        tbody();
-    Collection<YarnApplicationState> reqAppStates = null;
-    String reqStateString = $(APP_STATE);
-    if (reqStateString != null && !reqStateString.isEmpty()) {
-      String[] appStateStrings = reqStateString.split(",");
-      reqAppStates = new HashSet<YarnApplicationState>(appStateStrings.length);
-      for(String stateString : appStateStrings) {
-        reqAppStates.add(YarnApplicationState.valueOf(stateString));
-      }
-    }
-    StringBuilder appsTableData = new StringBuilder("[\n");
-    for (RMApp app : apps.values()) {
-      if (reqAppStates != null && !reqAppStates.contains(app.createApplicationState())) {
-        continue;
-      }
-      AppInfo appInfo = new AppInfo(rm, app, true, WebAppUtils.getHttpSchemePrefix(conf));
-      String percent = String.format("%.1f", appInfo.getProgress());
-      //AppID numerical value parsed by parseHadoopID in yarn.dt.plugins.js
-      appsTableData.append("[\"<a href='")
-          .append(url("app", appInfo.getAppId())).append("'>")
-          .append(appInfo.getAppId()).append("</a>\",\"")
-  

[09/50] [abbrv] hadoop git commit: YARN-3131. YarnClientImpl should check FAILED and KILLED state in submitApplication. Contributed by Chang Li

2015-03-09 Thread jing9
YARN-3131. YarnClientImpl should check FAILED and KILLED state in 
submitApplication. Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf3604b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf3604b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf3604b5

Branch: refs/heads/HDFS-7285
Commit: bf3604b53af4243c0ea0b4fd3ef398c2b7eaf450
Parents: aca0abe
Author: Jason Lowe jl...@apache.org
Authored: Wed Mar 4 18:04:22 2015 +
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:23 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../yarn/client/api/impl/YarnClientImpl.java| 19 +--
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  2 +-
 .../yarn/client/api/impl/TestYarnClient.java| 55 ++--
 4 files changed, 68 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf3604b5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5eaf4f4..03bb20b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -692,6 +692,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a 
queue's
 available resource-limit from the parent queue. (Wangda Tan via vinodkv)
 
+YARN-3131. YarnClientImpl should check FAILED and KILLED state in
+submitApplication (Chang Li via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf3604b5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 6acf7d8..d6b36bb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -254,13 +254,22 @@ public class YarnClientImpl extends YarnClient {
 
 int pollCount = 0;
 long startTime = System.currentTimeMillis();
-
+    EnumSet<YarnApplicationState> waitingStates =
+        EnumSet.of(YarnApplicationState.NEW,
+            YarnApplicationState.NEW_SAVING,
+            YarnApplicationState.SUBMITTED);
+    EnumSet<YarnApplicationState> failToSubmitStates =
+        EnumSet.of(YarnApplicationState.FAILED,
+            YarnApplicationState.KILLED);
     while (true) {
       try {
-        YarnApplicationState state =
-            getApplicationReport(applicationId).getYarnApplicationState();
-        if (!state.equals(YarnApplicationState.NEW) &&
-            !state.equals(YarnApplicationState.NEW_SAVING)) {
+        ApplicationReport appReport = getApplicationReport(applicationId);
+        YarnApplicationState state = appReport.getYarnApplicationState();
+        if (!waitingStates.contains(state)) {
+          if (failToSubmitStates.contains(state)) {
+            throw new YarnException("Failed to submit " + applicationId
+                + " to YARN : " + appReport.getDiagnostics());
+          }
           LOG.info("Submitted application " + applicationId);
           break;
         }
 }
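Because FAILED and KILLED now break the polling loop with a YarnException
carrying the application diagnostics, a submitter fails fast instead of
waiting indefinitely. A hedged client-side sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class SubmitSketch {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new Configuration());
    yarnClient.start();
    try {
      ApplicationSubmissionContext ctx =
          yarnClient.createApplication().getApplicationSubmissionContext();
      // ... populate ctx (name, queue, AM container spec) ...
      ApplicationId id = yarnClient.submitApplication(ctx);
      System.out.println("Submitted " + id);
    } catch (YarnException e) {
      // With this change, an app that lands in FAILED or KILLED during
      // submission surfaces here with the RM's diagnostics.
      System.err.println("Submission failed: " + e.getMessage());
    } finally {
      yarnClient.stop();
    }
  }
}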

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf3604b5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index da7d505..782bc43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -646,7 +646,7 @@ public abstract class ProtocolHATestBase extends 
ClientBaseWithFixes {
   ApplicationReport report =
   ApplicationReport.newInstance(appId, attemptId, fakeUser,
   fakeQueue, 

[44/50] [abbrv] hadoop git commit: Adding 2.8 section in CHANGES.txt

2015-03-09 Thread jing9
Adding 2.8 section in CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1040f705
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1040f705
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1040f705

Branch: refs/heads/HDFS-7285
Commit: 1040f70590e978933d04d5164f53ac7355d1fdc3
Parents: f88e63e
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Sun Mar 8 20:24:33 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:17:55 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040f705/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0af0beb..6f2c8c3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -426,6 +426,18 @@ Trunk (Unreleased)
 
 HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay 
Radia)
 
+Release 2.8.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040f705/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3cd6372..e106b1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -313,6 +313,18 @@ Trunk (Unreleased)
 HDFS-4681. 
TestBlocksWithNotEnoughRacks#testCorruptBlockRereplicatedAcrossRacks 
 fails using IBM java (Ayappan via aw)
 
+Release 2.8.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040f705/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 049b17d..8f06ac8 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -245,6 +245,18 @@ Trunk (Unreleased)
 
 MAPREDUCE-6078. native-task: fix gtest build on macosx (Binglin Chang)
 
+Release 2.8.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040f705/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f28e932..da8b02e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -45,6 +45,18 @@ Trunk - Unreleased
 YARN-2428. LCE default banned user list should have yarn (Varun
 Saxena via aw)
 
+Release 2.8.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[06/50] [abbrv] hadoop git commit: MAPREDUCE-5657. Fix Javadoc errors caused by incorrect or illegal tags in doc comments. Contributed by Akira AJISAKA.

2015-03-09 Thread jing9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
index fa3708e..2c69542 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
@@ -181,7 +181,7 @@ public static final String OUTDIR = 
mapreduce.output.fileoutputformat.outputdir
*  Get the {@link Path} to the task's temporary output directory 
*  for the map-reduce job
*  
-   * <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4>
+   * <b id="SideEffectFiles">Tasks' Side-Effect Files</b>
* 
   * <p>Some applications need to create/write-to side-files, which differ from
* the actual job-outputs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
index 24baa59..c31cab7 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
@@ -81,7 +81,7 @@ import java.util.*;
  * <p>
  * Usage in Reducer:
  * <pre>
- * <K, V> String generateFileName(K k, V v) {
+ * &lt;K, V&gt; String generateFileName(K k, V v) {
  *   return k.toString() + "_" + v.toString();
  * }
  * 
@@ -124,16 +124,16 @@ import java.util.*;
  * </p>
  * 
  * <pre>
- * private MultipleOutputs<Text, Text> out;
+ * private MultipleOutputs&lt;Text, Text&gt; out;
  * 
  * public void setup(Context context) {
- *   out = new MultipleOutputs<Text, Text>(context);
+ *   out = new MultipleOutputs&lt;Text, Text&gt;(context);
  *   ...
  * }
  * 
- * public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
+ * public void reduce(Text key, Iterable&lt;Text&gt; values, Context context) throws IOException, InterruptedException {
  * for (Text t : values) {
- *   out.write(key, t, generateFileName(<i>parameter list...</i>));
+ *   out.write(key, t, generateFileName(&lt;<i>parameter list...</i>&gt;));
  *   }
  * }
  * 
@@ -294,7 +294,6 @@ public class MultipleOutputs<KEYOUT, VALUEOUT> {
 
   /**
    * Adds a named output for the job.
-   * <p/>
    *
    * @param job   job to add the named output
    * @param namedOutput   named output name, it has to be a word, letters

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
index 4a40840..2a89908 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.mapreduce.Partitioner;
  *   <li>{@link #setOffsets}</li>
  *   <li>{@link #setLeftOffset}</li>
  *   <li>{@link #setRightOffset}</li>
- * </ul></p>
+ * </ul>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
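The pattern behind these fixes: Javadoc parses raw < and > as HTML, so
generic types in doc comments must be written as entities or wrapped in
{@code}. A minimal illustration:

/**
 * A field of type MultipleOutputs&lt;Text, Text&gt; must be escaped as shown,
 * or written as {@code MultipleOutputs<Text, Text>}, which escapes it
 * automatically.
 */
public class JavadocEscapingSketch {
}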


[43/50] [abbrv] hadoop git commit: HDFS-7857. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang.

2015-03-09 Thread jing9
HDFS-7857. Improve authentication failure WARN message to avoid user confusion. 
Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/129f88a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/129f88a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/129f88a7

Branch: refs/heads/HDFS-7285
Commit: 129f88a7aaef3b4db549570e2784d2daf432feea
Parents: 1040f70
Author: Yongjun Zhang yzh...@cloudera.com
Authored: Sun Mar 8 20:39:46 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:17:55 2015 -0700

--
 .../src/main/java/org/apache/hadoop/ipc/Server.java | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/129f88a7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 893e0eb..d2d61b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1324,10 +1324,15 @@ public abstract class Server {
   saslResponse = processSaslMessage(saslMessage);
 } catch (IOException e) {
   rpcMetrics.incrAuthenticationFailures();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(StringUtils.stringifyException(e));
+      }
       // attempting user could be null
+      IOException tce = (IOException) getTrueCause(e);
       AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
-          + attemptingUser + " (" + e.getLocalizedMessage() + ")");
-      throw (IOException) getTrueCause(e);
+          + attemptingUser + " (" + e.getLocalizedMessage()
+          + ") with true cause: (" + tce.getLocalizedMessage() + ")");
+      throw tce;
 }
 
     if (saslServer != null && saslServer.isComplete()) {



[03/50] [abbrv] hadoop git commit: YARN-3272. Surface container locality info in RM web UI (Jian He via wangda)

2015-03-09 Thread jing9
YARN-3272. Surface container locality info in RM web UI (Jian He via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4006739a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4006739a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4006739a

Branch: refs/heads/HDFS-7285
Commit: 4006739a2883ccc26d7c1af837d989bc529eb50d
Parents: 6bc2798
Author: Wangda Tan wan...@apache.org
Authored: Tue Mar 3 11:49:01 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:22 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../dev-support/findbugs-exclude.xml|  7 ++
 .../rmapp/attempt/RMAppAttemptMetrics.java  | 21 -
 .../resourcemanager/scheduler/NodeType.java |  9 +-
 .../scheduler/SchedulerApplicationAttempt.java  | 15 +++-
 .../scheduler/capacity/LeafQueue.java   | 95 +---
 .../server/resourcemanager/webapp/AppBlock.java | 45 +-
 .../scheduler/capacity/TestReservations.java|  8 +-
 8 files changed, 163 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4006739a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0850f0b..5eaf4f4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -348,6 +348,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3281. Added RMStateStore to StateMachine visualization list.
 (Chengbing Liu via jianhe)
 
+YARN-3272. Surface container locality info in RM web UI.
+(Jian He via wangda)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4006739a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 70f1a71..1c3f201 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -63,6 +63,13 @@
     <Bug pattern="BC_UNCONFIRMED_CAST" />
   </Match>
   <Match>
+    <Class name="~org\.apache\.hadoop\.yarn\.server\.resourcemanager\.rmapp\.attempt\.RMAppAttemptMetrics" />
+    <Method name="getLocalityStatistics" />
+    <Bug pattern="EI_EXPOSE_REP" />
+    <Method name="incNumAllocatedContainers"/>
+    <Bug pattern="VO_VOLATILE_INCREMENT" />
+  </Match>
+  <Match>
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl$AppRejectedTransition" />
     <Bug pattern="BC_UNCONFIRMED_CAST" />
   </Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4006739a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
index 0e60fd5..bc22073 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
@@ -32,6 +32,7 @@ import 
org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 public class RMAppAttemptMetrics {
@@ -49,6 +50,10 @@ public class RMAppAttemptMetrics {
   private AtomicLong finishedVcoreSeconds = new AtomicLong(0);
   private RMContext rmContext;
 
+  private int[][] localityStatistics =
+  new int[NodeType.values().length][NodeType.values().length];
+  private volatile int totalAllocatedContainers;
+
   public RMAppAttemptMetrics(ApplicationAttemptId attemptId,
   RMContext rmContext) {
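The new counters form a square matrix keyed by NodeType, counting
allocations per (allocated, requested) locality pair. A hedged sketch of
the idea; the field and method names below are illustrative, not the RM's
exact API:

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;

public class LocalityMatrixSketch {
  private final int[][] counts =
      new int[NodeType.values().length][NodeType.values().length];

  // Count one allocation for the (allocated, requested) locality pair.
  void record(NodeType allocated, NodeType requested) {
    counts[allocated.ordinal()][requested.ordinal()]++;
  }

  // Containers that were both requested and allocated node-local.
  int nodeLocalHits() {
    return counts[NodeType.NODE_LOCAL.ordinal()][NodeType.NODE_LOCAL.ordinal()];
  }
}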
 

[01/50] [abbrv] hadoop git commit: MAPREDUCE-6268. Fix typo in Task Attempt API's URL. Contributed by Ryu Kobayashi.

2015-03-09 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 720901acf - edc476bfc


MAPREDUCE-6268. Fix typo in Task Attempt API's URL. Contributed by Ryu 
Kobayashi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e208eeed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e208eeed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e208eeed

Branch: refs/heads/HDFS-7285
Commit: e208eeed517d22a6d05fd3c5b61078eb175511f1
Parents: c110aab
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 3 16:21:16 2015 +0900
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:22 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../src/site/markdown/HistoryServerRest.md| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e208eeed/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ccd24a6..5fd7d30 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -399,6 +399,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6223. TestJobConf#testNegativeValueForTaskVmem failures. 
 (Varun Saxena via kasha)
 
+MAPREDUCE-6268. Fix typo in Task Attempt API's URL. (Ryu Kobayashi
+via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e208eeed/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
index 8a78754..b4ce00a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
@@ -1889,7 +1889,7 @@ A Task Attempt resource contains information about a 
particular task attempt wit
 
Use the following URI to obtain a Task Attempt Object, from a task identified by the attemptid value.

-  * http://<history server http address:port>/ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempt/{attemptid}
+  * http://<history server http address:port>/ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}
 
  HTTP Operations Supported
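A hedged sketch of a GET against the corrected endpoint (note the plural
"attempts" path segment); the host, port, and IDs are placeholders:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class HistoryRestSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://jhs.example.com:19888/ws/v1/history/mapreduce"
        + "/jobs/job_1400000000000_0001/tasks/task_1400000000000_0001_m_000000"
        + "/attempts/attempt_1400000000000_0001_m_000000_0");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}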
 



[19/50] [abbrv] hadoop git commit: HDFS-7746. Add a test randomly mixing append, truncate and snapshot operations.

2015-03-09 Thread jing9
HDFS-7746. Add a test randomly mixing append, truncate and snapshot operations.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27f89818
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27f89818
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27f89818

Branch: refs/heads/HDFS-7285
Commit: 27f89818eee5084ffd475cadc42b76f2c32a747b
Parents: d138804
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu Mar 5 10:21:29 2015 +0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:24 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java | 478 +++
 2 files changed, 481 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27f89818/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d9008d9..f9541e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -709,6 +709,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one.
 (Dongming Liang via shv)
 
+HDFS-7746. Add a test randomly mixing append, truncate and snapshot
+operations. (szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27f89818/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
new file mode 100644
index 000..5c4c7b4
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -0,0 +1,478 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Test randomly mixing append, snapshot and truncate operations.
+ * Use local file system to simulate each operation and verify
+ * the correctness.
+ */
+public class TestAppendSnapshotTruncate {
+  static {
+GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
+  }
+  private static final Log LOG = 
LogFactory.getLog(TestAppendSnapshotTruncate.class);
+  private static final int BLOCK_SIZE = 1024;
+  private static final int 

[12/50] [abbrv] hadoop git commit: HDFS-7879. hdfs.dll does not export functions of the public libhdfs API. Contributed by Chris Nauroth.

2015-03-09 Thread jing9
HDFS-7879. hdfs.dll does not export functions of the public libhdfs API. 
Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aca0abef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aca0abef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aca0abef

Branch: refs/heads/HDFS-7285
Commit: aca0abefea5b6bd5249d0ee35875a598c1ed1e7c
Parents: 871bd4e
Author: Haohui Mai whe...@apache.org
Authored: Wed Mar 4 09:17:21 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:23 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hadoop-hdfs/src/CMakeLists.txt  | 23 +++--
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.h  | 92 +++-
 3 files changed, 111 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aca0abef/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2037973..62006d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1086,6 +1086,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7869. Inconsistency in the return information while performing rolling
 upgrade ( J.Andreina via vinayakumarb )
 
+HDFS-7879. hdfs.dll does not export functions of the public libhdfs API.
+(Chris Nauroth via wheat9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aca0abef/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
index aceeac1..563727b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -27,7 +27,15 @@ 
include(../../../hadoop-common-project/hadoop-common/src/JNIFlags.cmake NO_POLIC
 function(add_dual_library LIBNAME)
 add_library(${LIBNAME} SHARED ${ARGN})
 add_library(${LIBNAME}_static STATIC ${ARGN})
-set_target_properties(${LIBNAME}_static PROPERTIES OUTPUT_NAME ${LIBNAME})
+# Linux builds traditionally ship a libhdfs.a (static linking) and 
libhdfs.so
+# (dynamic linking).  On Windows, we cannot use the same base name for both
+# static and dynamic, because Windows does not use distinct file extensions
+# for a statically linked library vs. a DLL import library.  Both use the
+# .lib extension.  On Windows, we'll build the static library as
+# hdfs_static.lib.
+if (NOT WIN32)
+set_target_properties(${LIBNAME}_static PROPERTIES OUTPUT_NAME 
${LIBNAME})
+endif (NOT WIN32)
 endfunction(add_dual_library)
 
 # Link both a static and a dynamic target against some libraries
@@ -105,11 +113,14 @@ else (WIN32)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE 
-D_FILE_OFFSET_BITS=64")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
 set(OS_DIR main/native/libhdfs/os/posix)
 set(OS_LINK_LIBRARIES pthread)
 set(OUT_DIR target/usr/local/lib)
 endif (WIN32)
 
+add_definitions(-DLIBHDFS_DLL_EXPORT)
+
 include_directories(
 ${GENERATED_JAVAH}
 ${CMAKE_CURRENT_SOURCE_DIR}
@@ -150,7 +161,7 @@ add_executable(test_libhdfs_ops
 main/native/libhdfs/test/test_libhdfs_ops.c
 )
 target_link_libraries(test_libhdfs_ops
-hdfs
+hdfs_static
 ${JAVA_JVM_LIBRARY}
 )
 
@@ -158,7 +169,7 @@ add_executable(test_libhdfs_read
 main/native/libhdfs/test/test_libhdfs_read.c
 )
 target_link_libraries(test_libhdfs_read
-hdfs
+hdfs_static
 ${JAVA_JVM_LIBRARY}
 )
 
@@ -166,7 +177,7 @@ add_executable(test_libhdfs_write
 main/native/libhdfs/test/test_libhdfs_write.c
 )
 target_link_libraries(test_libhdfs_write
-hdfs
+hdfs_static
 ${JAVA_JVM_LIBRARY}
 )
 
@@ -196,7 +207,7 @@ add_executable(test_libhdfs_threaded
 ${OS_DIR}/thread.c
 )
 target_link_libraries(test_libhdfs_threaded
-hdfs
+hdfs_static
 native_mini_dfs
 ${OS_LINK_LIBRARIES}
 )
@@ -206,7 +217,7 @@ add_executable(test_libhdfs_zerocopy
 main/native/libhdfs/test/test_libhdfs_zerocopy.c
 )
 target_link_libraries(test_libhdfs_zerocopy
-hdfs
+hdfs_static
 native_mini_dfs
 ${OS_LINK_LIBRARIES}
 )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aca0abef/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h

[29/50] [abbrv] hadoop git commit: YARN-3249. Add a 'kill application' button to Resource Manager's Web UI. Contributed by Ryu Kobayashi.

2015-03-09 Thread jing9
YARN-3249. Add a 'kill application' button to Resource Manager's Web UI. 
Contributed by Ryu Kobayashi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fed87455
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fed87455
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fed87455

Branch: refs/heads/HDFS-7285
Commit: fed87455b03142529e9ceeded1ec942e3fd568ed
Parents: ffa5622
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Thu Mar 5 19:55:56 2015 +0900
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:25 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../hadoop/yarn/conf/YarnConfiguration.java |  6 
 .../server/resourcemanager/webapp/AppBlock.java | 35 
 3 files changed, 44 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed87455/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4dd61eb..3ea5501 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -94,6 +94,9 @@ Release 2.7.0 - UNRELEASED
 
 YARN-2994. Document work-preserving RM restart. (Jian He via ozawa)
 
+YARN-3249. Add a 'kill application' button to Resource Manager's Web UI.
+(Ryu Kobayashi via ozawa)
+
   IMPROVEMENTS
 
 YARN-3005. [JDK7] Use switch statement for String instead of if-else

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed87455/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ff06eea..25b808e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -184,6 +184,12 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME = 
   false;
 
+  /** Enable Resource Manager webapp ui actions */
+  public static final String RM_WEBAPP_UI_ACTIONS_ENABLED =
+RM_PREFIX + "webapp.ui-actions.enabled";
+  public static final boolean DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED =
+true;
+
   /** Whether the RM should enable Reservation System */
   public static final String RM_RESERVATION_SYSTEM_ENABLE = RM_PREFIX
  + "reservation-system.enable";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed87455/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
index 45df93e..00508b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -62,12 +63,16 @@ public class AppBlock extends HtmlBlock {
 
   private final Configuration conf;
   private final ResourceManager rm;
+  private final boolean rmWebAppUIActions;
 
   @Inject
   AppBlock(ResourceManager rm, ViewContext ctx, Configuration conf) {
 super(ctx);
 this.conf = conf;
 this.rm = rm;
+this.rmWebAppUIActions =
+conf.getBoolean(YarnConfiguration.RM_WEBAPP_UI_ACTIONS_ENABLED,
+
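
A minimal Java sketch of turning the new flag off, for operators who do not
want the kill button exposed; it uses only the YarnConfiguration constants
introduced in this commit, and in a real deployment the key would normally be
set in yarn-site.xml instead:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class DisableRmUiActions {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Hide the 'kill application' button on the RM web pages
        // (yarn.resourcemanager.webapp.ui-actions.enabled, default true).
        conf.setBoolean(YarnConfiguration.RM_WEBAPP_UI_ACTIONS_ENABLED, false);
        System.out.println(conf.getBoolean(
            YarnConfiguration.RM_WEBAPP_UI_ACTIONS_ENABLED, true)); // false
      }
    }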

[37/50] [abbrv] hadoop git commit: HDFS-7885. Datanode should not trust the generation stamp provided by client. Contributed by Tsz Wo Nicholas Sze.

2015-03-09 Thread jing9
HDFS-7885. Datanode should not trust the generation stamp provided by client. 
Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/055267d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/055267d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/055267d5

Branch: refs/heads/HDFS-7285
Commit: 055267d50cffb51f28f271f27016df23fae2d222
Parents: 7070347
Author: Jing Zhao ji...@apache.org
Authored: Fri Mar 6 10:55:56 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:26 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 15 +
 .../hadoop/hdfs/TestBlockReaderLocalLegacy.java | 63 
 3 files changed, 81 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/055267d5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 763d327..e622a57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1104,6 +1104,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal)
 
+HDFS-7885. Datanode should not trust the generation stamp provided by
+client. (Tsz Wo Nicholas Sze via jing9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/055267d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index cc6220a..58f5615 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2568,6 +2568,21 @@ class FsDatasetImpl implements 
FsDatasetSpi<FsVolumeImpl> {
   @Override // FsDatasetSpi
   public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
   throws IOException {
+synchronized(this) {
+  final Replica replica = volumeMap.get(block.getBlockPoolId(),
+  block.getBlockId());
+  if (replica == null) {
+throw new ReplicaNotFoundException(block);
+  }
+  if (replica.getGenerationStamp() < block.getGenerationStamp()) {
+throw new IOException(
+"Replica generation stamp < block generation stamp, block="
++ block + ", replica=" + replica);
+  } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
+block.setGenerationStamp(replica.getGenerationStamp());
+  }
+}
+
 File datafile = getBlockFile(block);
 File metafile = FsDatasetUtil.getMetaFile(datafile, 
block.getGenerationStamp());
 BlockLocalPathInfo info = new BlockLocalPathInfo(block,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/055267d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
index cb50539..1c4134f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
@@ -30,11 +30,16 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.UserGroupInformation;
+import 
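
The heart of the fix reads well as a standalone sketch: reject a
client-supplied generation stamp that is newer than the replica's, and adopt
the replica's stamp when the replica has moved ahead. The method wrapper below
is illustrative, not the verbatim FsDatasetImpl code:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.server.datanode.Replica;
    import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;

    public class GenStampCheck {
      static void validate(Replica replica, ExtendedBlock block)
          throws IOException {
        if (replica == null) {
          throw new ReplicaNotFoundException(block);
        }
        if (replica.getGenerationStamp() < block.getGenerationStamp()) {
          // The client claims a newer stamp than the datanode holds: reject.
          throw new IOException("Replica generation stamp < block generation"
              + " stamp, block=" + block + ", replica=" + replica);
        } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
          // The replica is ahead (e.g. after recovery): trust the datanode.
          block.setGenerationStamp(replica.getGenerationStamp());
        }
      }
    }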

[40/50] [abbrv] hadoop git commit: YARN-3296. Mark ResourceCalculatorProcessTree class as Public for configurable resource monitoring. Contributed by Hitesh Shah

2015-03-09 Thread jing9
YARN-3296. Mark ResourceCalculatorProcessTree class as Public for configurable 
resource monitoring. Contributed by Hitesh Shah


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b912239
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b912239
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b912239

Branch: refs/heads/HDFS-7285
Commit: 7b912239d7590aff2dbd3e7e5f5f7c2bfdd23e3d
Parents: eed1645
Author: Junping Du junping...@apache.org
Authored: Sun Mar 8 14:47:35 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:27 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../util/ResourceCalculatorProcessTree.java | 25 ++--
 2 files changed, 21 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b912239/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 250fc1c..f28e932 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -366,6 +366,9 @@ Release 2.7.0 - UNRELEASED
 YARN-2190. Added CPU and memory limit options to the default container
 executor for Windows containers. (Chuan Liu via jianhe)
 
+YARN-3296. Mark ResourceCalculatorProcessTree class as Public for 
configurable
+resource monitoring. (Hitesh Shah via junping_du)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b912239/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
index 8c22c9e..6ee8834 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
@@ -22,7 +22,8 @@ import java.lang.reflect.Constructor;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 
@@ -30,7 +31,8 @@ import org.apache.hadoop.conf.Configured;
  * Interface class to obtain process resource usage
  *
  */
-@Private
+@Public
+@Evolving
 public abstract class ResourceCalculatorProcessTree extends Configured {
   static final Log LOG = LogFactory
   .getLog(ResourceCalculatorProcessTree.class);
@@ -90,9 +92,12 @@ public abstract class ResourceCalculatorProcessTree extends 
Configured {
* @param olderThanAge processes above this age are included in the
*  memory addition
* @return cumulative virtual memory used by the process-tree in bytes,
-   *  for processes older than this age.
+   *  for processes older than this age. return 0 if it cannot be
+   *  calculated
*/
-  public abstract long getCumulativeVmem(int olderThanAge);
+  public long getCumulativeVmem(int olderThanAge) {
+return 0;
+  }
 
   /**
* Get the cumulative resident set size (rss) memory used by all the 
processes
@@ -104,7 +109,9 @@ public abstract class ResourceCalculatorProcessTree extends 
Configured {
*  for processes older than this age. return 0 if it cannot be
*  calculated
*/
-  public abstract long getCumulativeRssmem(int olderThanAge);
+  public long getCumulativeRssmem(int olderThanAge) {
+return 0;
+  }
 
   /**
* Get the CPU time in millisecond used by all the processes in the
@@ -113,7 +120,9 @@ public abstract class ResourceCalculatorProcessTree extends 
Configured {
* @return cumulative CPU time in millisecond since the process-tree created
* return 0 if it cannot be calculated
*/
-  public abstract long getCumulativeCpuTime();
+  public long getCumulativeCpuTime() {
+return 0;
+  }
 
   /**
* Get the CPU usage by all the processes in the process-tree based on
@@ -123,7 +132,9 @@ public abstract class ResourceCalculatorProcessTree extends 
Configured {
* @return percentage CPU usage since the 
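
Because the getters are no longer abstract, an external monitor can subclass
ResourceCalculatorProcessTree and report only the metrics it actually has;
everything else falls back to the new defaults and returns 0. A sketch of such
a plugin, assuming the 2.7-era abstract methods updateProcessTree(),
getProcessTreeDump() and checkPidPgrpidForMatch() (those signatures are
assumptions, not part of this diff):

    import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;

    // Hypothetical plugin that reports CPU time only.
    public class CpuOnlyProcessTree extends ResourceCalculatorProcessTree {
      private long cpuMillis = 0;

      public CpuOnlyProcessTree(String pid) {
        super(pid);
      }
      @Override
      public void updateProcessTree() {
        cpuMillis += 100; // placeholder: poll an external monitor here
      }
      @Override
      public String getProcessTreeDump() {
        return "";
      }
      @Override
      public boolean checkPidPgrpidForMatch() {
        return true;
      }
      @Override
      public long getCumulativeCpuTime() {
        return cpuMillis;
      }
      // getCumulativeVmem()/getCumulativeRssmem() are inherited and return 0.
    }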

[04/50] [abbrv] hadoop git commit: HDFS-7757. Misleading error messages in FSImage.java. (Contributed by Brahma Reddy Battula)

2015-03-09 Thread jing9
HDFS-7757. Misleading error messages in FSImage.java. (Contributed by Brahma 
Reddy Battula)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bc27985
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bc27985
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bc27985

Branch: refs/heads/HDFS-7285
Commit: 6bc27985a305a768d10f834ad8d90616cffdbcf6
Parents: 68c9b55
Author: Arpit Agarwal a...@apache.org
Authored: Tue Mar 3 10:55:22 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:22 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../java/org/apache/hadoop/hdfs/server/namenode/FSImage.java   | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bc27985/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fe78097..42430ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1074,6 +1074,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7871. NameNodeEditLogRoller can keep printing Swallowing exception
 message. (jing9)
 
+HDFS-7757. Misleading error messages in FSImage.java. (Brahma Reddy Battula
+via Arpit Agarwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bc27985/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 44c41d0..e589eea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -883,7 +883,7 @@ public class FSImage implements Closeable {
   final long namespace = counts.getNameSpace() - parentNamespace;
   final long nsQuota = q.getNameSpace();
   if (Quota.isViolated(nsQuota, namespace)) {
-LOG.error("BUG: Namespace quota violation in image for "
+LOG.warn("Namespace quota violation in image for "
 + dir.getFullPathName()
 + " quota = " + nsQuota + " < consumed = " + namespace);
   }
@@ -891,7 +891,7 @@ public class FSImage implements Closeable {
   final long ssConsumed = counts.getStorageSpace() - parentStoragespace;
   final long ssQuota = q.getStorageSpace();
   if (Quota.isViolated(ssQuota, ssConsumed)) {
-LOG.error("BUG: Storagespace quota violation in image for "
+LOG.warn("Storagespace quota violation in image for "
 + dir.getFullPathName()
 + " quota = " + ssQuota + " < consumed = " + ssConsumed);
   }
@@ -903,7 +903,7 @@ public class FSImage implements Closeable {
 parentTypeSpaces.get(t);
 final long typeQuota = q.getTypeSpaces().get(t);
 if (Quota.isViolated(typeQuota, typeSpace)) {
-  LOG.error("BUG: Storage type quota violation in image for "
+  LOG.warn("Storage type quota violation in image for "
  + dir.getFullPathName()
  + " type = " + t.toString() + " quota = "
  + typeQuota + " < consumed " + typeSpace);



[26/50] [abbrv] hadoop git commit: HADOOP-11648. Set DomainSocketWatcher thread name explicitly. Contributed by Liang Xie.

2015-03-09 Thread jing9
HADOOP-11648. Set DomainSocketWatcher thread name explicitly. Contributed by 
Liang Xie.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5632a4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5632a4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5632a4f

Branch: refs/heads/HDFS-7285
Commit: f5632a4f65e9780eba83c25aa5570f78034f2e41
Parents: b9f374b
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Thu Mar 5 16:05:44 2015 +0900
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:25 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/net/unix/DomainSocketWatcher.java | 8 +---
 .../org/apache/hadoop/net/unix/TestDomainSocketWatcher.java  | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop/hdfs/server/datanode/ShortCircuitRegistry.java| 2 +-
 .../apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java | 3 ++-
 6 files changed, 15 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5632a4f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d518d9f..92af646 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -647,6 +647,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11658. Externalize io.compression.codecs property.
 (Kai Zheng via aajisaka)
 
+HADOOP-11648. Set DomainSocketWatcher thread name explicitly.
+(Liang Xie via ozawa)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5632a4f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
index 8c617dc..03b52e0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
@@ -238,7 +238,8 @@ public final class DomainSocketWatcher implements Closeable 
{
*/
   private boolean kicked = false;
 
-  public DomainSocketWatcher(int interruptCheckPeriodMs) throws IOException {
+  public DomainSocketWatcher(int interruptCheckPeriodMs, String src)
+  throws IOException {
 if (loadingFailureReason != null) {
   throw new UnsupportedOperationException(loadingFailureReason);
 }
@@ -246,8 +247,9 @@ public final class DomainSocketWatcher implements Closeable 
{
 this.interruptCheckPeriodMs = interruptCheckPeriodMs;
 notificationSockets = DomainSocket.socketpair();
 watcherThread.setDaemon(true);
-watcherThread.setUncaughtExceptionHandler(
-new Thread.UncaughtExceptionHandler() {
+watcherThread.setName(src + " DomainSocketWatcher");
+watcherThread
+.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
   @Override
   public void uncaughtException(Thread thread, Throwable t) {
LOG.error(thread + " terminating on unexpected exception", t);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5632a4f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
index e85e414..4b0e2a8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
@@ -195,7 +195,7 @@ public class TestDomainSocketWatcher {
   private DomainSocketWatcher newDomainSocketWatcher(int 
interruptCheckPeriodMs)
   throws Exception {
 DomainSocketWatcher watcher = new DomainSocketWatcher(
-interruptCheckPeriodMs);
+interruptCheckPeriodMs, getClass().getSimpleName());
 watcher.watcherThread.setUncaughtExceptionHandler(
 new Thread.UncaughtExceptionHandler() {
   @Override
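
The effect of the new src parameter is that every watcher thread in a stack
dump is attributable to its owner. A minimal calling sketch, using only the
constructor signature from the diff above (the owner string and check period
are placeholders, and the native hadoop library must be loadable):

    import java.io.IOException;
    import org.apache.hadoop.net.unix.DomainSocketWatcher;

    public class WatcherNaming {
      // The thread now shows up as, e.g.,
      // "ShortCircuitRegistry DomainSocketWatcher" in jstack output.
      static DomainSocketWatcher newWatcher() throws IOException {
        return new DomainSocketWatcher(60 * 1000, "ShortCircuitRegistry");
      }
    }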


[42/50] [abbrv] hadoop git commit: HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)

2015-03-09 Thread jing9
HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eed1645f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eed1645f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eed1645f

Branch: refs/heads/HDFS-7285
Commit: eed1645fd3a513a2a82cf76b4063a4baf4e819f9
Parents: fd63337
Author: Steve Loughran ste...@apache.org
Authored: Sun Mar 8 11:20:42 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:27 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../org/apache/hadoop/fs/s3a/Constants.java |  6 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 20 ++--
 .../src/site/markdown/tools/hadoop-aws/index.md | 10 +-
 4 files changed, 26 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed1645f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 14cd75a..16002d5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1050,6 +1050,8 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
 should be non static. (Sean Busbey via yliu)
 
+HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed1645f/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index e7462dc..3486dfb 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -18,8 +18,12 @@
 
 package org.apache.hadoop.fs.s3a;
 
-
 public class Constants {
+  // s3 access key
+  public static final String ACCESS_KEY = "fs.s3a.access.key";
+
+  // s3 secret key
+  public static final String SECRET_KEY = "fs.s3a.secret.key";
 
   // number of simultaneous connections to s3
  public static final String MAXIMUM_CONNECTIONS = "fs.s3a.connection.maximum";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed1645f/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 1a30d6f..91a606c 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -32,8 +32,6 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.hadoop.fs.s3.S3Credentials;
-
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.AmazonServiceException;
 import com.amazonaws.ClientConfiguration;
@@ -159,12 +157,22 @@ public class S3AFileSystem extends FileSystem {
 this.getWorkingDirectory());
 
 // Try to get our credentials or just connect anonymously
-S3Credentials s3Credentials = new S3Credentials();
-s3Credentials.initialize(name, conf);
+String accessKey = conf.get(ACCESS_KEY, null);
+String secretKey = conf.get(SECRET_KEY, null);
+
+String userInfo = name.getUserInfo();
+if (userInfo != null) {
+  int index = userInfo.indexOf(':');
+  if (index != -1) {
+accessKey = userInfo.substring(0, index);
+secretKey = userInfo.substring(index + 1);
+  } else {
+accessKey = userInfo;
+  }
+}
 
 AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
-new BasicAWSCredentialsProvider(s3Credentials.getAccessKey(),
-s3Credentials.getSecretAccessKey()),
+new BasicAWSCredentialsProvider(accessKey, secretKey),
 new InstanceProfileCredentialsProvider(),
 new AnonymousAWSCredentialsProvider()
 );

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed1645f/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
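
In code, the restored behavior amounts to: credentials come from
fs.s3a.access.key / fs.s3a.secret.key (the new constants above), user-info
embedded in the URI takes precedence, and otherwise the chain falls through to
instance-profile or anonymous access. A configuration sketch with placeholder
bucket and key values:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class S3aAuthExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.s3a.access.key", "AKIAEXAMPLE");   // Constants.ACCESS_KEY
        conf.set("fs.s3a.secret.key", "secretExample"); // Constants.SECRET_KEY
        // Equivalent: s3a://AKIAEXAMPLE:secretExample@bucket/, which this
        // patch makes override the configuration values.
        FileSystem fs = FileSystem.get(URI.create("s3a://bucket/"), conf);
        System.out.println(fs.getUri());
      }
    }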

[07/50] [abbrv] hadoop git commit: MAPREDUCE-5657. Fix Javadoc errors caused by incorrect or illegal tags in doc comments. Contributed by Akira AJISAKA.

2015-03-09 Thread jing9
MAPREDUCE-5657. Fix Javadoc errors caused by incorrect or illegal tags in doc 
comments. Contributed by Akira AJISAKA.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a3ef07f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a3ef07f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a3ef07f

Branch: refs/heads/HDFS-7285
Commit: 4a3ef07f4a3dbbb56eedc368a0123e02bc803850
Parents: e208eee
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 3 18:06:26 2015 +0900
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:22 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../hadoop/mapred/TaskAttemptListenerImpl.java  |  4 +-
 .../hadoop/mapreduce/v2/app/JobEndNotifier.java |  1 -
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  2 +-
 .../hadoop/filecache/DistributedCache.java  |  2 +-
 .../org/apache/hadoop/mapred/ClusterStatus.java |  4 +-
 .../apache/hadoop/mapred/FileOutputFormat.java  |  2 +-
 .../java/org/apache/hadoop/mapred/IFile.java|  2 +-
 .../apache/hadoop/mapred/JobACLsManager.java|  1 -
 .../org/apache/hadoop/mapred/JobClient.java |  8 ++--
 .../java/org/apache/hadoop/mapred/JobConf.java  | 49 +---
 .../java/org/apache/hadoop/mapred/Mapper.java   |  2 +-
 .../org/apache/hadoop/mapred/QueueManager.java  | 30 ++--
 .../org/apache/hadoop/mapred/RecordReader.java  |  2 +-
 .../java/org/apache/hadoop/mapred/Reducer.java  | 14 +++---
 .../hadoop/mapred/TaskUmbilicalProtocol.java|  1 -
 .../apache/hadoop/mapred/lib/ChainMapper.java   | 40 
 .../apache/hadoop/mapred/lib/ChainReducer.java  | 44 +-
 .../hadoop/mapred/lib/MultipleOutputs.java  | 29 +---
 .../hadoop/mapred/lib/TokenCountMapper.java |  2 +-
 .../lib/aggregate/ValueAggregatorJob.java   |  2 +-
 .../lib/aggregate/ValueAggregatorReducer.java   |  3 +-
 .../hadoop/mapred/lib/db/DBInputFormat.java |  4 +-
 .../org/apache/hadoop/mapreduce/Cluster.java|  1 +
 .../apache/hadoop/mapreduce/ClusterMetrics.java |  6 +--
 .../apache/hadoop/mapreduce/CryptoUtils.java| 10 ++--
 .../java/org/apache/hadoop/mapreduce/Job.java   |  2 +-
 .../org/apache/hadoop/mapreduce/JobContext.java |  2 -
 .../hadoop/mapreduce/JobSubmissionFiles.java|  2 +-
 .../org/apache/hadoop/mapreduce/Mapper.java |  9 ++--
 .../org/apache/hadoop/mapreduce/Reducer.java| 12 ++---
 .../mapreduce/filecache/DistributedCache.java   |  5 +-
 .../lib/aggregate/ValueAggregatorJob.java   |  2 +-
 .../hadoop/mapreduce/lib/chain/Chain.java   |  4 +-
 .../hadoop/mapreduce/lib/chain/ChainMapper.java | 10 ++--
 .../mapreduce/lib/chain/ChainReducer.java   | 14 +++---
 .../hadoop/mapreduce/lib/db/DBInputFormat.java  |  2 +-
 .../hadoop/mapreduce/lib/db/DBWritable.java |  2 +-
 .../mapreduce/lib/join/TupleWritable.java   |  2 +-
 .../mapreduce/lib/map/MultithreadedMapper.java  |  6 +--
 .../mapreduce/lib/output/FileOutputFormat.java  |  2 +-
 .../mapreduce/lib/output/MultipleOutputs.java   | 11 ++---
 .../lib/partition/BinaryPartitioner.java|  2 +-
 .../hadoop/mapreduce/task/JobContextImpl.java   |  2 -
 .../hadoop/mapreduce/RandomTextWriter.java  |  4 +-
 .../apache/hadoop/mapreduce/RandomWriter.java   |  5 +-
 .../hadoop/examples/MultiFileWordCount.java |  2 +-
 .../apache/hadoop/examples/QuasiMonteCarlo.java |  4 +-
 .../hadoop/examples/RandomTextWriter.java   |  4 +-
 .../apache/hadoop/examples/RandomWriter.java|  5 +-
 .../apache/hadoop/examples/SecondarySort.java   |  2 +-
 .../org/apache/hadoop/examples/pi/DistBbp.java  |  2 +-
 .../apache/hadoop/examples/pi/math/Modular.java |  2 +-
 .../hadoop/examples/terasort/GenSort.java   |  2 +-
 .../org/apache/hadoop/tools/CopyListing.java| 14 +++---
 .../java/org/apache/hadoop/tools/DistCp.java|  4 +-
 .../apache/hadoop/tools/DistCpOptionSwitch.java |  2 +-
 .../org/apache/hadoop/tools/OptionsParser.java  |  2 +-
 .../hadoop/tools/mapred/CopyCommitter.java  |  4 +-
 .../apache/hadoop/tools/mapred/CopyMapper.java  |  5 +-
 .../hadoop/tools/mapred/CopyOutputFormat.java   |  4 +-
 .../tools/mapred/RetriableFileCopyCommand.java  |  3 +-
 .../tools/mapred/UniformSizeInputFormat.java|  4 +-
 .../tools/mapred/lib/DynamicInputFormat.java|  4 +-
 .../tools/mapred/lib/DynamicRecordReader.java   | 12 ++---
 .../apache/hadoop/tools/util/DistCpUtils.java   |  2 +-
 .../hadoop/tools/util/RetriableCommand.java |  2 +-
 .../hadoop/tools/util/ThrottledInputStream.java |  8 ++--
 .../java/org/apache/hadoop/tools/Logalyzer.java |  4 +-
 .../ResourceUsageEmulatorPlugin.java|  2 +-
 .../fs/swift/http/RestClientBindings.java   |  6 +--
 .../hadoop/fs/swift/http/SwiftRestClient.java   |  6 +--
 .../fs/swift/snative/SwiftNativeFileSystem.java |  6 +--
 

[11/50] [abbrv] hadoop git commit: MAPREDUCE-6248. Exposed the internal MapReduce job's information as a public API in DistCp. Contributed by Jing Zhao.

2015-03-09 Thread jing9
MAPREDUCE-6248. Exposed the internal MapReduce job's information as a public 
API in DistCp. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bab6209c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bab6209c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bab6209c

Branch: refs/heads/HDFS-7285
Commit: bab6209c170d1127680f8d0e975e2e54e9c63ccc
Parents: ff1b358
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Tue Mar 3 16:28:22 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:23 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../java/org/apache/hadoop/tools/DistCp.java| 47 +++-
 2 files changed, 39 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bab6209c/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 7a2eff3..b2ae9d9 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -320,6 +320,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-5612. Add javadoc for TaskCompletionEvent.Status.
 (Chris Palmer via aajisaka)
 
+MAPREDUCE-6248. Exposed the internal MapReduce job's information as a 
public
+API in DistCp. (Jing Zhao via vinodkv)
+
   OPTIMIZATIONS
 
 MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bab6209c/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
index 28535a7..b80aeb8 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.tools;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -51,12 +53,14 @@ import com.google.common.annotations.VisibleForTesting;
  * launch the copy-job. DistCp may alternatively be sub-classed to fine-tune
  * behaviour.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class DistCp extends Configured implements Tool {
 
   /**
-   * Priority of the ResourceManager shutdown hook.
+   * Priority of the shutdown hook.
*/
-  public static final int SHUTDOWN_HOOK_PRIORITY = 30;
+  static final int SHUTDOWN_HOOK_PRIORITY = 30;
 
   private static final Log LOG = LogFactory.getLog(DistCp.class);
 
@@ -66,7 +70,7 @@ public class DistCp extends Configured implements Tool {
  private static final String PREFIX = "_distcp";
  private static final String WIP_PREFIX = "._WIP_";
  private static final String DISTCP_DEFAULT_XML = "distcp-default.xml";
-  public static final Random rand = new Random();
+  static final Random rand = new Random();
 
   private boolean submitted;
   private FileSystem jobFS;
@@ -90,7 +94,7 @@ public class DistCp extends Configured implements Tool {
* To be used with the ToolRunner. Not for public consumption.
*/
   @VisibleForTesting
-  public DistCp() {}
+  DistCp() {}
 
   /**
* Implementation of Tool::run(). Orchestrates the copy of source file(s)
@@ -100,6 +104,7 @@ public class DistCp extends Configured implements Tool {
* @param argv List of arguments passed to DistCp, from the ToolRunner.
* @return On success, it returns 0. Else, -1.
*/
+  @Override
   public int run(String[] argv) {
if (argv.length < 1) {
   OptionsParser.usage();
@@ -145,9 +150,21 @@ public class DistCp extends Configured implements Tool {
* @throws Exception
*/
   public Job execute() throws Exception {
+Job job = createAndSubmitJob();
+
+if (inputOptions.shouldBlock()) {
+  waitForJobCompletion(job);
+}
+return job;
+  }
+
+  /**
+   * Create and submit the mapreduce job.
+   * @return The mapreduce job object that has been submitted
+   */
+  public Job createAndSubmitJob() throws Exception {
 assert inputOptions != null;
 assert getConf() != null;
-
 Job job = null;
 try {
   synchronized(this) {
@@ -169,16 +186,24 @@ public class DistCp extends Configured implements Tool {
 
 String jobID = 
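
With createAndSubmitJob() public, a caller can launch a DistCp and watch the
underlying MapReduce job itself rather than blocking inside execute(). A
sketch, assuming the 2.7-era DistCpOptions(List<Path>, Path) constructor and
placeholder cluster paths:

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.tools.DistCpOptions;

    public class AsyncDistCp {
      public static void main(String[] args) throws Exception {
        DistCpOptions options = new DistCpOptions(
            Arrays.asList(new Path("hdfs://nn1/src")),
            new Path("hdfs://nn2/dst"));
        DistCp distCp = new DistCp(new Configuration(), options);
        Job job = distCp.createAndSubmitJob(); // returns once submitted
        System.out.println("submitted: " + job.getJobID());
        job.waitForCompletion(true);           // block only when we choose to
      }
    }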

hadoop git commit: HADOOP-11692. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang.

2015-03-09 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 42e3a8051 -> de1101cb5


HADOOP-11692. Improve authentication failure WARN message to avoid user 
confusion. Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de1101cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de1101cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de1101cb

Branch: refs/heads/trunk
Commit: de1101cb5be2d8efd0ef4945f64ccfe7cbd01049
Parents: 42e3a80
Author: Yongjun Zhang yzh...@cloudera.com
Authored: Mon Mar 9 11:55:32 2015 -0700
Committer: Yongjun Zhang yzh...@cloudera.com
Committed: Mon Mar 9 11:55:32 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 .../src/main/java/org/apache/hadoop/ipc/Server.java | 9 +++--
 2 files changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de1101cb/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 37604c4..d5a8463 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1070,6 +1070,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN.
 (Duo Zhang via wheat9)
 
+HADOOP-11692. Improve authentication failure WARN message to avoid user
+confusion. (Yongjun Zhang)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de1101cb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 893e0eb..d2d61b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1324,10 +1324,15 @@ public abstract class Server {
   saslResponse = processSaslMessage(saslMessage);
 } catch (IOException e) {
   rpcMetrics.incrAuthenticationFailures();
+  if (LOG.isDebugEnabled()) {
+LOG.debug(StringUtils.stringifyException(e));
+  }
   // attempting user could be null
+  IOException tce = (IOException) getTrueCause(e);
  AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
-  + attemptingUser + " (" + e.getLocalizedMessage() + ")");
-  throw (IOException) getTrueCause(e);
+  + attemptingUser + " (" + e.getLocalizedMessage()
+  + ") with true cause: (" + tce.getLocalizedMessage() + ")");
+  throw tce;
 }
 
if (saslServer != null && saslServer.isComplete()) {
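
The reason for logging the true cause separately: the IOException surfaced by
the SASL layer is often just a wrapper, and only the innermost cause (what
getTrueCause() digs out) tells the user anything actionable. A hypothetical
demonstration of the difference, not Server.java itself:

    import java.io.IOException;

    public class TrueCauseDemo {
      public static void main(String[] args) {
        IOException wrapped = new IOException(
            "DIGEST-MD5: IO error acquiring password",
            new IOException("token (owner=x) is expired"));
        Throwable t = wrapped;
        while (t.getCause() != null) {
          t = t.getCause(); // unwrap, analogous to getTrueCause()
        }
        System.out.println("surface:    " + wrapped.getLocalizedMessage());
        System.out.println("true cause: " + t.getLocalizedMessage());
      }
    }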



hadoop git commit: HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support FreeBSD and Solaris in addition to Linux (Kiran Kumar M R via Colin P. McCabe)

2015-03-09 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk de1101cb5 -> 3241fc2b1


HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support FreeBSD 
and Solaris in addition to Linux (Kiran Kumar M R via Colin P.  McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3241fc2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3241fc2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3241fc2b

Branch: refs/heads/trunk
Commit: 3241fc2b17f11e621d8ffb6160caa4b850c278b6
Parents: de1101c
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Mar 9 12:56:33 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Mon Mar 9 12:56:33 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  4 
 .../hadoop/crypto/random/OpensslSecureRandom.c  | 16 +++-
 2 files changed, 19 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3241fc2b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d5a8463..0fe5b7c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -671,6 +671,10 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11642. Upgrade azure sdk version from 0.6.0 to 2.0.0.
 (Shashank Khandelwal and Ivan Mitic via cnauroth)
 
+HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support
+FreeBSD and Solaris in addition to Linux (Kiran Kumar M R via Colin P.
+McCabe)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3241fc2b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
index 6c31d10..f30ccbe 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
@@ -29,6 +29,10 @@
#include <sys/types.h>
 #endif
 
+#if defined(__FreeBSD__)
+#include <pthread_np.h>
+#endif
+
 #ifdef WINDOWS
#include <windows.h>
 #endif
@@ -274,7 +278,17 @@ static void pthreads_locking_callback(int mode, int type, 
char *file, int line)
 
 static unsigned long pthreads_thread_id(void)
 {
-  return (unsigned long)syscall(SYS_gettid);
+  unsigned long thread_id = 0;
+#if defined(__linux__)
+  thread_id = (unsigned long)syscall(SYS_gettid);
+#elif defined(__FreeBSD__)
+  thread_id = (unsigned long)pthread_getthreadid_np();
+#elif defined(__sun)
+  thread_id = (unsigned long)pthread_self();
+#else
+#error "Platform not supported"
+#endif
+  return thread_id;
 }
 
 #endif /* UNIX */



[46/50] [abbrv] hadoop git commit: Revert HDFS-7857. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang.

2015-03-09 Thread jing9
Revert HDFS-7857. Improve authentication failure WARN message to avoid user 
confusion. Contributed by Yongjun Zhang.

This reverts commit d799fbe1ccf8752c44f087e34b5f400591d3b5bd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38b921a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38b921a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38b921a8

Branch: refs/heads/HDFS-7285
Commit: 38b921a88feeffb4e12070ed866cb0f171c36f8c
Parents: 129f88a
Author: Yongjun Zhang yzh...@cloudera.com
Authored: Sun Mar 8 20:54:43 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:17:55 2015 -0700

--
 .../src/main/java/org/apache/hadoop/ipc/Server.java | 9 ++---
 1 file changed, 2 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38b921a8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index d2d61b3..893e0eb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1324,15 +1324,10 @@ public abstract class Server {
   saslResponse = processSaslMessage(saslMessage);
 } catch (IOException e) {
   rpcMetrics.incrAuthenticationFailures();
-  if (LOG.isDebugEnabled()) {
-LOG.debug(StringUtils.stringifyException(e));
-  }
   // attempting user could be null
-  IOException tce = (IOException) getTrueCause(e);
  AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
-  + attemptingUser + " (" + e.getLocalizedMessage()
-  + ") with true cause: (" + tce.getLocalizedMessage() + ")");
-  throw tce;
+  + attemptingUser + " (" + e.getLocalizedMessage() + ")");
+  throw (IOException) getTrueCause(e);
 }
 
if (saslServer != null && saslServer.isComplete()) {



[27/50] [abbrv] hadoop git commit: MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. Contributed by Brahma Reddy Battula.

2015-03-09 Thread jing9
MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. Contributed by 
Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9f374be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9f374be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9f374be

Branch: refs/heads/HDFS-7285
Commit: b9f374be0a24678df6c7b2301df65e48b7de6629
Parents: 31d3efe
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Thu Mar 5 14:12:47 2015 +0900
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:25 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 .../java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java | 4 
 2 files changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f374be/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 212727e..d0d8216 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -414,6 +414,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6268. Fix typo in Task Attempt API's URL. (Ryu Kobayashi
 via ozawa)
 
+MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. (Brahma 
+Reddy Battula via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f374be/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index 8d5be86..5d3ad5b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -1451,10 +1451,6 @@ public class MRAppMaster extends CompositeService {
   String jobUserName = System
   .getenv(ApplicationConstants.Environment.USER.name());
   conf.set(MRJobConfig.USER_NAME, jobUserName);
-  // Do not automatically close FileSystem objects so that in case of
-  // SIGTERM I have a chance to write out the job history. I'll be closing
-  // the objects myself.
-  conf.setBoolean("fs.automatic.close", false);
   initAndStartAppMaster(appMaster, conf, jobUserName);
 } catch (Throwable t) {
  LOG.fatal("Error starting MRAppMaster", t);
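
The deleted lines set fs.automatic.close to false so the AM could close file
systems itself after writing job history, but that manual close did not
reliably happen, leaving FileSystem instances open. Restoring the default
hands cleanup back to FileSystem's shutdown hook. A small sketch of what the
flag controls (key name as in the deleted line):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class AutomaticCloseDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Default is true: the FileSystem cache closes every cached instance
        // from a JVM shutdown hook. false shifts that burden to the caller.
        conf.setBoolean("fs.automatic.close", true);
        FileSystem fs = FileSystem.get(conf);
        System.out.println(fs.getUri()); // no explicit fs.close() needed here
      }
    }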



[31/50] [abbrv] hadoop git commit: YARN-2786. Created a yarn cluster CLI and seeded with one command for listing node-labels collection. Contributed by Wangda Tan.

2015-03-09 Thread jing9
YARN-2786. Created a yarn cluster CLI and seeded with one command for listing 
node-labels collection. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a638ed6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a638ed6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a638ed6

Branch: refs/heads/HDFS-7285
Commit: 7a638ed67a225ea45b258db6926f51bb354c1564
Parents: 22b1f53
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu Mar 5 10:54:34 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:25 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn|   6 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd|   6 +
 .../hadoop/yarn/client/cli/ClusterCLI.java  | 157 ++
 .../hadoop/yarn/client/cli/TestClusterCLI.java  | 158 +++
 5 files changed, 330 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a638ed6/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5f61462..dcf328f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -97,6 +97,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3249. Add a 'kill application' button to Resource Manager's Web UI.
 (Ryu Kobayashi via ozawa)
 
+YARN-2786. Created a yarn cluster CLI and seeded with one command for 
listing
+node-labels collection. (Wangda Tan via vinodkv)
+
   IMPROVEMENTS
 
 YARN-3005. [JDK7] Use switch statement for String instead of if-else

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a638ed6/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index f1a06a6..e6af4ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -25,6 +25,7 @@ function hadoop_usage
   echo   applicationattemptprints applicationattempt(s) 
report
   echo   classpath prints the class path needed 
to get the
   echo Hadoop jar and the required 
libraries
+  echo   cluster   prints cluster information
   echo   container prints container(s) report
   echo   daemonlog get/set the log level for each 
daemon
   echo   jar jar run a jar file
@@ -83,6 +84,11 @@ case ${COMMAND} in
   classpath)
hadoop_do_classpath_subcommand "$@"
   ;;
+  cluster)
+CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI
+hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS"
+YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}"
+  ;;
   daemonlog)
 CLASS=org.apache.hadoop.log.LogLevel
hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a638ed6/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index 3f68b16..c29ee53 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -192,6 +192,11 @@ goto :eof
   set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
   goto :eof
 
+:cluster
+  set CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
 :container
   set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
   set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
@@ -312,6 +317,7 @@ goto :eof
   @echo   jar ^jar^  run a jar file
   @echo   application  prints application(s) report/kill application
   @echo   applicationattempt   prints applicationattempt(s) report
+  @echo   cluster  prints cluster information
   @echo   containerprints container(s) report
   @echo   node prints node report(s)
   @echo   queueprints queue information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a638ed6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
 

[36/50] [abbrv] hadoop git commit: HDFS-6488. Support HDFS superuser in NFSv3 gateway. Contributed by Brandon Li

2015-03-09 Thread jing9
HDFS-6488. Support HDFS superuser in NFSv3 gateway. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2f91d9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2f91d9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2f91d9b

Branch: refs/heads/HDFS-7285
Commit: a2f91d9b0c5131b2f76d4852e9c649e446b873bf
Parents: dfc015f
Author: Brandon Li brando...@apache.org
Authored: Fri Mar 6 15:19:45 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:26 2015 -0700

--
 .../hadoop/hdfs/nfs/conf/NfsConfigKeys.java | 14 +++
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java| 12 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../src/site/markdown/HdfsNfsGateway.md | 44 +---
 4 files changed, 64 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f91d9b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
index 9e4aaf5..09ee579 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
@@ -73,4 +73,18 @@ public class NfsConfigKeys {
   
   public static final String  NFS_METRICS_PERCENTILES_INTERVALS_KEY = 
"nfs.metrics.percentiles.intervals";
  public static final String  NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT = "";
+  
+  /*
+   * HDFS super-user is the user with the same identity as NameNode process
+   * itself and the super-user can do anything in that permissions checks never
+   * fail for the super-user. If the following property is configured, the
+   * superuser on NFS client can access any file on HDFS. By default, the super
+   * user is not configured in the gateway. Note that, even the the superuser 
is
+   * configured, nfs.exports.allowed.hosts still takes effect. For example,
+   * the superuser will not have write access to HDFS files through the gateway
+   * if the NFS client host is not allowed to have write access in
+   * nfs.exports.allowed.hosts.
+   */
+  public static final String  NFS_SUPERUSER_KEY = "nfs.superuser";
+  public static final String  NFS_SUPERUSER_DEFAULT = "";
 }
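
For illustration, the new key can be read and applied roughly as below. The
NfsSuperuserCheck class and its isSuperuser helper are hypothetical; only the
key name and its empty-string default come from the patch above.

import org.apache.hadoop.conf.Configuration;

// Hypothetical helper showing the intended semantics of nfs.superuser.
class NfsSuperuserCheck {
  static final String NFS_SUPERUSER_KEY = "nfs.superuser";
  static final String NFS_SUPERUSER_DEFAULT = "";

  private final String superuser;

  NfsSuperuserCheck(Configuration conf) {
    // An empty value means no superuser is configured (the default).
    this.superuser = conf.get(NFS_SUPERUSER_KEY, NFS_SUPERUSER_DEFAULT);
  }

  boolean isSuperuser(String remoteUser) {
    return !superuser.isEmpty() && superuser.equals(remoteUser);
  }
}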

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f91d9b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 05d0674..268abba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Options;
@@ -166,6 +165,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
   private JvmPauseMonitor pauseMonitor;
   private Nfs3HttpServer infoServer = null;
   static Nfs3Metrics metrics;
+  private String superuser;
 
   public RpcProgramNfs3(NfsConfiguration config, DatagramSocket 
registrationSocket,
   boolean allowInsecurePorts) throws IOException {
@@ -200,6 +200,9 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 UserGroupInformation.setConfiguration(config);
 SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
 NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
+superuser = config.get(NfsConfigKeys.NFS_SUPERUSER_KEY,
+NfsConfigKeys.NFS_SUPERUSER_DEFAULT);
+LOG.info("Configured HDFS superuser is " + superuser);
 
 if (!enableDump) {
   writeDumpDir = null;
@@ -583,13 +586,18 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 }
 
 try {
-  // HDFS-5804 removed supserUserClient access
   attrs = writeManager.getFileAttr(dfsClient, 

[22/50] [abbrv] hadoop git commit: MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into it's own class. (Chris Trezzo via kasha)

2015-03-09 Thread jing9
MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into it's own 
class. (Chris Trezzo via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba4d888d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba4d888d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba4d888d

Branch: refs/heads/HDFS-7285
Commit: ba4d888d037125e1434bd6d238c41041b2beefa7
Parents: 39535ec
Author: Karthik Kambatla ka...@apache.org
Authored: Wed Mar 4 14:42:07 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:24 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../java/org/apache/hadoop/mapreduce/Job.java   |   1 +
 .../hadoop/mapreduce/JobResourceUploader.java   | 363 +++
 .../apache/hadoop/mapreduce/JobSubmitter.java   | 312 +---
 4 files changed, 370 insertions(+), 309 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba4d888d/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index b2ae9d9..212727e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -323,6 +323,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6248. Exposed the internal MapReduce job's information as a 
public
 API in DistCp. (Jing Zhao via vinodkv)
 
+MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into it's own 
+class. (Chris Trezzo via kasha)
+
   OPTIMIZATIONS
 
 MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba4d888d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index f404175..9eea4cc 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -98,6 +98,7 @@ public class Job extends JobContextImpl implements JobContext 
{
"mapreduce.client.genericoptionsparser.used";
  public static final String SUBMIT_REPLICATION = 
"mapreduce.client.submit.file.replication";
+  public static final int DEFAULT_SUBMIT_REPLICATION = 10;
 
   @InterfaceStability.Evolving
   public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba4d888d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
new file mode 100644
index 000..eebdf88
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -0,0 +1,363 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.UnknownHostException;
+
+import 

[16/50] [abbrv] hadoop git commit: YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending jobs. (Siqi Li via kasha)

2015-03-09 Thread jing9
YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending 
jobs. (Siqi Li via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d138804e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d138804e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d138804e

Branch: refs/heads/HDFS-7285
Commit: d138804e49735995653a37efa19589f9cdf13879
Parents: 521a196
Author: Karthik Kambatla ka...@apache.org
Authored: Wed Mar 4 18:06:36 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:24 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/fair/FairScheduler.java   |   1 +
 .../scheduler/fair/MaxRunningAppsEnforcer.java  |  40 ++-
 .../scheduler/fair/TestFairScheduler.java   | 310 ++-
 4 files changed, 348 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d138804e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0b71bee..9a52325 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -697,6 +697,9 @@ Release 2.7.0 - UNRELEASED
 
 YARN-3131. YarnClientImpl should check FAILED and KILLED state in
 submitApplication (Chang Li via jlowe)
+
+YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with 
pending 
+jobs. (Siqi Li via kasha)
 
 Release 2.6.0 - 2014-11-18
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d138804e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 2b59716..e8a9555 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -1477,6 +1477,7 @@ public class FairScheduler extends
 allocConf = queueInfo;
 allocConf.getDefaultSchedulingPolicy().initialize(clusterResource);
 queueMgr.updateAllocationConfiguration(allocConf);
+maxRunningEnforcer.updateRunnabilityOnReload();
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d138804e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
index 2c90edd..f750438 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
@@ -105,6 +105,26 @@ public class MaxRunningAppsEnforcer {
   }
 
   /**
+   * This is called after reloading the allocation configuration when the
+   * scheduler is reinitialized.
+   *
+   * Checks to see whether any non-runnable applications become runnable
+   * now that the max running apps of a given queue has been changed.
+   *
+   * Runs in O(n) where n is the number of apps that are non-runnable and in
+   * the queues that went from having no slack to having slack.
+   */
+  public void updateRunnabilityOnReload() {
+FSParentQueue rootQueue = scheduler.getQueueManager().getRootQueue();
+List<List<FSAppAttempt>> appsNowMaybeRunnable =
+new ArrayList<List<FSAppAttempt>>();
+
+
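
The committed loop body is truncated here. As a standalone illustration of the
O(n) promotion pass the javadoc describes, consider the sketch below; Queue and
App are hypothetical stand-ins, not the scheduler's real types.

import java.util.ArrayList;
import java.util.List;

class ReloadRunnabilityDemo {
  static class App { boolean runnable; }
  static class Queue {
    int maxRunningApps;
    int numRunnableApps;
    List<App> nonRunnableApps = new ArrayList<App>();
  }

  // After a config reload, visit only queues whose limit grew and
  // promote their non-runnable apps while slack remains.
  static void updateRunnabilityOnReload(List<Queue> queuesWithNewSlack) {
    for (Queue q : queuesWithNewSlack) {
      while (q.numRunnableApps < q.maxRunningApps
          && !q.nonRunnableApps.isEmpty()) {
        App app = q.nonRunnableApps.remove(0);
        app.runnable = true;
        q.numRunnableApps++;
      }
    }
  }
}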

[41/50] [abbrv] hadoop git commit: HADOOP-11642. Upgrade azure sdk version from 0.6.0 to 2.0.0. Contributed by Shashank Khandelwal and Ivan Mitic.

2015-03-09 Thread jing9
HADOOP-11642. Upgrade azure sdk version from 0.6.0 to 2.0.0. Contributed by 
Shashank Khandelwal and Ivan Mitic.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd633373
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd633373
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd633373

Branch: refs/heads/HDFS-7285
Commit: fd63337314557e9c8078e0e30ce7e43a05698594
Parents: a2f91d9
Author: cnauroth cnaur...@apache.org
Authored: Fri Mar 6 14:59:09 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:27 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 hadoop-project/pom.xml  |  6 +-
 hadoop-tools/hadoop-azure/pom.xml   |  7 +-
 .../fs/azure/AzureNativeFileSystemStore.java| 37 ++-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 10 +--
 .../hadoop/fs/azure/PageBlobFormatHelpers.java  |  2 +-
 .../hadoop/fs/azure/PageBlobInputStream.java|  8 +--
 .../hadoop/fs/azure/PageBlobOutputStream.java   |  8 +--
 .../hadoop/fs/azure/SelfRenewingLease.java  |  6 +-
 .../fs/azure/SelfThrottlingIntercept.java   | 10 +--
 .../hadoop/fs/azure/SendRequestIntercept.java   | 16 +++--
 .../hadoop/fs/azure/StorageInterface.java   | 24 +++
 .../hadoop/fs/azure/StorageInterfaceImpl.java   | 46 +++--
 .../fs/azure/metrics/ErrorMetricUpdater.java|  8 +--
 .../metrics/ResponseReceivedMetricUpdater.java  | 10 +--
 .../fs/azure/AzureBlobStorageTestAccount.java   | 28 
 .../hadoop/fs/azure/MockStorageInterface.java   | 70 ++--
 .../fs/azure/NativeAzureFileSystemBaseTest.java |  6 +-
 .../TestAzureFileSystemErrorConditions.java |  6 +-
 .../hadoop/fs/azure/TestBlobDataValidation.java | 20 +++---
 .../hadoop/fs/azure/TestContainerChecks.java|  6 +-
 .../TestOutOfBandAzureBlobOperationsLive.java   |  4 +-
 .../fs/azure/TestWasbUriAndConfiguration.java   |  4 +-
 23 files changed, 190 insertions(+), 155 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 628faa3..14cd75a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -653,6 +653,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11648. Set DomainSocketWatcher thread name explicitly.
 (Liang Xie via ozawa)
 
+HADOOP-11642. Upgrade azure sdk version from 0.6.0 to 2.0.0.
+(Shashank Khandelwal and Ivan Mitic via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 2c0f03a..a6127c7 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -902,9 +902,9 @@
   </dependency>
 
   <dependency>
-<groupId>com.microsoft.windowsazure.storage</groupId>
-<artifactId>microsoft-windowsazure-storage-sdk</artifactId>
-<version>0.6.0</version>
+<groupId>com.microsoft.azure</groupId>
+<artifactId>azure-storage</artifactId>
+<version>2.0.0</version>
 </dependency>
 
 <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure/pom.xml 
b/hadoop-tools/hadoop-azure/pom.xml
index d39dd76..e9b3af7 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -140,12 +140,13 @@
   <artifactId>httpclient</artifactId>
   <scope>compile</scope>
 </dependency>
-
+
 <dependency>
-  <groupId>com.microsoft.windowsazure.storage</groupId>
-  <artifactId>microsoft-windowsazure-storage-sdk</artifactId>
+  <groupId>com.microsoft.azure</groupId>
+  <artifactId>azure-storage</artifactId>
   <scope>compile</scope>
 </dependency>
+
 
 <dependency>
   <groupId>com.google.guava</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index c0c03b3..b664fe7 100644
--- 

[05/50] [abbrv] hadoop git commit: MAPREDUCE-5583. Ability to limit running map and reduce tasks. Contributed by Jason Lowe.

2015-03-09 Thread jing9
MAPREDUCE-5583. Ability to limit running map and reduce tasks. Contributed by 
Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68c9b55e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68c9b55e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68c9b55e

Branch: refs/heads/HDFS-7285
Commit: 68c9b55e9d3ff5959b750502724d9c3db23171c1
Parents: 4a3ef07
Author: Junping Du junping...@apache.org
Authored: Tue Mar 3 02:01:04 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:22 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../v2/app/rm/RMContainerAllocator.java |  65 +-
 .../v2/app/rm/RMContainerRequestor.java |  74 ++-
 .../v2/app/rm/TestRMContainerAllocator.java | 214 +++
 .../apache/hadoop/mapreduce/MRJobConfig.java|   8 +
 .../src/main/resources/mapred-default.xml   |  16 ++
 6 files changed, 363 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c9b55e/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 5524b14..7a2eff3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -258,6 +258,9 @@ Release 2.7.0 - UNRELEASED
 
 MAPREDUCE-6228. Add truncate operation to SLive. (Plamen Jeliazkov via shv)
 
+MAPREDUCE-5583. Ability to limit running map and reduce tasks. 
+(Jason Lowe via junping_du)
+
   IMPROVEMENTS
 
 MAPREDUCE-6149. Document override log4j.properties in MR job.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c9b55e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index 1acfeec..efea674 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -99,9 +99,9 @@ public class RMContainerAllocator extends RMContainerRequestor
   public static final 
   float DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART = 0.05f;
   
-  private static final Priority PRIORITY_FAST_FAIL_MAP;
-  private static final Priority PRIORITY_REDUCE;
-  private static final Priority PRIORITY_MAP;
+  static final Priority PRIORITY_FAST_FAIL_MAP;
+  static final Priority PRIORITY_REDUCE;
+  static final Priority PRIORITY_MAP;
 
   @VisibleForTesting
   public static final String RAMPDOWN_DIAGNOSTIC = Reducer preempted 
@@ -166,6 +166,8 @@ public class RMContainerAllocator extends 
RMContainerRequestor
*/
   private long allocationDelayThresholdMs = 0;
   private float reduceSlowStart = 0;
+  private int maxRunningMaps = 0;
+  private int maxRunningReduces = 0;
   private long retryInterval;
   private long retrystartTime;
   private Clock clock;
@@ -201,6 +203,10 @@ public class RMContainerAllocator extends 
RMContainerRequestor
 allocationDelayThresholdMs = conf.getInt(
 MRJobConfig.MR_JOB_REDUCER_PREEMPT_DELAY_SEC,
+MRJobConfig.DEFAULT_MR_JOB_REDUCER_PREEMPT_DELAY_SEC) * 1000; //sec -> ms
+maxRunningMaps = conf.getInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT,
+MRJobConfig.DEFAULT_JOB_RUNNING_MAP_LIMIT);
+maxRunningReduces = conf.getInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT,
+MRJobConfig.DEFAULT_JOB_RUNNING_REDUCE_LIMIT);
 RackResolver.init(conf);
 retryInterval = 
getConfig().getLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS,
 
MRJobConfig.DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS);
@@ -664,6 +670,8 @@ public class RMContainerAllocator extends 
RMContainerRequestor
   
  @SuppressWarnings("unchecked")
  private List<Container> getResources() throws Exception {
+applyConcurrentTaskLimits();
+
 // will be null the first time
 Resource headRoom =
 getAvailableResources() == null ? Resources.none() :
@@ -778,6 +786,43 @@ public class RMContainerAllocator extends 
RMContainerRequestor
 return newContainers;
   }
 
+  private void applyConcurrentTaskLimits() {
+int numScheduledMaps = 
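
The committed applyConcurrentTaskLimits() body is truncated above. The limiting
idea, sketched in a hedged form below, is to cap how many container requests are
visible to the RM so running tasks never exceed the configured limit; the class
and method names here are illustrative, not the committed code.

class TaskLimitDemo {
  /** Returns how many of the pending requests may be issued now. */
  static int allowedRequests(int limit, int running, int pending) {
    if (limit <= 0) {
      return pending; // a non-positive limit is treated as unlimited
    }
    return Math.min(pending, Math.max(0, limit - running));
  }
}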

[50/50] [abbrv] hadoop git commit: HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode. Contributed by Jing Zhao.

2015-03-09 Thread jing9
HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode. 
Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edc476bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edc476bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edc476bf

Branch: refs/heads/HDFS-7285
Commit: edc476bfce31e8e9b20ad9b23a8312e186dc8cab
Parents: 35a08d8
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 2 13:44:33 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:21:19 2015 -0700

--
 .../hdfs/server/blockmanagement/DecommissionManager.java| 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/edc476bf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index dc17abe..3765dd0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -545,7 +545,7 @@ public class DecommissionManager {
   int underReplicatedInOpenFiles = 0;
   while (it.hasNext()) {
 numBlocksChecked++;
-final BlockInfoContiguous block = (BlockInfoContiguous) it.next();
+final BlockInfo block = it.next();
 // Remove the block from the list if it's no longer in the block map,
 // e.g. the containing file has been deleted
 if (blockManager.blocksMap.getStoredBlock(block) == null) {
@@ -579,8 +579,9 @@ public class DecommissionManager {
 }
 
 // Even if the block is under-replicated, 
-// it doesn't block decommission if it's sufficiently replicated 
-if (isSufficientlyReplicated(block, bc, num)) {
+// it doesn't block decommission if it's sufficiently replicated
+BlockInfoContiguous blk = (BlockInfoContiguous) block;
+if (isSufficientlyReplicated(blk, bc, num)) {
   if (pruneSufficientlyReplicated) {
 it.remove();
   }
@@ -589,7 +590,7 @@ public class DecommissionManager {
 
 // We've found an insufficiently replicated block.
 if (insufficientlyReplicated != null) {
-  insufficientlyReplicated.add(block);
+  insufficientlyReplicated.add(blk);
 }
 // Log if this is our first time through
 if (firstReplicationLog) {



[32/50] [abbrv] hadoop git commit: HDFS-7855. Separate class Packet from DFSOutputStream. Contributed by Li Bo.

2015-03-09 Thread jing9
HDFS-7855. Separate class Packet from DFSOutputStream. Contributed by Li Bo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8bb732f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8bb732f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8bb732f

Branch: refs/heads/HDFS-7285
Commit: d8bb732fa7ddd4f4a55aead7160a8b6290b9446d
Parents: 7a638ed
Author: Jing Zhao ji...@apache.org
Authored: Thu Mar 5 10:57:48 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:25 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 238 +++-
 .../java/org/apache/hadoop/hdfs/DFSPacket.java  | 270 +++
 .../org/apache/hadoop/hdfs/TestDFSPacket.java   |  68 +
 4 files changed, 381 insertions(+), 197 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8bb732f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 59f69fb..763d327 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -715,6 +715,8 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11648. Set DomainSocketWatcher thread name explicitly.
 (Liang Xie via ozawa)
 
+HDFS-7855. Separate class Packet from DFSOutputStream. (Li Bo via jing9)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8bb732f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
old mode 100644
new mode 100755
index dc2f674..130bb6e
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -30,7 +30,6 @@ import java.io.OutputStream;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.Socket;
-import java.nio.BufferOverflowException;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -79,7 +78,6 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
@@ -160,9 +158,9 @@ public class DFSOutputStream extends FSOutputSummer
   private final int bytesPerChecksum; 
 
   // both dataQueue and ackQueue are protected by dataQueue lock
-  private final LinkedList<Packet> dataQueue = new LinkedList<Packet>();
-  private final LinkedList<Packet> ackQueue = new LinkedList<Packet>();
-  private Packet currentPacket = null;
+  private final LinkedList<DFSPacket> dataQueue = new LinkedList<DFSPacket>();
+  private final LinkedList<DFSPacket> ackQueue = new LinkedList<DFSPacket>();
+  private DFSPacket currentPacket = null;
   private DataStreamer streamer;
   private long currentSeqno = 0;
   private long lastQueuedSeqno = -1;
@@ -187,8 +185,8 @@ public class DFSOutputStream extends FSOutputSummer
   BlockStoragePolicySuite.createDefaultSuite();
 
   /** Use {@link ByteArrayManager} to create buffer for non-heartbeat 
packets.*/
-  private Packet createPacket(int packetSize, int chunksPerPkt, long 
offsetInBlock,
-  long seqno) throws InterruptedIOException {
+  private DFSPacket createPacket(int packetSize, int chunksPerPkt, long 
offsetInBlock,
+  long seqno, boolean lastPacketInBlock) throws InterruptedIOException {
 final byte[] buf;
 final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;
 
@@ -201,171 +199,20 @@ public class DFSOutputStream extends FSOutputSummer
   throw iioe;
 }
 
-return new Packet(buf, chunksPerPkt, offsetInBlock, seqno, 
getChecksumSize());
+return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
+ getChecksumSize(), lastPacketInBlock);
   }
 
   /**
* For heartbeat packets, create buffer directly by new byte[]
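
The diff is cut off here. For context, the dataQueue/ackQueue fields renamed
above follow the handoff pattern sketched below; Packet is a hypothetical
stand-in for the extracted DFSPacket class, and this is an illustration of the
queue discipline, not the committed streamer code.

import java.util.LinkedList;

class PacketQueueDemo {
  static class Packet { long seqno; }

  private final LinkedList<Packet> dataQueue = new LinkedList<Packet>();
  private final LinkedList<Packet> ackQueue = new LinkedList<Packet>();

  synchronized void send() {
    Packet p = dataQueue.removeFirst(); // next packet to put on the wire
    ackQueue.addLast(p);                // parked until the pipeline acks it
  }

  synchronized void ackReceived(long seqno) {
    if (!ackQueue.isEmpty() && ackQueue.getFirst().seqno == seqno) {
      ackQueue.removeFirst(); // delivery confirmed, drop the packet
    }
  }
}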
   

[38/50] [abbrv] hadoop git commit: YARN-3227. Timeline renew delegation token fails when RM user's TGT is expired. Contributed by Zhijie Shen

2015-03-09 Thread jing9
YARN-3227. Timeline renew delegation token fails when RM user's TGT is
expired. Contributed by Zhijie Shen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6199e76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6199e76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6199e76

Branch: refs/heads/HDFS-7285
Commit: c6199e76ea86d07f8d96381d5a1ba45999bfdb4e
Parents: 055267d
Author: Xuan xg...@apache.org
Authored: Fri Mar 6 13:32:05 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:26 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java| 2 ++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6199e76/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index accde78..d073169 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -713,6 +713,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher 
receiving 
 events for old client. (Zhihai Xu via kasha)
 
+YARN-3227. Timeline renew delegation token fails when RM user's TGT is 
expired
+(Zhijie Shen via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6199e76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index af68492..c05d65b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -439,6 +439,7 @@ public class TimelineClientImpl extends TimelineClient {
 UserGroupInformation callerUGI = isProxyAccess ?
 UserGroupInformation.getCurrentUser().getRealUser()
 : UserGroupInformation.getCurrentUser();
+callerUGI.checkTGTAndReloginFromKeytab();
 try {
   return callerUGI.doAs(action);
 } catch (UndeclaredThrowableException e) {
@@ -488,6 +489,7 @@ public class TimelineClientImpl extends TimelineClient {
   : UserGroupInformation.getCurrentUser();
   final String doAsUser = isProxyAccess ?
   UserGroupInformation.getCurrentUser().getShortUserName() : null;
+  callerUGI.checkTGTAndReloginFromKeytab();
   try {
 return callerUGI.doAs(new 
PrivilegedExceptionAction<HttpURLConnection>() {
   @Override
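
Both hunks apply the same pattern: refresh the keytab-based TGT if it is near
expiry before entering doAs. A minimal sketch follows; the runWithFreshTgt
wrapper is hypothetical, while checkTGTAndReloginFromKeytab() and doAs() are
real UserGroupInformation methods.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

class TgtReloginDemo {
  static <T> T runWithFreshTgt(UserGroupInformation ugi,
      PrivilegedExceptionAction<T> action) throws Exception {
    ugi.checkTGTAndReloginFromKeytab(); // no-op for non-keytab logins
    return ugi.doAs(action);            // action now runs with a valid TGT
  }
}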



[24/50] [abbrv] hadoop git commit: YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong

2015-03-09 Thread jing9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
index 7bac6f2..2cd7580 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
@@ -20,15 +20,16 @@ package 
org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
 
 import static org.apache.hadoop.yarn.webapp.Params.TITLE;
 import static org.mockito.Mockito.mock;
-import org.junit.Assert;
 
+import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import 
org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryClientService;
 import 
org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager;
 import 
org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManagerImpl;
 import 
org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
@@ -68,8 +69,8 @@ public class TestAHSWebApp extends 
ApplicationHistoryStoreTestUtils {
   @Test
   public void testView() throws Exception {
 Injector injector =
-WebAppTests.createMockInjector(ApplicationContext.class,
-  mockApplicationHistoryManager(5, 1, 1));
+WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+  mockApplicationHistoryClientService(5, 1, 1));
 AHSView ahsViewInstance = injector.getInstance(AHSView.class);
 
 ahsViewInstance.render();
@@ -89,8 +90,8 @@ public class TestAHSWebApp extends 
ApplicationHistoryStoreTestUtils {
   @Test
   public void testAppPage() throws Exception {
 Injector injector =
-WebAppTests.createMockInjector(ApplicationContext.class,
-  mockApplicationHistoryManager(1, 5, 1));
+WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+  mockApplicationHistoryClientService(1, 5, 1));
 AppPage appPageInstance = injector.getInstance(AppPage.class);
 
 appPageInstance.render();
@@ -105,8 +106,8 @@ public class TestAHSWebApp extends 
ApplicationHistoryStoreTestUtils {
   @Test
   public void testAppAttemptPage() throws Exception {
 Injector injector =
-WebAppTests.createMockInjector(ApplicationContext.class,
-  mockApplicationHistoryManager(1, 1, 5));
+WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+  mockApplicationHistoryClientService(1, 1, 5));
 AppAttemptPage appAttemptPageInstance =
 injector.getInstance(AppAttemptPage.class);
 
@@ -123,8 +124,8 @@ public class TestAHSWebApp extends 
ApplicationHistoryStoreTestUtils {
   @Test
   public void testContainerPage() throws Exception {
 Injector injector =
-WebAppTests.createMockInjector(ApplicationContext.class,
-  mockApplicationHistoryManager(1, 1, 1));
+WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+  mockApplicationHistoryClientService(1, 1, 1));
 ContainerPage containerPageInstance =
 injector.getInstance(ContainerPage.class);
 
@@ -141,10 +142,12 @@ public class TestAHSWebApp extends 
ApplicationHistoryStoreTestUtils {
 WebAppTests.flushOutput(injector);
   }
 
-  ApplicationHistoryManager mockApplicationHistoryManager(int numApps,
+  ApplicationHistoryClientService mockApplicationHistoryClientService(int 
numApps,
   int numAppAttempts, int numContainers) throws Exception {
 ApplicationHistoryManager ahManager =
 new MockApplicationHistoryManagerImpl(store);
+ApplicationHistoryClientService historyClientService =
+new ApplicationHistoryClientService(ahManager);
for (int i = 1; i <= numApps; ++i) {
   ApplicationId appId = 

[18/50] [abbrv] hadoop git commit: YARN-3122. Metrics for container's actual CPU usage. (Anubhav Dhoot via kasha)

2015-03-09 Thread jing9
YARN-3122. Metrics for container's actual CPU usage. (Anubhav Dhoot via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97adb9aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97adb9aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97adb9aa

Branch: refs/heads/HDFS-7285
Commit: 97adb9aa39ef01e5d38039044cf90b351fd21c30
Parents: e93eee9
Author: Karthik Kambatla ka...@apache.org
Authored: Wed Mar 4 17:33:30 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:11:24 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../apache/hadoop/yarn/util/CpuTimeTracker.java | 99 
 .../util/LinuxResourceCalculatorPlugin.java | 46 +++--
 .../yarn/util/ProcfsBasedProcessTree.java   | 77 ++-
 .../util/ResourceCalculatorProcessTree.java | 12 ++-
 .../yarn/util/WindowsBasedProcessTree.java  |  7 +-
 .../util/TestLinuxResourceCalculatorPlugin.java |  4 +-
 .../yarn/util/TestProcfsBasedProcessTree.java   | 38 ++--
 .../util/TestResourceCalculatorProcessTree.java |  5 +
 .../monitor/ContainerMetrics.java   | 39 ++--
 .../monitor/ContainersMonitorImpl.java  | 18 
 .../util/NodeManagerHardwareUtils.java  | 16 +++-
 12 files changed, 311 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 03bb20b..0b71bee 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -351,6 +351,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3272. Surface container locality info in RM web UI.
 (Jian He via wangda)
 
+YARN-3122. Metrics for container's actual CPU usage. 
+(Anubhav Dhoot via kasha)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
new file mode 100644
index 000..d36848e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.math.BigInteger;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class CpuTimeTracker {
+  public static final int UNAVAILABLE = -1;
+  final long MINIMUM_UPDATE_INTERVAL;
+
+  // CPU used time since system is on (ms)
+  BigInteger cumulativeCpuTime = BigInteger.ZERO;
+
+  // CPU used time read last time (ms)
+  BigInteger lastCumulativeCpuTime = BigInteger.ZERO;
+
+  // Unix timestamp while reading the CPU time (ms)
+  long sampleTime;
+  long lastSampleTime;
+  float cpuUsage;
+  BigInteger jiffyLengthInMillis;
+
+  public CpuTimeTracker(long jiffyLengthInMillis) {
+this.jiffyLengthInMillis = BigInteger.valueOf(jiffyLengthInMillis);
+this.cpuUsage = UNAVAILABLE;
+this.sampleTime = UNAVAILABLE;
+this.lastSampleTime = UNAVAILABLE;
+MINIMUM_UPDATE_INTERVAL =  10 * jiffyLengthInMillis;
+  }
+
+  /**
+   * Return percentage of cpu time spent over the time since last update.
+   * CPU time spent is based on elapsed jiffies multiplied by amount of
+   * time for 1 core. Thus, if you use 2 cores completely you would have spent
+   * twice the actual 
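
The javadoc is truncated here. A hedged sketch of the computation it describes
follows: delta CPU time over delta wall-clock time, per core, so a process
saturating two cores reports 200. This standalone helper is illustrative, not
the committed method.

import java.math.BigInteger;

class CpuPercentDemo {
  static final int UNAVAILABLE = -1;

  static float cpuUsagePercent(BigInteger lastCpuMs, BigInteger curCpuMs,
      long lastSampleMs, long curSampleMs) {
    if (lastSampleMs == UNAVAILABLE || curSampleMs <= lastSampleMs) {
      return UNAVAILABLE; // need two distinct samples to form a rate
    }
    BigInteger deltaCpuMs = curCpuMs.subtract(lastCpuMs);
    return 100F * deltaCpuMs.floatValue() / (curSampleMs - lastSampleMs);
  }
}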

[47/50] [abbrv] hadoop git commit: HADOOP-11673. Skip using JUnit Assume in TestCodec. Contributed by Brahma Reddy Battula.

2015-03-09 Thread jing9
HADOOP-11673. Skip using JUnit Assume in TestCodec.
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/662781eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/662781eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/662781eb

Branch: refs/heads/HDFS-7285
Commit: 662781eb49a925dfaa842f5b00ff9404a8ad2052
Parents: a1e4dfe
Author: Chris Douglas cdoug...@apache.org
Authored: Sun Mar 8 19:15:46 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 9 13:17:55 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../org/apache/hadoop/io/compress/TestCodec.java| 16 
 2 files changed, 7 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/662781eb/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 16002d5..0af0beb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -184,6 +184,9 @@ Trunk (Unreleased)
 HADOOP-11593. Convert site documentation from apt to markdown (stragglers)
 (Masatake Iwasaki via aw)
 
+HADOOP-11673. Skip using JUnit Assume in TestCodec. (Brahma Reddy Battula
+via cdouglas)
+
   BUG FIXES
 
 HADOOP-11473. test-patch says -1 overall even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/662781eb/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
index 98b3934..7246bf5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
@@ -74,6 +74,7 @@ import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
 import static org.junit.Assert.*;
+import static org.junit.Assume.*;
 
 public class TestCodec {
 
@@ -364,10 +365,7 @@ public class TestCodec {
   public void testCodecPoolGzipReuse() throws Exception {
 Configuration conf = new Configuration();
 conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
-if (!ZlibFactory.isNativeZlibLoaded(conf)) {
-  LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
-  return;
-}
+assumeTrue(ZlibFactory.isNativeZlibLoaded(conf));
 GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
 DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
 Compressor c1 = CodecPool.getCompressor(gzc);
@@ -723,10 +721,7 @@ public class TestCodec {
   public void testNativeGzipConcat() throws IOException {
 Configuration conf = new Configuration();
 conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
-if (!ZlibFactory.isNativeZlibLoaded(conf)) {
-  LOG.warn("skipped: native libs not loaded");
-  return;
-}
+assumeTrue(ZlibFactory.isNativeZlibLoaded(conf));
 GzipConcatTest(conf, GzipCodec.GzipZlibDecompressor.class);
   }
 
@@ -840,10 +835,7 @@ public class TestCodec {
 Configuration conf = new Configuration();
 conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, 
useNative);
 if (useNative) {
-  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
-LOG.warn("testGzipCodecWrite skipped: native libs not loaded");
-return;
-  }
+  assumeTrue(ZlibFactory.isNativeZlibLoaded(conf));
 } else {
  assertFalse("ZlibFactory is using native libs against request",
  ZlibFactory.isNativeZlibLoaded(conf));
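
Why Assume beats a silent return: an unmet assumption marks the test as skipped
in the report instead of letting it pass vacuously. A minimal JUnit 4 sketch,
with a hypothetical nativeZlibAvailable() probe standing in for the real
ZlibFactory.isNativeZlibLoaded(conf) check:

import static org.junit.Assume.assumeTrue;
import org.junit.Test;

public class AssumeDemo {
  @Test
  public void testNeedsNativeZlib() {
    assumeTrue(nativeZlibAvailable()); // reported as skipped when false
    // ... assertions that require the native codec would follow here
  }

  private static boolean nativeZlibAvailable() {
    return false; // stand-in for the environment probe
  }
}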



[1/3] hadoop git commit: HADOOP-11602. Backport 'Fix toUpperCase/toLowerCase to use Locale.ENGLISH.' (ozawa)

2015-03-09 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d9416317a - b46f9e72d


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b46f9e72/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 35b63eb..9a2bb24 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
 import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
@@ -755,7 +756,7 @@ public class ClientRMService extends AbstractService 
implements
  if (applicationTypes != null && !applicationTypes.isEmpty()) {
 String appTypeToMatch = caseSensitive
 ? application.getApplicationType()
-: application.getApplicationType().toLowerCase();
+: StringUtils.toLowerCase(application.getApplicationType());
 if (!applicationTypes.contains(appTypeToMatch)) {
   continue;
 }
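
The bug class this backport closes: String.toLowerCase() with no argument uses
the JVM default locale, so under a Turkish locale the dotted/dotless-i rules
change the result and break ASCII keyword matching. A small self-contained
demonstration:

import java.util.Locale;

public class LocaleCaseDemo {
  public static void main(String[] args) {
    String key = "INCLUDE_FILE";
    // Prints "ınclude_fıle" (dotless ı) under the Turkish locale
    System.out.println(key.toLowerCase(new Locale("tr")));
    // Prints "include_file" everywhere, which config-key lookups need
    System.out.println(key.toLowerCase(Locale.ENGLISH));
  }
}

StringUtils.toLowerCase in the hunks below pins the conversion to
Locale.ENGLISH so lookups behave identically on every JVM.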

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b46f9e72/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
index 230f9a9..d6e9e45 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
@@ -20,6 +20,7 @@ package 
org.apache.hadoop.yarn.server.resourcemanager.resource;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.util.StringUtils;
 
 @Private
 @Evolving
@@ -61,7 +62,7 @@ public class ResourceWeights {
 sb.append(", ");
   }
   ResourceType resourceType = ResourceType.values()[i];
-  sb.append(resourceType.name().toLowerCase());
+  sb.append(StringUtils.toLowerCase(resourceType.name()));
   sb.append(String.format(" weight=%.1f", getWeight(resourceType)));
 }
 sb.append(">");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b46f9e72/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 3528c2d..102e553 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ 

[3/3] hadoop git commit: HADOOP-11602. Backport 'Fix toUpperCase/toLowerCase to use Locale.ENGLISH.' (ozawa)

2015-03-09 Thread ozawa
HADOOP-11602. Backport 'Fix toUpperCase/toLowerCase to use Locale.ENGLISH.' 
(ozawa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b46f9e72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b46f9e72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b46f9e72

Branch: refs/heads/branch-2
Commit: b46f9e72dbed6fd1f8cae1e12973252462d6ee15
Parents: d941631
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon Mar 9 19:53:03 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon Mar 9 19:53:55 2015 +0900

--
 .../classification/tools/StabilityOptions.java  |  5 ++-
 .../AltKerberosAuthenticationHandler.java   |  6 ++-
 .../authentication/util/KerberosUtil.java   |  2 +-
 .../authentication/util/TestKerberosUtil.java   | 14 ---
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 +
 .../org/apache/hadoop/conf/Configuration.java   |  6 +--
 .../org/apache/hadoop/crypto/CipherSuite.java   |  3 +-
 .../hadoop/crypto/key/JavaKeyStoreProvider.java |  3 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  7 +++-
 .../java/org/apache/hadoop/fs/StorageType.java  |  3 +-
 .../apache/hadoop/fs/permission/AclEntry.java   |  5 ++-
 .../apache/hadoop/fs/shell/XAttrCommands.java   |  2 +-
 .../org/apache/hadoop/fs/shell/find/Name.java   |  5 ++-
 .../io/compress/CompressionCodecFactory.java|  7 ++--
 .../hadoop/metrics2/impl/MetricsConfig.java |  7 ++--
 .../hadoop/metrics2/impl/MetricsSystemImpl.java |  5 ++-
 .../hadoop/record/compiler/CGenerator.java  | 10 +++--
 .../hadoop/record/compiler/CppGenerator.java| 10 +++--
 .../hadoop/record/compiler/generated/Rcc.java   |  5 ++-
 .../hadoop/security/SaslPropertiesResolver.java |  3 +-
 .../apache/hadoop/security/SecurityUtil.java| 12 +++---
 .../hadoop/security/WhitelistBasedResolver.java |  3 +-
 .../security/ssl/FileBasedKeyStoresFactory.java |  4 +-
 .../apache/hadoop/security/ssl/SSLFactory.java  |  5 ++-
 .../security/ssl/SSLHostnameVerifier.java   | 10 +++--
 .../DelegationTokenAuthenticationHandler.java   |  3 +-
 .../web/DelegationTokenAuthenticator.java   |  3 +-
 .../apache/hadoop/util/ComparableVersion.java   |  3 +-
 .../org/apache/hadoop/util/StringUtils.java | 40 +++-
 .../java/org/apache/hadoop/ipc/TestIPC.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java |  2 +-
 .../hadoop/security/TestSecurityUtil.java   |  8 ++--
 .../security/TestUserGroupInformation.java  |  5 ++-
 .../hadoop/test/TimedOutTestsListener.java  |  6 ++-
 .../org/apache/hadoop/util/TestStringUtils.java | 21 ++
 .../org/apache/hadoop/util/TestWinUtils.java|  6 ++-
 .../java/org/apache/hadoop/nfs/NfsExports.java  |  5 ++-
 .../server/CheckUploadContentTypeFilter.java|  4 +-
 .../hadoop/fs/http/server/FSOperations.java |  7 +++-
 .../http/server/HttpFSParametersProvider.java   |  4 +-
 .../org/apache/hadoop/lib/server/Server.java|  3 +-
 .../service/hadoop/FileSystemAccessService.java |  6 ++-
 .../org/apache/hadoop/lib/wsrs/EnumParam.java   |  2 +-
 .../apache/hadoop/lib/wsrs/EnumSetParam.java|  3 +-
 .../hadoop/lib/wsrs/ParametersProvider.java |  3 +-
 .../org/apache/hadoop/hdfs/XAttrHelper.java | 19 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |  3 +-
 .../BlockStoragePolicySuite.java|  4 +-
 .../hdfs/server/common/HdfsServerConstants.java |  5 ++-
 .../hdfs/server/datanode/StorageLocation.java   |  4 +-
 .../hdfs/server/namenode/FSEditLogOp.java   |  3 +-
 .../namenode/QuotaByStorageTypeEntry.java   |  4 +-
 .../hdfs/server/namenode/SecondaryNameNode.java |  2 +-
 .../org/apache/hadoop/hdfs/tools/GetConf.java   | 17 +
 .../OfflineEditsVisitorFactory.java |  7 ++--
 .../offlineImageViewer/FSImageHandler.java  |  4 +-
 .../org/apache/hadoop/hdfs/web/AuthFilter.java  |  3 +-
 .../org/apache/hadoop/hdfs/web/ParamFilter.java |  3 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  4 +-
 .../hadoop/hdfs/web/resources/EnumParam.java|  3 +-
 .../hadoop/hdfs/web/resources/EnumSetParam.java |  3 +-
 .../namenode/snapshot/TestSnapshotManager.java  |  6 +--
 .../jobhistory/JobHistoryEventHandler.java  |  3 +-
 .../mapreduce/v2/app/webapp/AppController.java  |  6 +--
 .../apache/hadoop/mapreduce/TypeConverter.java  |  3 +-
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  4 +-
 .../hadoop/mapreduce/TestTypeConverter.java |  6 ++-
 .../java/org/apache/hadoop/mapred/Task.java |  2 +-
 .../counters/FileSystemCounterGroup.java|  4 +-
 .../mapreduce/filecache/DistributedCache.java   |  4 +-
 .../hadoop/mapreduce/lib/db/DBInputFormat.java  |  5 ++-
 .../org/apache/hadoop/mapreduce/tools/CLI.java  |  9 +++--
 .../java/org/apache/hadoop/fs/TestDFSIO.java| 12 +++---
 .../org/apache/hadoop/fs/TestFileSystem.java|  4 +-
 

[2/3] hadoop git commit: HADOOP-11602. Backport 'Fix toUpperCase/toLowerCase to use Locale.ENGLISH.' (ozawa)

2015-03-09 Thread ozawa
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b46f9e72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java
index ddd8a1a..cf59c9d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Objects;
 import org.apache.hadoop.fs.StorageType;
-import java.util.Locale;
+import org.apache.hadoop.util.StringUtils;
 
  public class QuotaByStorageTypeEntry {
private StorageType type;
@@ -54,7 +54,7 @@ import java.util.Locale;
public String toString() {
  StringBuilder sb = new StringBuilder();
  assert (type != null);
- sb.append(type.toString().toLowerCase());
+ sb.append(StringUtils.toLowerCase(type.toString()));
  sb.append(':');
  sb.append(quota);
  return sb.toString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b46f9e72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 83e6426..ec7e0c9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -587,7 +587,7 @@ public class SecondaryNameNode implements Runnable,
   return 0;
 }
 
-String cmd = opts.getCommand().toString().toLowerCase();
+String cmd = StringUtils.toLowerCase(opts.getCommand().toString());
 
 int exitCode = 0;
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b46f9e72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
index 92a16cd..e6cf16c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -79,19 +80,19 @@ public class GetConf extends Configured implements Tool {
 private static final Map<String, CommandHandler> map;
 static  {
   map = new HashMap<String, CommandHandler>();
-  map.put(NAMENODE.getName().toLowerCase(), 
+  map.put(StringUtils.toLowerCase(NAMENODE.getName()),
   new NameNodesCommandHandler());
-  map.put(SECONDARY.getName().toLowerCase(),
+  map.put(StringUtils.toLowerCase(SECONDARY.getName()),
   new SecondaryNameNodesCommandHandler());
-  map.put(BACKUP.getName().toLowerCase(), 
+  map.put(StringUtils.toLowerCase(BACKUP.getName()),
   new BackupNodesCommandHandler());
-  map.put(INCLUDE_FILE.getName().toLowerCase(), 
+  map.put(StringUtils.toLowerCase(INCLUDE_FILE.getName()),
   new CommandHandler(DFSConfigKeys.DFS_HOSTS));
-  map.put(EXCLUDE_FILE.getName().toLowerCase(),
+  map.put(StringUtils.toLowerCase(EXCLUDE_FILE.getName()),
   new CommandHandler(DFSConfigKeys.DFS_HOSTS_EXCLUDE));
-  map.put(NNRPCADDRESSES.getName().toLowerCase(),
+  map.put(StringUtils.toLowerCase(NNRPCADDRESSES.getName()),
   new NNRpcAddressesCommandHandler());
-  map.put(CONFKEY.getName().toLowerCase(),
+  map.put(StringUtils.toLowerCase(CONFKEY.getName()),
   new PrintConfKeyCommandHandler());
 }
 
@@ -116,7 +117,7 @@ public class GetConf extends Configured implements Tool {
 }
 
 public static CommandHandler getHandler(String cmd) {
-  return map.get(cmd.toLowerCase());
+  return map.get(StringUtils.toLowerCase(cmd));
 }
   }
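
The GetConf change above applies the same locale-pinned lowercase on both sides of the command map: keys are normalized when registered and again in getHandler(), so lookups stay case-insensitive regardless of the platform locale. A tiny sketch of that pattern (the handler value is a placeholder string, not the real class):

    import java.util.HashMap;
    import java.util.Locale;
    import java.util.Map;

    public class CommandMapDemo {
      public static void main(String[] args) {
        Map<String, String> handlers = new HashMap<>();
        // Normalize once at registration...
        handlers.put("-namenodes".toLowerCase(Locale.ENGLISH), "NameNodesCommandHandler");
        // ...and again at lookup, so any input casing resolves to the same key.
        System.out.println(handlers.get("-NameNodes".toLowerCase(Locale.ENGLISH)));
      }
    }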
  

[3/3] hadoop git commit: HADOOP-11602. Backport 'Fix toUpperCase/toLowerCase to use Locale.ENGLISH.' (ozawa)

2015-03-09 Thread ozawa
HADOOP-11602. Backport 'Fix toUpperCase/toLowerCase to use Locale.ENGLISH.' 
(ozawa)

(cherry picked from commit b46f9e72dbed6fd1f8cae1e12973252462d6ee15)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28f4e6b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28f4e6b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28f4e6b2

Branch: refs/heads/branch-2.7
Commit: 28f4e6b22209893eaa13e8582a12bd28d759e20f
Parents: 3c24e50
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon Mar 9 19:53:03 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon Mar 9 19:54:45 2015 +0900

--
 .../classification/tools/StabilityOptions.java  |  5 ++-
 .../AltKerberosAuthenticationHandler.java   |  6 ++-
 .../authentication/util/KerberosUtil.java   |  2 +-
 .../authentication/util/TestKerberosUtil.java   | 14 ---
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 +
 .../org/apache/hadoop/conf/Configuration.java   |  6 +--
 .../org/apache/hadoop/crypto/CipherSuite.java   |  3 +-
 .../hadoop/crypto/key/JavaKeyStoreProvider.java |  3 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  7 +++-
 .../java/org/apache/hadoop/fs/StorageType.java  |  3 +-
 .../apache/hadoop/fs/permission/AclEntry.java   |  5 ++-
 .../apache/hadoop/fs/shell/XAttrCommands.java   |  2 +-
 .../org/apache/hadoop/fs/shell/find/Name.java   |  5 ++-
 .../io/compress/CompressionCodecFactory.java|  7 ++--
 .../hadoop/metrics2/impl/MetricsConfig.java |  7 ++--
 .../hadoop/metrics2/impl/MetricsSystemImpl.java |  5 ++-
 .../hadoop/record/compiler/CGenerator.java  | 10 +++--
 .../hadoop/record/compiler/CppGenerator.java| 10 +++--
 .../hadoop/record/compiler/generated/Rcc.java   |  5 ++-
 .../hadoop/security/SaslPropertiesResolver.java |  3 +-
 .../apache/hadoop/security/SecurityUtil.java| 12 +++---
 .../hadoop/security/WhitelistBasedResolver.java |  3 +-
 .../security/ssl/FileBasedKeyStoresFactory.java |  4 +-
 .../apache/hadoop/security/ssl/SSLFactory.java  |  5 ++-
 .../security/ssl/SSLHostnameVerifier.java   | 10 +++--
 .../DelegationTokenAuthenticationHandler.java   |  3 +-
 .../web/DelegationTokenAuthenticator.java   |  3 +-
 .../apache/hadoop/util/ComparableVersion.java   |  3 +-
 .../org/apache/hadoop/util/StringUtils.java | 40 +++-
 .../java/org/apache/hadoop/ipc/TestIPC.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java |  2 +-
 .../hadoop/security/TestSecurityUtil.java   |  8 ++--
 .../security/TestUserGroupInformation.java  |  5 ++-
 .../hadoop/test/TimedOutTestsListener.java  |  6 ++-
 .../org/apache/hadoop/util/TestStringUtils.java | 21 ++
 .../org/apache/hadoop/util/TestWinUtils.java|  6 ++-
 .../java/org/apache/hadoop/nfs/NfsExports.java  |  5 ++-
 .../server/CheckUploadContentTypeFilter.java|  4 +-
 .../hadoop/fs/http/server/FSOperations.java |  7 +++-
 .../http/server/HttpFSParametersProvider.java   |  4 +-
 .../org/apache/hadoop/lib/server/Server.java|  3 +-
 .../service/hadoop/FileSystemAccessService.java |  6 ++-
 .../org/apache/hadoop/lib/wsrs/EnumParam.java   |  2 +-
 .../apache/hadoop/lib/wsrs/EnumSetParam.java|  3 +-
 .../hadoop/lib/wsrs/ParametersProvider.java |  3 +-
 .../org/apache/hadoop/hdfs/XAttrHelper.java | 19 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |  3 +-
 .../BlockStoragePolicySuite.java|  4 +-
 .../hdfs/server/common/HdfsServerConstants.java |  5 ++-
 .../hdfs/server/datanode/StorageLocation.java   |  4 +-
 .../hdfs/server/namenode/FSEditLogOp.java   |  3 +-
 .../namenode/QuotaByStorageTypeEntry.java   |  4 +-
 .../hdfs/server/namenode/SecondaryNameNode.java |  2 +-
 .../org/apache/hadoop/hdfs/tools/GetConf.java   | 17 +
 .../OfflineEditsVisitorFactory.java |  7 ++--
 .../offlineImageViewer/FSImageHandler.java  |  4 +-
 .../org/apache/hadoop/hdfs/web/AuthFilter.java  |  3 +-
 .../org/apache/hadoop/hdfs/web/ParamFilter.java |  3 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  4 +-
 .../hadoop/hdfs/web/resources/EnumParam.java|  3 +-
 .../hadoop/hdfs/web/resources/EnumSetParam.java |  3 +-
 .../namenode/snapshot/TestSnapshotManager.java  |  6 +--
 .../jobhistory/JobHistoryEventHandler.java  |  3 +-
 .../mapreduce/v2/app/webapp/AppController.java  |  6 +--
 .../apache/hadoop/mapreduce/TypeConverter.java  |  3 +-
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  4 +-
 .../hadoop/mapreduce/TestTypeConverter.java |  6 ++-
 .../java/org/apache/hadoop/mapred/Task.java |  2 +-
 .../counters/FileSystemCounterGroup.java|  4 +-
 .../mapreduce/filecache/DistributedCache.java   |  4 +-
 .../hadoop/mapreduce/lib/db/DBInputFormat.java  |  5 ++-
 .../org/apache/hadoop/mapreduce/tools/CLI.java  |  9 +++--
 .../java/org/apache/hadoop/fs/TestDFSIO.java| 

[1/3] hadoop git commit: HADOOP-11602. Backport 'Fix toUpperCase/toLowerCase to use Locale.ENGLISH.' (ozawa)

2015-03-09 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 3c24e50ce -> 28f4e6b22


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f4e6b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 35b63eb..9a2bb24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
 import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
@@ -755,7 +756,7 @@ public class ClientRMService extends AbstractService implements
   if (applicationTypes != null && !applicationTypes.isEmpty()) {
 String appTypeToMatch = caseSensitive
 ? application.getApplicationType()
-: application.getApplicationType().toLowerCase();
+: StringUtils.toLowerCase(application.getApplicationType());
 if (!applicationTypes.contains(appTypeToMatch)) {
   continue;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f4e6b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
index 230f9a9..d6e9e45 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.resource;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.util.StringUtils;
 
 @Private
 @Evolving
@@ -61,7 +62,7 @@ public class ResourceWeights {
 sb.append(", ");
   }
   ResourceType resourceType = ResourceType.values()[i];
-  sb.append(resourceType.name().toLowerCase());
+  sb.append(StringUtils.toLowerCase(resourceType.name()));
   sb.append(String.format(" weight=%.1f", getWeight(resourceType)));
 }
 sb.append(">");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f4e6b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 3528c2d..102e553 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java

[2/3] hadoop git commit: HADOOP-11602. Backport 'Fix toUpperCase/toLowerCase to use Locale.ENGLISH.' (ozawa)

2015-03-09 Thread ozawa
http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f4e6b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java
index ddd8a1a..cf59c9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Objects;
 import org.apache.hadoop.fs.StorageType;
-import java.util.Locale;
+import org.apache.hadoop.util.StringUtils;
 
  public class QuotaByStorageTypeEntry {
private StorageType type;
@@ -54,7 +54,7 @@ import java.util.Locale;
public String toString() {
  StringBuilder sb = new StringBuilder();
  assert (type != null);
- sb.append(type.toString().toLowerCase());
+ sb.append(StringUtils.toLowerCase(type.toString()));
  sb.append(':');
  sb.append(quota);
  return sb.toString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f4e6b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 83e6426..ec7e0c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -587,7 +587,7 @@ public class SecondaryNameNode implements Runnable,
   return 0;
 }
 
-String cmd = opts.getCommand().toString().toLowerCase();
+String cmd = StringUtils.toLowerCase(opts.getCommand().toString());
 
 int exitCode = 0;
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28f4e6b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
index 92a16cd..e6cf16c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -79,19 +80,19 @@ public class GetConf extends Configured implements Tool {
 private static final Map<String, CommandHandler> map;
 static  {
   map = new HashMap<String, CommandHandler>();
-  map.put(NAMENODE.getName().toLowerCase(), 
+  map.put(StringUtils.toLowerCase(NAMENODE.getName()),
   new NameNodesCommandHandler());
-  map.put(SECONDARY.getName().toLowerCase(),
+  map.put(StringUtils.toLowerCase(SECONDARY.getName()),
   new SecondaryNameNodesCommandHandler());
-  map.put(BACKUP.getName().toLowerCase(), 
+  map.put(StringUtils.toLowerCase(BACKUP.getName()),
   new BackupNodesCommandHandler());
-  map.put(INCLUDE_FILE.getName().toLowerCase(), 
+  map.put(StringUtils.toLowerCase(INCLUDE_FILE.getName()),
   new CommandHandler(DFSConfigKeys.DFS_HOSTS));
-  map.put(EXCLUDE_FILE.getName().toLowerCase(),
+  map.put(StringUtils.toLowerCase(EXCLUDE_FILE.getName()),
   new CommandHandler(DFSConfigKeys.DFS_HOSTS_EXCLUDE));
-  map.put(NNRPCADDRESSES.getName().toLowerCase(),
+  map.put(StringUtils.toLowerCase(NNRPCADDRESSES.getName()),
   new NNRpcAddressesCommandHandler());
-  map.put(CONFKEY.getName().toLowerCase(),
+  map.put(StringUtils.toLowerCase(CONFKEY.getName()),
   new PrintConfKeyCommandHandler());
 }
 
@@ -116,7 +117,7 @@ public class GetConf extends Configured implements Tool {
 }
 
 public static CommandHandler getHandler(String cmd) {
-  return map.get(cmd.toLowerCase());
+  return map.get(StringUtils.toLowerCase(cmd));
 }
   }
  

hadoop git commit: HADOOP-11226. Add a configuration to set ipc.Client's traffic class with IPTOS_LOWDELAY|IPTOS_RELIABILITY. Contributed by Gopal V.

2015-03-09 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk c3003eba6 -> 54639c7d7


HADOOP-11226. Add a configuration to set ipc.Client's traffic class with 
IPTOS_LOWDELAY|IPTOS_RELIABILITY. Contributed by Gopal V.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54639c7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54639c7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54639c7d

Branch: refs/heads/trunk
Commit: 54639c7d7a34f4a46e8df50d57c79bab34b1ac07
Parents: c3003eb
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 10 13:08:29 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Mar 10 13:08:29 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../fs/CommonConfigurationKeysPublic.java   |  6 +++-
 .../main/java/org/apache/hadoop/ipc/Client.java | 33 ++--
 .../src/main/resources/core-default.xml | 14 +
 4 files changed, 52 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54639c7d/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0fe5b7c..fa73ba1 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -432,6 +432,9 @@ Release 2.8.0 - UNRELEASED
 
   NEW FEATURES
 
+HADOOP-11226. Add a configuration to set ipc.Client's traffic class with
+IPTOS_LOWDELAY|IPTOS_RELIABILITY. (Gopal V via ozawa)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54639c7d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 00c8d78..470b4d0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -206,8 +206,12 @@ public class CommonConfigurationKeysPublic {
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  IPC_CLIENT_TCPNODELAY_KEY =
 "ipc.client.tcpnodelay";
-  /** Defalt value for IPC_CLIENT_TCPNODELAY_KEY */
+  /** Default value for IPC_CLIENT_TCPNODELAY_KEY */
   public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = true;
+  /** Enable low-latency connections from the client */
+  public static final String   IPC_CLIENT_LOW_LATENCY = "ipc.client.low-latency";
+  /** Default value of IPC_CLIENT_LOW_LATENCY */
+  public static final boolean  IPC_CLIENT_LOW_LATENCY_DEFAULT = false;
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  IPC_SERVER_LISTEN_QUEUE_SIZE_KEY =
 "ipc.server.listen.queue.size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54639c7d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 32558bc..97b715b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -384,7 +384,8 @@ public class Client {
 private final RetryPolicy connectionRetryPolicy;
 private final int maxRetriesOnSasl;
 private int maxRetriesOnSocketTimeouts;
-private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+private final boolean tcpLowLatency; // if T then use low-delay QoS
 private boolean doPing; //do we need to send ping message
 private int pingInterval; // how often sends ping to the server in msecs
 private ByteArrayOutputStream pingRequest; // ping message
@@ -413,6 +414,7 @@ public class Client {
   this.maxRetriesOnSasl = remoteId.getMaxRetriesOnSasl();
   this.maxRetriesOnSocketTimeouts = remoteId.getMaxRetriesOnSocketTimeouts();
   this.tcpNoDelay = remoteId.getTcpNoDelay();
+  this.tcpLowLatency = remoteId.getTcpLowLatency();
   this.doPing = remoteId.getDoPing();
   if (doPing) {
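
For context, IPTOS_LOWDELAY (0x10) and IPTOS_RELIABILITY (0x04) are IP type-of-service bits that can be OR-ed together and handed to a TCP socket through java.net.Socket#setTrafficClass, the JDK mechanism a flag like ipc.client.low-latency drives. A minimal sketch with the constant values taken from RFC 1349 and the class name invented for the demo; note the OS may silently ignore the request:

    import java.net.Socket;

    public class TrafficClassDemo {
      private static final int IPTOS_LOWDELAY = 0x10;
      private static final int IPTOS_RELIABILITY = 0x04;

      public static void main(String[] args) throws Exception {
        try (Socket socket = new Socket()) {
          // Best set before connect; the kernel applies it to outgoing packets.
          socket.setTrafficClass(IPTOS_LOWDELAY | IPTOS_RELIABILITY);
          System.out.println("traffic class = 0x"
              + Integer.toHexString(socket.getTrafficClass()));
        }
      }
    }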
  

hadoop git commit: HADOOP-11226. Add a configuration to set ipc.Client's traffic class with IPTOS_LOWDELAY|IPTOS_RELIABILITY. Contributed by Gopal V.

2015-03-09 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 402145174 -> 5efee5efd


HADOOP-11226. Add a configuration to set ipc.Client's traffic class with 
IPTOS_LOWDELAY|IPTOS_RELIABILITY. Contributed by Gopal V.

(cherry picked from commit 54639c7d7a34f4a46e8df50d57c79bab34b1ac07)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5efee5ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5efee5ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5efee5ef

Branch: refs/heads/branch-2
Commit: 5efee5efd3d21cef66e19e0a44d6ed6809658bdd
Parents: 4021451
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 10 13:08:29 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Mar 10 13:09:05 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../fs/CommonConfigurationKeysPublic.java   |  6 +++-
 .../main/java/org/apache/hadoop/ipc/Client.java | 33 ++--
 .../src/main/resources/core-default.xml | 14 +
 4 files changed, 52 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5efee5ef/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a8f5313..d5b3418 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -6,6 +6,9 @@ Release 2.8.0 - UNRELEASED
 
   NEW FEATURES
 
+HADOOP-11226. Add a configuration to set ipc.Client's traffic class with
+IPTOS_LOWDELAY|IPTOS_RELIABILITY. (Gopal V via ozawa)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5efee5ef/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 459b984..2a2cc24 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -206,8 +206,12 @@ public class CommonConfigurationKeysPublic {
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  IPC_CLIENT_TCPNODELAY_KEY =
 "ipc.client.tcpnodelay";
-  /** Defalt value for IPC_CLIENT_TCPNODELAY_KEY */
+  /** Default value for IPC_CLIENT_TCPNODELAY_KEY */
   public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = true;
+  /** Enable low-latency connections from the client */
+  public static final String   IPC_CLIENT_LOW_LATENCY = "ipc.client.low-latency";
+  /** Default value of IPC_CLIENT_LOW_LATENCY */
+  public static final boolean  IPC_CLIENT_LOW_LATENCY_DEFAULT = false;
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  IPC_SERVER_LISTEN_QUEUE_SIZE_KEY =
 "ipc.server.listen.queue.size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5efee5ef/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 0b66cd6..30ccdda 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -384,7 +384,8 @@ public class Client {
 private final RetryPolicy connectionRetryPolicy;
 private final int maxRetriesOnSasl;
 private int maxRetriesOnSocketTimeouts;
-private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+private final boolean tcpLowLatency; // if T then use low-delay QoS
 private boolean doPing; //do we need to send ping message
 private int pingInterval; // how often sends ping to the server in msecs
 private ByteArrayOutputStream pingRequest; // ping message
@@ -413,6 +414,7 @@ public class Client {
   this.maxRetriesOnSasl = remoteId.getMaxRetriesOnSasl();
   this.maxRetriesOnSocketTimeouts = remoteId.getMaxRetriesOnSocketTimeouts();
   this.tcpNoDelay = remoteId.getTcpNoDelay();
+  this.tcpLowLatency = 

hadoop git commit: HADOOP-10115. Exclude duplicate jars in hadoop package under different component's lib (Vinayakumar B via aw)

2015-03-09 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 54639c7d7 -> 47f7f18d4


HADOOP-10115. Exclude duplicate jars in hadoop package under different  
component's lib (Vinayakumar B via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47f7f18d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47f7f18d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47f7f18d

Branch: refs/heads/trunk
Commit: 47f7f18d4cc9145607ef3dfb70aa88748cd9dbec
Parents: 54639c7
Author: Allen Wittenauer a...@apache.org
Authored: Mon Mar 9 21:44:06 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Mon Mar 9 21:44:06 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 hadoop-dist/pom.xml | 89 +---
 2 files changed, 78 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f7f18d/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index fa73ba1..f831d1a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -187,6 +187,9 @@ Trunk (Unreleased)
 HADOOP-11673. Skip using JUnit Assume in TestCodec. (Brahma Reddy Battula
 via cdouglas)
 
+HADOOP-10115. Exclude duplicate jars in hadoop package under different
+component's lib (Vinayakumar B via aw)
+
   BUG FIXES
 
 HADOOP-11473. test-patch says -1 overall even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f7f18d/hadoop-dist/pom.xml
--
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 0c82332..f894c01 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -107,25 +107,86 @@
 fi
   }
 
-  ROOT=`cd ../..;pwd`
+  findFileInDir(){
+    local file="$1";
+    local dir="${2:-./share}";
+    local count=$(find "$dir" -iname "$file"|wc -l)
+    echo "$count";
+  }
+
+  copyIfNotExists(){
+    local src="$1"
+    local srcName="$(basename "$src")"
+    local dest="$2";
+    if [ -f "$src" ]; then
+      if [[ "$srcName" != *.jar ]] || [ "$(findFileInDir "$srcName")" -eq 0 ]; then
+        local destDir="$(dirname "$dest")"
+        mkdir -p "$destDir"
+        cp "$src" "$dest"
+      fi
+    else
+      for childPath in "$src"/* ;
+      do
+        child="$(basename "$childPath")";
+        if [ "$child" == "doc" ] || [ "$child" == "webapps" ]; then
+          mkdir -p "$dest/$child"
+          cp -r "$src/$child"/* "$dest/$child"
+          continue;
+        fi
+        copyIfNotExists "$src/$child" "$dest/$child"
+      done
+    fi
+  }
+
+  #Copy all contents as is except the lib.
+  #for libs check for existence in share directory, if not exist then only copy.
+  copy(){
+    local src="$1";
+    local dest="$2";
+    if [ -d "$src" ]; then
+      for childPath in "$src"/* ;
+      do
+        child="$(basename "$childPath")";
+        if [ "$child" == "share" ]; then
+          copyIfNotExists "$src/$child" "$dest/$child"
+        else
+          if [ -d "$src/$child" ]; then
+            mkdir -p "$dest/$child"
+            cp -r "$src/$child"/* "$dest/$child"
+          else
+            cp -r "$src/$child" "$dest/$child"
+          fi
+        fi
+      done
+    fi
+  }
+
+  # Shellcheck SC2086
+  ROOT=$(cd ${project.build.directory}/../..;pwd)
   echo
-  echo "Current directory `pwd`"
+  echo "Current directory $(pwd)"
   echo
   run rm -rf hadoop-${project.version}
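
The shell logic above reduces to one rule: when assembling the dist tree, copy a jar into a component's lib only if no jar with the same file name already exists under share/. A rough Java analogue of that rule, with illustrative paths that are not the build's real layout (it assumes both directories already exist):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardCopyOption;
    import java.util.stream.Stream;

    public class CopyIfNotExistsDemo {
      // True if a jar with the same file name exists anywhere under shareDir.
      static boolean jarAlreadyShared(Path shareDir, String jarName) throws IOException {
        try (Stream<Path> files = Files.walk(shareDir)) {
          return files.anyMatch(p -> p.getFileName().toString().equalsIgnoreCase(jarName));
        }
      }

      public static void main(String[] args) throws IOException {
        Path src = Paths.get("lib/guava-11.0.2.jar");   // illustrative source jar
        Path shareDir = Paths.get("share");             // shared jar tree
        Path dest = Paths.get("component/lib").resolve(src.getFileName());
        if (!jarAlreadyShared(shareDir, src.getFileName().toString())) {
          Files.createDirectories(dest.getParent());
          Files.copy(src, dest, StandardCopyOption.REPLACE_EXISTING);
        }
      }
    }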
 

hadoop git commit: YARN-3300. Outstanding_resource_requests table should not be shown in AHS. Contributed by Xuan Gong

2015-03-09 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 82db3341b -> c3003eba6


YARN-3300. Outstanding_resource_requests table should not be shown in AHS. 
Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3003eba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3003eba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3003eba

Branch: refs/heads/trunk
Commit: c3003eba6f9802f15699564a5eb7c6e34424cb14
Parents: 82db334
Author: Jian He jia...@apache.org
Authored: Mon Mar 9 17:49:08 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Mon Mar 9 20:46:48 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../webapp/AppAttemptPage.java  |  2 +
 .../yarn/server/webapp/AppAttemptBlock.java | 47 
 .../hadoop/yarn/server/webapp/AppBlock.java | 42 +
 .../resourcemanager/webapp/AppAttemptPage.java  |  2 +
 5 files changed, 56 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3003eba/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1894552..a6dcb29 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -740,6 +740,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3287. Made TimelineClient put methods do as the correct login context.
 (Daryn Sharp and Jonathan Eagles via zjshen)
 
+YARN-3300. Outstanding_resource_requests table should not be shown in AHS.
+(Xuan Gong via jianhe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3003eba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
index 1e0a342..540f6e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
@@ -42,6 +42,8 @@ public class AppAttemptPage extends AHSView {
 set(DATATABLES_ID, "containers");
 set(initID(DATATABLES, "containers"), WebPageUtils.containersTableInit());
 setTableStyles(html, "containers", ".queue {width:6em}", ".ui {width:8em}");
+
+set(YarnWebParams.WEB_UI_TYPE, YarnWebParams.APP_HISTORY_WEB_UI);
   }
 
   @Override
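
The page now tags itself with WEB_UI_TYPE so the shared AppAttemptBlock can suppress the outstanding-resource-requests table when rendering for the history server, where every application has already finished. A toy sketch of that gating decision; WEB_UI_TYPE and APP_HISTORY_WEB_UI are names from the patch, while the string values and RM_WEB_UI below are assumptions for illustration:

    public class WebUiTypeGateDemo {
      static final String APP_HISTORY_WEB_UI = "app-history"; // assumed value
      static final String RM_WEB_UI = "rm";                   // assumed value

      // Outstanding requests only exist for live applications in the RM UI.
      static boolean shouldShowOutstandingRequests(String webUiType) {
        return RM_WEB_UI.equals(webUiType);
      }

      public static void main(String[] args) {
        System.out.println(shouldShowOutstandingRequests(RM_WEB_UI));          // true
        System.out.println(shouldShowOutstandingRequests(APP_HISTORY_WEB_UI)); // false
      }
    }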

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3003eba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
index ea33f4f..1bba4d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
@@ -19,6 +19,11 @@ package org.apache.hadoop.yarn.server.webapp;
 
 import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ATTEMPT_ID;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.WEB_UI_TYPE;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
@@ -38,7 +43,9 @@ import 

hadoop git commit: HADOOP-11668. hadoop-daemons.sh bw compat broke with --slaves change (Vinayakumar B via aw)

2015-03-09 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 47f7f18d4 -> 771104983


HADOOP-11668. hadoop-daemons.sh bw compat broke with --slaves change 
(Vinayakumar B via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77110498
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77110498
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77110498

Branch: refs/heads/trunk
Commit: 7711049837d69d0eeabad27f2e30fab606a4adc2
Parents: 47f7f18
Author: Allen Wittenauer a...@apache.org
Authored: Mon Mar 9 22:31:50 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Mon Mar 9 22:31:50 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../hadoop-common/src/main/bin/hadoop-daemons.sh  | 13 +
 .../hadoop-common/src/main/bin/hadoop-functions.sh| 14 +-
 hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh   | 13 +
 4 files changed, 34 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77110498/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index f831d1a..7d0cbee 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -423,6 +423,9 @@ Trunk (Unreleased)
 HADOOP-11653. shellprofiles should require .sh extension
 (Brahma Reddy Battula via aw)
 
+HADOOP-11668. hadoop-daemons.sh bw compat broke with --slaves change
+(Vinayakumar B via aw)
+
   OPTIMIZATIONS
 
 HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77110498/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
index 9e4e6b0..2619ab7 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
@@ -65,8 +65,13 @@ hadoop_error "WARNING: Attempting to execute replacement \"hdfs --slaves --daemo
 # we're going to turn this into
 #  hdfs --slaves --daemon (start|stop) (rest of options)
 #
-argv=("${HADOOP_USER_PARAMS[@]/start}")
-argv=("${argv[@]/stop}")
-argv=("${argv[@]/status}")
+for (( i = 0; i < ${#HADOOP_USER_PARAMS[@]}; i++ ))
+do
+  if [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^start$ ]] ||
+     [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^stop$ ]] ||
+     [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^status$ ]]; then
+    unset HADOOP_USER_PARAMS[$i]
+  fi
+done
 
-${hdfsscript} --slaves --daemon ${daemonmode} "${argv[@]}"
+${hdfsscript} --slaves --daemon ${daemonmode} "${HADOOP_USER_PARAMS[@]}"
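
Why the old lines broke compatibility: ${array[@]/pattern} is substring substitution, so it rewrote every element containing "start", "stop", or "status" instead of dropping the exact token, mangling otherwise-valid arguments; the replacement loop unsets only exact matches. The same distinction in a small Java sketch (class name illustrative; replace() stands in for the bash substitution):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ExactMatchFilterDemo {
      public static void main(String[] args) {
        List<String> params =
            new ArrayList<>(Arrays.asList("--config", "etc/hadoop", "start", "restart-marker"));

        // Substring removal, like the old bash substitution: corrupts "restart-marker".
        List<String> bySubstring = new ArrayList<>();
        for (String p : params) {
          bySubstring.add(p.replace("start", ""));
        }
        System.out.println(bySubstring); // [--config, etc/hadoop, , re-marker]

        // Exact-match removal, like the fixed loop: drops only the bare token.
        List<String> byExactMatch = new ArrayList<>(params);
        byExactMatch.removeIf(p -> p.matches("start|stop|status"));
        System.out.println(byExactMatch); // [--config, etc/hadoop, restart-marker]
      }
    }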

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77110498/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 9488e3c..8129c5c 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -415,7 +415,19 @@ function hadoop_common_slave_mode_execute
 
   # if --slaves is still on the command line, remove it
   # to prevent loops
-  argv=("${argv[@]/--slaves}")
+  # Also remove --hostnames and --hosts along with arg values
+  local argsSize=${#argv[@]};
+  for (( i = 0; i < $argsSize; i++ ))
+  do
+    if [[ "${argv[$i]}" =~ ^--slaves$ ]]; then
+      unset argv[$i]
+    elif [[ "${argv[$i]}" =~ ^--hostnames$ ]] ||
+      [[ "${argv[$i]}" =~ ^--hosts$ ]]; then
+      unset argv[$i];
+      let i++;
+      unset argv[$i];
+    fi
+  done
   hadoop_connect_to_hosts -- "${argv[@]}"
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77110498/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh b/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh
index c6963d9..75fb1f8 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh
@@ -55,9 +55,14 @@ hadoop_error "WARNING: Attempting to execute replacement \"yarn --slaves --daemo
 # we're going to turn this into
 #  yarn --slaves --daemon (start|stop) (rest of options)
 #
-argv=("${HADOOP_USER_PARAMS[@]/start}")
-argv=("${argv[@]/stop}")

hadoop git commit: HDFS-6806. HDFS Rolling upgrade document should mention the versions available. Contributed by J.Andreina.

2015-03-09 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk d6e05c5ee -> 82db3341b


HDFS-6806. HDFS Rolling upgrade document should mention the versions available. 
Contributed by J.Andreina.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82db3341
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82db3341
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82db3341

Branch: refs/heads/trunk
Commit: 82db3341bfb344f10c4c6cc8eea0d8c19e05956a
Parents: d6e05c5
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue Mar 10 11:22:11 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue Mar 10 11:22:11 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82db3341/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 094abfe..a2e552a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -737,6 +737,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast.
 (Tsz Wo Nicholas Sze via jing9)
 
+HDFS-6806. HDFS Rolling upgrade document should mention the versions
+available. (J.Andreina via aajisaka)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82db3341/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
index f2f3ebe..28649a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
@@ -47,7 +47,9 @@
 These two capabilities make it feasible to upgrade HDFS without incurring HDFS downtime.
 In order to upgrade a HDFS cluster without downtime, the cluster must be setup with HA.
   </p>
-
+  <p>
+    Note that rolling upgrade is supported only from Hadoop-2.4.0 onwards.
+  </p>
   <subsection name="Upgrade without Downtime" id="UpgradeWithoutDowntime">
   <p>
 In a HA cluster, there are two or more <em>NameNodes (NNs)</em>, many <em>DataNodes (DNs)</em>,



hadoop git commit: HADOOP-11646. Erasure Coder API for encoding and decoding of block group ( Contributed by Kai Zheng )

2015-03-09 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 80401779f -> 720901acf


HADOOP-11646. Erasure Coder API for encoding and decoding of block group ( 
Contributed by Kai Zheng )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/720901ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/720901ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/720901ac

Branch: refs/heads/HDFS-7285
Commit: 720901acfc9085e35dbd2ab0f4eaa8c1cb249e81
Parents: 8040177
Author: Vinayakumar B vinayakum...@apache.org
Authored: Mon Mar 9 12:32:26 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Mon Mar 9 12:32:26 2015 +0530

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   2 +
 .../apache/hadoop/io/erasurecode/ECBlock.java   |  80 ++
 .../hadoop/io/erasurecode/ECBlockGroup.java |  82 ++
 .../erasurecode/coder/AbstractErasureCoder.java |  63 +
 .../coder/AbstractErasureCodingStep.java|  59 
 .../coder/AbstractErasureDecoder.java   | 152 +++
 .../coder/AbstractErasureEncoder.java   |  50 
 .../io/erasurecode/coder/ErasureCoder.java  |  77 ++
 .../io/erasurecode/coder/ErasureCodingStep.java |  55 
 .../io/erasurecode/coder/ErasureDecoder.java|  41 +++
 .../erasurecode/coder/ErasureDecodingStep.java  |  52 
 .../io/erasurecode/coder/ErasureEncoder.java|  39 +++
 .../erasurecode/coder/ErasureEncodingStep.java  |  49 
 .../io/erasurecode/coder/XorErasureDecoder.java |  78 ++
 .../io/erasurecode/coder/XorErasureEncoder.java |  45 
 .../erasurecode/rawcoder/RawErasureCoder.java   |   2 +-
 .../erasurecode/coder/TestErasureCoderBase.java | 266 +++
 .../io/erasurecode/coder/TestXorCoder.java  |  50 
 18 files changed, 1241 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/720901ac/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index ee42c84..c17a1bd 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -15,4 +15,6 @@
 HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai Zheng
 ( Kai Zheng )
 
+HADOOP-11646. Erasure Coder API for encoding and decoding of block group
+( Kai Zheng via vinayakumarb )
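
Behind the new XorErasureEncoder/XorErasureDecoder pair is the simplest erasure code: a single parity block formed by XOR-ing the data blocks, which can reconstruct any one erased block. A self-contained sketch of that scheme on plain byte arrays (not the ECBlock/ECChunk API itself):

    import java.util.Arrays;

    public class XorParityDemo {
      public static void main(String[] args) {
        byte[] d0 = {1, 2, 3};
        byte[] d1 = {4, 5, 6};

        // Encode: parity[i] = d0[i] ^ d1[i].
        byte[] parity = new byte[d0.length];
        for (int i = 0; i < parity.length; i++) {
          parity[i] = (byte) (d0[i] ^ d1[i]);
        }

        // Decode: recover an erased d1 by XOR-ing parity with the survivor d0.
        byte[] recovered = new byte[d0.length];
        for (int i = 0; i < recovered.length; i++) {
          recovered[i] = (byte) (parity[i] ^ d0[i]);
        }
        System.out.println(Arrays.toString(recovered)); // [4, 5, 6]
      }
    }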
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/720901ac/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlock.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlock.java
new file mode 100644
index 000..956954a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlock.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode;
+
+/**
+ * A wrapper of block level data source/output that {@link ECChunk}s can be
+ * extracted from. For HDFS, it can be an HDFS block (250MB). Note it only cares
+ * about erasure coding specific logic thus avoids coupling with any HDFS block
+ * details. We can have something like HdfsBlock extend it.
+ */
+public class ECBlock {
+
+  private boolean isParity;
+  private boolean isErased;
+
+  /**
+   * A default constructor. isParity and isErased are false by default.
+   */
+  public ECBlock() {
+this(false, false);
+  }
+
+  /**
+   * A constructor specifying isParity and isErased.
+   * @param isParity
+   * @param isErased
+   */
+  public ECBlock(boolean isParity, boolean isErased) {
+this.isParity = isParity;
+this.isErased =