hadoop git commit: HDFS-8117. More accurate verification in SimulatedFSDataset: replace DEFAULT_DATABYTE with patterned data. Contributed by Zhe Zhang.

2015-04-15 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 01af29106 -> 871bf6a76


HDFS-8117. More accurate verification in SimulatedFSDataset: replace 
DEFAULT_DATABYTE with patterned data. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/871bf6a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/871bf6a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/871bf6a7

Branch: refs/heads/branch-2
Commit: 871bf6a765b56215fc88c3dcfb52be4c209b82c1
Parents: 01af291
Author: Andrew Wang w...@apache.org
Authored: Wed Apr 15 08:43:42 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Wed Apr 15 08:43:42 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java | 25 
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 12 +-
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 19 +--
 .../org/apache/hadoop/hdfs/TestSmallBlock.java  | 14 ++-
 .../server/datanode/SimulatedFSDataset.java | 25 ++--
 .../server/datanode/TestSimulatedFSDataset.java |  3 ++-
 7 files changed, 69 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/871bf6a7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bf9a634..32df2f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -113,6 +113,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8083. Move dfs.client.write.* conf from DFSConfigKeys to 
 HdfsClientConfigKeys.Write.  (szetszwo)
 
+HDFS-8117. More accurate verification in SimulatedFSDataset: replace
+DEFAULT_DATABYTE with patterned data. (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/871bf6a7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index c3cefdf..aa73499 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -117,6 +118,7 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
@@ -1776,4 +1778,27 @@ public class DFSTestUtil {
 dn.setLastUpdate(Time.now() + offset);
 dn.setLastUpdateMonotonic(Time.monotonicNow() + offset);
   }
+
+  /**
+   * This method takes a set of block locations and fills the provided buffer
+   * with expected bytes based on simulated content from
+   * {@link SimulatedFSDataset}.
+   *
+   * @param lbs The block locations of a file
+   * @param expected The buffer to be filled with expected bytes on the above
+   * locations.
+   */
+  public static void fillExpectedBuf(LocatedBlocks lbs, byte[] expected) {
+Block[] blks = new Block[lbs.getLocatedBlocks().size()];
+for (int i = 0; i < lbs.getLocatedBlocks().size(); i++) {
+  blks[i] = lbs.getLocatedBlocks().get(i).getBlock().getLocalBlock();
+}
+int bufPos = 0;
+for (Block b : blks) {
+  for (long blkPos = 0; blkPos < b.getNumBytes(); blkPos++) {
+assert bufPos < expected.length;
+expected[bufPos++] = SimulatedFSDataset.simulatedByte(b, blkPos);
+  }
+}
+  }
 }
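
For context, a minimal sketch of how a test could consume the new helper against a
cluster backed by SimulatedFSDataset; it is not part of the patch, and the client,
fs, path and fileLength handles are assumed to come from the surrounding test setup:

  // Sketch only: bytes read back should match the per-block simulated pattern.
  LocatedBlocks lbs = client.getLocatedBlocks(path, 0, fileLength);
  byte[] expected = new byte[(int) fileLength];
  DFSTestUtil.fillExpectedBuf(lbs, expected);

  byte[] actual = new byte[(int) fileLength];
  try (FSDataInputStream in = fs.open(new Path(path))) {
    IOUtils.readFully(in, actual, 0, actual.length);
  }
  Assert.assertArrayEquals(expected, actual);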


hadoop git commit: YARN-3318. Create Initial OrderingPolicy Framework and FifoOrderingPolicy. (Craig Welch via wangda)

2015-04-15 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk fddd55279 -> 5004e7533


YARN-3318. Create Initial OrderingPolicy Framework and FifoOrderingPolicy. 
(Craig Welch via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5004e753
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5004e753
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5004e753

Branch: refs/heads/trunk
Commit: 5004e753322084e42dfda4be1d2db66677f86a1e
Parents: fddd552
Author: Wangda Tan wan...@apache.org
Authored: Wed Apr 15 09:56:32 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Apr 15 09:56:32 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../dev-support/findbugs-exclude.xml|   4 +
 .../scheduler/ResourceUsage.java|  55 -
 .../AbstractComparatorOrderingPolicy.java   | 119 +++
 .../scheduler/policy/FifoComparator.java|  37 ++
 .../scheduler/policy/FifoOrderingPolicy.java|  54 +
 .../scheduler/policy/OrderingPolicy.java| 109 +
 .../scheduler/policy/SchedulableEntity.java |  51 
 .../scheduler/policy/MockSchedulableEntity.java |  78 
 .../policy/TestFifoOrderingPolicy.java  |  83 +
 10 files changed, 592 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5004e753/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2db02a2a..ecdda61 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -75,6 +75,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3361. CapacityScheduler side changes to support non-exclusive node
 labels. (Wangda Tan via jianhe)
 
+YARN-3318. Create Initial OrderingPolicy Framework and FifoOrderingPolicy.
+(Craig Welch via wangda)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5004e753/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 375d19c..4b01a4d 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -141,6 +141,10 @@
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.resource.Priority$Comparator" />
     <Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE" />
   </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FifoComparator" />
+    <Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE" />
+  </Match>
   <!-- Ignore some irrelevant class name warning -->
   <Match>
     <Class name="org.apache.hadoop.yarn.api.records.SerializedException" />

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5004e753/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
index 5169b78..2f7e19d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
@@ -57,7 +57,10 @@ public class ResourceUsage {
 
   // Usage enum here to make implement cleaner
   private enum ResourceType {
-USED(0), PENDING(1), AMUSED(2), RESERVED(3);
+//CACHED_USED and CACHED_PENDING may be read by anyone, but must only
+//be written by ordering policies
+USED(0), PENDING(1), AMUSED(2), RESERVED(3), CACHED_USED(4),
+  CACHED_PENDING(5);
 
 private int idx;
 
@@ -102,6 +105,14 @@ public class ResourceUsage {
   public Resource getUsed(String label) {
 return _get(label, ResourceType.USED);
   }
+  
+  public Resource getCachedUsed(String label) {
+return _get(label, ResourceType.CACHED_USED);
+  }
+  
+  public 
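
As a rough illustration of the ordering idea this framework introduces (a toy example
only, not the actual FifoComparator/SchedulableEntity API added by the patch):

  // Toy sketch: FIFO ordering over a minimal, hypothetical schedulable type.
  interface SimpleSchedulable {
    long getSerial();                 // monotonically increasing submission order
  }

  class SimpleFifoComparator implements java.util.Comparator<SimpleSchedulable> {
    @Override
    public int compare(SimpleSchedulable a, SimpleSchedulable b) {
      // Earlier submission (smaller serial) is offered resources first.
      return Long.compare(a.getSerial(), b.getSerial());
    }
  }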

hadoop git commit: YARN-3318. Create Initial OrderingPolicy Framework and FifoOrderingPolicy. (Craig Welch via wangda)

2015-04-15 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 871bf6a76 -> 89a7c9843


YARN-3318. Create Initial OrderingPolicy Framework and FifoOrderingPolicy. 
(Craig Welch via wangda)

(cherry picked from commit 5004e753322084e42dfda4be1d2db66677f86a1e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89a7c984
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89a7c984
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89a7c984

Branch: refs/heads/branch-2
Commit: 89a7c9843605efcc9f6d6ee6df47260b035520dd
Parents: 871bf6a
Author: Wangda Tan wan...@apache.org
Authored: Wed Apr 15 09:56:32 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Apr 15 09:57:52 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../dev-support/findbugs-exclude.xml|   4 +
 .../scheduler/ResourceUsage.java|  55 -
 .../AbstractComparatorOrderingPolicy.java   | 119 +++
 .../scheduler/policy/FifoComparator.java|  37 ++
 .../scheduler/policy/FifoOrderingPolicy.java|  54 +
 .../scheduler/policy/OrderingPolicy.java| 109 +
 .../scheduler/policy/SchedulableEntity.java |  51 
 .../scheduler/policy/MockSchedulableEntity.java |  78 
 .../policy/TestFifoOrderingPolicy.java  |  83 +
 10 files changed, 592 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a7c984/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b5e850e..bcbf445 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -27,6 +27,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3361. CapacityScheduler side changes to support non-exclusive node
 labels. (Wangda Tan via jianhe)
 
+YARN-3318. Create Initial OrderingPolicy Framework and FifoOrderingPolicy.
+(Craig Welch via wangda)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a7c984/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 375d19c..4b01a4d 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -141,6 +141,10 @@
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.resource.Priority$Comparator" />
     <Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE" />
   </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FifoComparator" />
+    <Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE" />
+  </Match>
   <!-- Ignore some irrelevant class name warning -->
   <Match>
     <Class name="org.apache.hadoop.yarn.api.records.SerializedException" />

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a7c984/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
index 5169b78..2f7e19d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
@@ -57,7 +57,10 @@ public class ResourceUsage {
 
   // Usage enum here to make implement cleaner
   private enum ResourceType {
-USED(0), PENDING(1), AMUSED(2), RESERVED(3);
+//CACHED_USED and CACHED_PENDING may be read by anyone, but must only
+//be written by ordering policies
+USED(0), PENDING(1), AMUSED(2), RESERVED(3), CACHED_USED(4),
+  CACHED_PENDING(5);
 
 private int idx;
 
@@ -102,6 +105,14 @@ public class ResourceUsage {
   public Resource getUsed(String label) {
 return _get(label, ResourceType.USED);
   }
+  
+  public Resource getCachedUsed(String 

[1/2] hadoop git commit: HDFS-8144. Split TestLazyPersistFiles into multiple tests. (Arpit Agarwal)

2015-04-15 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 89a7c9843 -> 96e12fa46
  refs/heads/trunk 5004e7533 -> 9e8309a1b


HDFS-8144. Split TestLazyPersistFiles into multiple tests. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e8309a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e8309a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e8309a1

Branch: refs/heads/trunk
Commit: 9e8309a1b2989d07d43e20940d9ac12b7b43482f
Parents: 5004e75
Author: Arpit Agarwal a...@apache.org
Authored: Wed Apr 15 10:25:04 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Wed Apr 15 10:25:04 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../fsdataset/impl/LazyPersistTestCase.java |   1 +
 .../fsdataset/impl/TestLazyPersistFiles.java| 459 +--
 .../fsdataset/impl/TestLazyPersistPolicy.java   |  91 
 .../impl/TestLazyPersistReplicaPlacement.java   | 148 ++
 .../impl/TestLazyPersistReplicaRecovery.java|  75 +++
 .../datanode/fsdataset/impl/TestLazyWriter.java | 276 +++
 7 files changed, 594 insertions(+), 458 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e8309a1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 690056d..5a0f6f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -434,6 +434,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8117. More accurate verification in SimulatedFSDataset: replace
 DEFAULT_DATABYTE with patterned data. (Zhe Zhang via wang)
 
+HDFS-8144. Split TestLazyPersistFiles into multiple tests. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e8309a1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index 89a70c9..baa540d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -64,6 +64,7 @@ import static org.junit.Assert.assertThat;
 import static org.junit.Assert.fail;
 
 public abstract class LazyPersistTestCase {
+  static final byte LAZY_PERSIST_POLICY_ID = (byte) 15;
 
   static {
 DFSTestUtil.setNameNodeLogLevel(Level.ALL);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e8309a1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
index bd64cbe..30e5d26 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
@@ -48,182 +48,8 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 public class TestLazyPersistFiles extends LazyPersistTestCase {
-  private static final byte LAZY_PERSIST_POLICY_ID = (byte) 15;
-
   private static final int THREADPOOL_SIZE = 10;
 
-  @Test
-  public void testPolicyNotSetByDefault() throws IOException {
-startUpCluster(false, -1);
-final String METHOD_NAME = GenericTestUtils.getMethodName();
-Path path = new Path("/" + METHOD_NAME + ".dat");
-
-makeTestFile(path, 0, false);
-// Stat the file and check that the LAZY_PERSIST policy is not
-// returned back.
-HdfsFileStatus status = client.getFileInfo(path.toString());
-assertThat(status.getStoragePolicy(), not(LAZY_PERSIST_POLICY_ID));
-  }
-
-  @Test
-  public void testPolicyPropagation() throws IOException {
-startUpCluster(false, -1);
-final String METHOD_NAME = GenericTestUtils.getMethodName();
-Path path = new 
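
Loosely, the shape of the split (only LazyPersistTestCase and the class names in the
diffstat come from the patch; which subclass each moved test landed in is an assumption
made here for illustration):

  // Shared fixtures stay in the abstract base; each concern gets its own subclass.
  public abstract class LazyPersistTestCase {
    static final byte LAZY_PERSIST_POLICY_ID = (byte) 15;
    // cluster startup helpers, makeTestFile(), etc.
  }

  public class TestLazyPersistPolicy extends LazyPersistTestCase {
    @Test
    public void testPolicyNotSetByDefault() throws IOException {
      // body moved unchanged from TestLazyPersistFiles
    }
  }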

[2/2] hadoop git commit: HDFS-8144. Split TestLazyPersistFiles into multiple tests. (Arpit Agarwal)

2015-04-15 Thread arp
HDFS-8144. Split TestLazyPersistFiles into multiple tests. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96e12fa4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96e12fa4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96e12fa4

Branch: refs/heads/branch-2
Commit: 96e12fa46a67b9611416871a9de6183e4ecddf47
Parents: 89a7c98
Author: Arpit Agarwal a...@apache.org
Authored: Wed Apr 15 10:25:04 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Wed Apr 15 10:25:16 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../fsdataset/impl/LazyPersistTestCase.java |   1 +
 .../fsdataset/impl/TestLazyPersistFiles.java| 459 +--
 .../fsdataset/impl/TestLazyPersistPolicy.java   |  91 
 .../impl/TestLazyPersistReplicaPlacement.java   | 148 ++
 .../impl/TestLazyPersistReplicaRecovery.java|  75 +++
 .../datanode/fsdataset/impl/TestLazyWriter.java | 276 +++
 7 files changed, 594 insertions(+), 458 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96e12fa4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 32df2f7..a8a44c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -116,6 +116,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8117. More accurate verification in SimulatedFSDataset: replace
 DEFAULT_DATABYTE with patterned data. (Zhe Zhang via wang)
 
+HDFS-8144. Split TestLazyPersistFiles into multiple tests. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96e12fa4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index 89a70c9..baa540d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -64,6 +64,7 @@ import static org.junit.Assert.assertThat;
 import static org.junit.Assert.fail;
 
 public abstract class LazyPersistTestCase {
+  static final byte LAZY_PERSIST_POLICY_ID = (byte) 15;
 
   static {
 DFSTestUtil.setNameNodeLogLevel(Level.ALL);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96e12fa4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
index bd64cbe..30e5d26 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
@@ -48,182 +48,8 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 public class TestLazyPersistFiles extends LazyPersistTestCase {
-  private static final byte LAZY_PERSIST_POLICY_ID = (byte) 15;
-
   private static final int THREADPOOL_SIZE = 10;
 
-  @Test
-  public void testPolicyNotSetByDefault() throws IOException {
-startUpCluster(false, -1);
-final String METHOD_NAME = GenericTestUtils.getMethodName();
-Path path = new Path("/" + METHOD_NAME + ".dat");
-
-makeTestFile(path, 0, false);
-// Stat the file and check that the LAZY_PERSIST policy is not
-// returned back.
-HdfsFileStatus status = client.getFileInfo(path.toString());
-assertThat(status.getStoragePolicy(), not(LAZY_PERSIST_POLICY_ID));
-  }
-
-  @Test
-  public void testPolicyPropagation() throws IOException {
-startUpCluster(false, -1);
-final String METHOD_NAME = GenericTestUtils.getMethodName();
-Path path = new Path("/" + METHOD_NAME + ".dat");
-
-makeTestFile(path, 0, true);
-// Stat the file and check that the lazyPersist 

hadoop git commit: HDFS-8149. The footer of the Web UI "Hadoop, 2014" is old. Contributed by Brahma Reddy Battula.

2015-04-15 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 725621d95 -> 6d183561f


HDFS-8149. The footer of the Web UI "Hadoop, 2014" is old. Contributed by 
Brahma Reddy Battula.

(cherry picked from commit de0f1700c150a819b38028c44ef1926507086e6c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d183561
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d183561
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d183561

Branch: refs/heads/branch-2
Commit: 6d183561ffbbd87ebaca9b67fe9bda727724aff7
Parents: 725621d
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Apr 15 15:31:04 2015 -0500
Committer: Akira Ajisaka aajis...@apache.org
Committed: Wed Apr 15 15:32:08 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/main/webapps/datanode/index.html  | 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html  | 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html   | 2 +-
 .../hadoop-hdfs/src/main/webapps/journal/index.html   | 2 +-
 .../hadoop-hdfs/src/main/webapps/secondary/status.html| 2 +-
 6 files changed, 8 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d183561/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5c8b2d5..aa3e9da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -210,6 +210,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-7934. Update RollingUpgrade rollback documentation: should use
 bootstrapstandby for standby NN. (J. Andreina via jing9)
 
+HDFS-8149. The footer of the Web UI "Hadoop, 2014" is old.
+(Brahma Reddy Battula via aajisaka)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d183561/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
index 04cb703..aeb61c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
@@ -47,7 +47,7 @@
 
 <div class="row">
   <hr />
-  <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+  <div class="col-xs-2"><p>Hadoop, 2015.</p></div>
 </div>
 </div>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d183561/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 928431c..5a3a309 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -67,7 +67,7 @@
 
 <div class="row">
   <hr />
-  <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+  <div class="col-xs-2"><p>Hadoop, 2015.</p></div>
 </div>
 </div>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d183561/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index cd6623c..fbea6ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -142,7 +142,7 @@
 
   <div class="row">
     <hr />
-    <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+    <div class="col-xs-2"><p>Hadoop, 2015.</p></div>
   </div>

 </div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d183561/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
index 5eabf64..4d658c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
@@ -47,7 +47,7 @@
 
 <div class="row">
   <hr />
-  <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+  <div class="col-xs-2"><p>Hadoop, 2015.</p></div>
 </div>
 </div>
 


hadoop git commit: HDFS-8149. The footer of the Web UI "Hadoop, 2014" is old. Contributed by Brahma Reddy Battula.

2015-04-15 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 e1ce0700e -> 35e1936f8


HDFS-8149. The footer of the Web UI "Hadoop, 2014" is old. Contributed by 
Brahma Reddy Battula.

(cherry picked from commit de0f1700c150a819b38028c44ef1926507086e6c)
(cherry picked from commit 6d183561ffbbd87ebaca9b67fe9bda727724aff7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35e1936f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35e1936f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35e1936f

Branch: refs/heads/branch-2.7
Commit: 35e1936f84186b3edfe68c6ccf65e0a7ef772942
Parents: e1ce070
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Apr 15 15:31:04 2015 -0500
Committer: Akira Ajisaka aajis...@apache.org
Committed: Wed Apr 15 15:32:29 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/main/webapps/datanode/index.html  | 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html  | 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html   | 2 +-
 .../hadoop-hdfs/src/main/webapps/journal/index.html   | 2 +-
 .../hadoop-hdfs/src/main/webapps/secondary/status.html| 2 +-
 6 files changed, 8 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35e1936f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 242a2c8..4db159b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -26,6 +26,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-7934. Update RollingUpgrade rollback documentation: should use
 bootstrapstandby for standby NN. (J. Andreina via jing9)
 
+HDFS-8149. The footer of the Web UI "Hadoop, 2014" is old.
+(Brahma Reddy Battula via aajisaka)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35e1936f/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
index 04cb703..aeb61c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
@@ -47,7 +47,7 @@
 
 <div class="row">
   <hr />
-  <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+  <div class="col-xs-2"><p>Hadoop, 2015.</p></div>
 </div>
 </div>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35e1936f/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 391ca79..3d2a906 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -67,7 +67,7 @@
 
 <div class="row">
   <hr />
-  <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+  <div class="col-xs-2"><p>Hadoop, 2015.</p></div>
 </div>
 </div>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35e1936f/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 7b34044..433c75f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -99,7 +99,7 @@
 
   <div class="row">
     <hr />
-    <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+    <div class="col-xs-2"><p>Hadoop, 2015.</p></div>
   </div>

 </div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35e1936f/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
index 5eabf64..4d658c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
@@ -47,7 +47,7 @@
 
 <div class="row">
   <hr />
-  <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+  <div class="col-xs-2"><p>Hadoop, 2015.</p></div>
 </div>
 </div>
 


hadoop git commit: HDFS-8149. The footer of the Web UI "Hadoop, 2014" is old. Contributed by Brahma Reddy Battula.

2015-04-15 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk b172d0359 -> de0f1700c


HDFS-8149. The footer of the Web UI "Hadoop, 2014" is old. Contributed by 
Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de0f1700
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de0f1700
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de0f1700

Branch: refs/heads/trunk
Commit: de0f1700c150a819b38028c44ef1926507086e6c
Parents: b172d03
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Apr 15 15:31:04 2015 -0500
Committer: Akira Ajisaka aajis...@apache.org
Committed: Wed Apr 15 15:31:04 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/main/webapps/datanode/index.html  | 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html  | 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html   | 2 +-
 .../hadoop-hdfs/src/main/webapps/journal/index.html   | 2 +-
 .../hadoop-hdfs/src/main/webapps/secondary/status.html| 2 +-
 6 files changed, 8 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de0f1700/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 60fff16..6523423 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -528,6 +528,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-7934. Update RollingUpgrade rollback documentation: should use
 bootstrapstandby for standby NN. (J. Andreina via jing9)
 
+HDFS-8149. The footer of the Web UI "Hadoop, 2014" is old.
+(Brahma Reddy Battula via aajisaka)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de0f1700/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
index 04cb703..aeb61c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html
@@ -47,7 +47,7 @@
 
 <div class="row">
   <hr />
-  <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+  <div class="col-xs-2"><p>Hadoop, 2015.</p></div>
 </div>
 </div>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de0f1700/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 928431c..5a3a309 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -67,7 +67,7 @@
 
 <div class="row">
   <hr />
-  <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+  <div class="col-xs-2"><p>Hadoop, 2015.</p></div>
 </div>
 </div>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de0f1700/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index cd6623c..fbea6ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -142,7 +142,7 @@
 
   <div class="row">
     <hr />
-    <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+    <div class="col-xs-2"><p>Hadoop, 2015.</p></div>
   </div>

 </div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de0f1700/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
index 5eabf64..4d658c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
@@ -47,7 +47,7 @@
 
 <div class="row">
   <hr />
-  <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+  <div class="col-xs-2"><p>Hadoop, 2015.</p></div>
 </div>
 </div>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de0f1700/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html

hadoop git commit: YARN-3404. Display queue name on application page. Contributed by Ryu Kobayashi (cherry picked from commit b2e6cf607f1712d103520ca6b3ff21ecc07cd265)

2015-04-15 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6d183561f -> 58f99b740


YARN-3404. Display queue name on application page. Contributed by Ryu Kobayashi
(cherry picked from commit b2e6cf607f1712d103520ca6b3ff21ecc07cd265)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58f99b74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58f99b74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58f99b74

Branch: refs/heads/branch-2
Commit: 58f99b740afeaffa1ec46ec9fd7fee3b53399e9e
Parents: 6d18356
Author: Jian He jia...@apache.org
Authored: Wed Apr 15 13:52:50 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Wed Apr 15 13:53:24 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 2 ++
 .../java/org/apache/hadoop/yarn/server/webapp/AppBlock.java   | 7 ++-
 2 files changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58f99b74/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ebeb7c2..06b7d75 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -83,6 +83,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3394. Enrich WebApplication proxy documentation. (Naganarasimha G R
 via jianhe)
 
+YARN-3404. Display queue name on application page. (Ryu Kobayashi via 
jianhe)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58f99b74/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index d5a3dd8..0d80339 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -157,8 +158,11 @@ public class AppBlock extends HtmlBlock {
       html.script().$type("text/javascript")._(script.toString())._();
     }

+    String schedulerPath = WebAppUtils.getResolvedRMWebAppURLWithScheme(conf) +
+        "/cluster/scheduler?openQueues=" + app.getQueue();
+
     ResponseInfo overviewTable = info("Application Overview")
-      ._("User:", app.getUser())
+      ._("User:", schedulerPath, app.getUser())
       ._("Name:", app.getName())
       ._("Application Type:", app.getType())
       ._("Application Tags:",
@@ -167,6 +171,7 @@ public class AppBlock extends HtmlBlock {
         "YarnApplicationState:",
         app.getAppState() == null ? UNAVAILABLE : clarifyAppState(app
           .getAppState()))
+      ._("Queue:", schedulerPath, app.getQueue())
       ._("FinalStatus Reported by AM:",
         clairfyAppFinalStatus(app.getFinalAppStatus()))
       ._("Started:", Times.format(app.getStartedTime()))
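
For reference, a rough sketch of the link the new schedulerPath produces; the host,
port and queue name below are made-up values, and the real code resolves the RM
address via WebAppUtils.getResolvedRMWebAppURLWithScheme(conf):

  String rmWebApp = "http://rm-host:8088";     // assumed resolved RM web address
  String queue = "default";                    // assumed queue of the application
  String schedulerPath = rmWebApp + "/cluster/scheduler?openQueues=" + queue;
  // -> http://rm-host:8088/cluster/scheduler?openQueues=default, i.e. the scheduler
  // page with that queue expanded, now linked from the User: and Queue: rows.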



hadoop git commit: YARN-3404. Display queue name on application page. Contributed by Ryu Kobayashi

2015-04-15 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk de0f1700c -> b2e6cf607


YARN-3404. Display queue name on application page. Contributed by Ryu Kobayashi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2e6cf60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2e6cf60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2e6cf60

Branch: refs/heads/trunk
Commit: b2e6cf607f1712d103520ca6b3ff21ecc07cd265
Parents: de0f170
Author: Jian He jia...@apache.org
Authored: Wed Apr 15 13:52:50 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Wed Apr 15 13:52:50 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 2 ++
 .../java/org/apache/hadoop/yarn/server/webapp/AppBlock.java   | 7 ++-
 2 files changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2e6cf60/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4a29b77..7ec4b50 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -131,6 +131,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3394. Enrich WebApplication proxy documentation. (Naganarasimha G R
 via jianhe)
 
+YARN-3404. Display queue name on application page. (Ryu Kobayashi via 
jianhe)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2e6cf60/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index d5a3dd8..0d80339 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -157,8 +158,11 @@ public class AppBlock extends HtmlBlock {
       html.script().$type("text/javascript")._(script.toString())._();
     }

+    String schedulerPath = WebAppUtils.getResolvedRMWebAppURLWithScheme(conf) +
+        "/cluster/scheduler?openQueues=" + app.getQueue();
+
     ResponseInfo overviewTable = info("Application Overview")
-      ._("User:", app.getUser())
+      ._("User:", schedulerPath, app.getUser())
       ._("Name:", app.getName())
       ._("Application Type:", app.getType())
       ._("Application Tags:",
@@ -167,6 +171,7 @@ public class AppBlock extends HtmlBlock {
         "YarnApplicationState:",
         app.getAppState() == null ? UNAVAILABLE : clarifyAppState(app
           .getAppState()))
+      ._("Queue:", schedulerPath, app.getQueue())
       ._("FinalStatus Reported by AM:",
         clairfyAppFinalStatus(app.getFinalAppStatus()))
       ._("Started:", Times.format(app.getStartedTime()))



hadoop git commit: HDFS-8151. Always use snapshot path as source when invalid snapshot names are used for diff based distcp. Contributed by Jing Zhao.

2015-04-15 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk e48cedc66 -> 4c097e473


HDFS-8151. Always use snapshot path as source when invalid snapshot names are 
used for diff based distcp. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c097e47
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c097e47
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c097e47

Branch: refs/heads/trunk
Commit: 4c097e473bb1f18d1510deb61bae2bcb8c156f18
Parents: e48cedc
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 15 12:37:20 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 15 12:37:20 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../java/org/apache/hadoop/tools/DistCpSync.java | 12 +++-
 .../java/org/apache/hadoop/tools/TestDistCpSync.java | 15 ++-
 3 files changed, 24 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c097e47/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5a0f6f2..574faa2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -522,6 +522,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8127. NameNode Failover during HA upgrade can cause DataNode to
 finalize upgrade. (jing9)
 
+HDFS-8151. Always use snapshot path as source when invalid snapshot names
+are used for diff based distcp. (jing9)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c097e47/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
index 8e71b6f..5bf638d 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
@@ -47,8 +47,8 @@ class DistCpSync {
     List<Path> sourcePaths = inputOptions.getSourcePaths();
     if (sourcePaths.size() != 1) {
       // we only support one source dir which must be a snapshottable directory
-      DistCp.LOG.warn(sourcePaths.size() + " source paths are provided");
-      return false;
+      throw new IllegalArgumentException(sourcePaths.size()
+          + " source paths are provided");
 }
 final Path sourceDir = sourcePaths.get(0);
 final Path targetDir = inputOptions.getTargetPath();
@@ -59,15 +59,17 @@ class DistCpSync {
 // DistributedFileSystem.
 if (!(sfs instanceof DistributedFileSystem) ||
 !(tfs instanceof DistributedFileSystem)) {
-      DistCp.LOG.warn("To use diff-based distcp, the FileSystems needs to" +
-          " be DistributedFileSystem");
-      return false;
+      throw new IllegalArgumentException("The FileSystems needs to" +
+          " be DistributedFileSystem for using snapshot-diff-based distcp");
 }
 final DistributedFileSystem sourceFs = (DistributedFileSystem) sfs;
 final DistributedFileSystem targetFs= (DistributedFileSystem) tfs;
 
 // make sure targetFS has no change between from and the current states
 if (!checkNoChange(inputOptions, targetFs, targetDir)) {
+      // set the source path using the snapshot path
+      inputOptions.setSourcePaths(Arrays.asList(getSourceSnapshotPath(sourceDir,
+          inputOptions.getToSnapshot())));
   return false;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c097e47/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
index 75d1de5..0a9a11f 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
@@ -88,24 +88,37 @@ public class TestDistCpSync {
   public void testFallback() throws Exception {
 // the source/target dir are not snapshottable dir
 Assert.assertFalse(DistCpSync.sync(options, conf));
+// make sure the source path has been updated to the snapshot path
+final Path spath = new Path(source,
+HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s2");
+Assert.assertEquals(spath, 
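
A hedged sketch of the snapshot path the fallback switches to, matching what the test
above asserts; only the name getSourceSnapshotPath appears in the diff, and its body
here is an assumption:

  // Sketch only: rewrite the source to <sourceDir>/.snapshot/<toSnapshot>, e.g.
  // /source/.snapshot/s2, so a plain copy still reads a consistent snapshot image.
  private static Path getSourceSnapshotPath(Path sourceDir, String snapshotName) {
    return new Path(sourceDir,
        HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName);
  }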

hadoop git commit: HDFS-8151. Always use snapshot path as source when invalid snapshot names are used for diff based distcp. Contributed by Jing Zhao.

2015-04-15 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 d4a462c02 -> 9e11ae684


HDFS-8151. Always use snapshot path as source when invalid snapshot names are 
used for diff based distcp. Contributed by Jing Zhao.

(cherry picked from commit 4c097e473bb1f18d1510deb61bae2bcb8c156f18)
(cherry picked from commit d4dd97eabd6691eedeeb9fb7685060dfb192ff21)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e11ae68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e11ae68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e11ae68

Branch: refs/heads/branch-2.7
Commit: 9e11ae684a520ade428cf8eb31db0ad821be73f3
Parents: d4a462c
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 15 12:37:20 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 15 12:41:34 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../java/org/apache/hadoop/tools/DistCpSync.java | 12 +++-
 .../java/org/apache/hadoop/tools/TestDistCpSync.java | 15 ++-
 3 files changed, 24 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e11ae68/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7c9225a..fda744b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -20,6 +20,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8127. NameNode Failover during HA upgrade can cause DataNode to
 finalize upgrade. (jing9)
 
+HDFS-8151. Always use snapshot path as source when invalid snapshot names
+are used for diff based distcp. (jing9)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e11ae68/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
index 8e71b6f..5bf638d 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
@@ -47,8 +47,8 @@ class DistCpSync {
     List<Path> sourcePaths = inputOptions.getSourcePaths();
     if (sourcePaths.size() != 1) {
       // we only support one source dir which must be a snapshottable directory
-      DistCp.LOG.warn(sourcePaths.size() + " source paths are provided");
-      return false;
+      throw new IllegalArgumentException(sourcePaths.size()
+          + " source paths are provided");
 }
 final Path sourceDir = sourcePaths.get(0);
 final Path targetDir = inputOptions.getTargetPath();
@@ -59,15 +59,17 @@ class DistCpSync {
 // DistributedFileSystem.
 if (!(sfs instanceof DistributedFileSystem) ||
 !(tfs instanceof DistributedFileSystem)) {
-      DistCp.LOG.warn("To use diff-based distcp, the FileSystems needs to" +
-          " be DistributedFileSystem");
-      return false;
+      throw new IllegalArgumentException("The FileSystems needs to" +
+          " be DistributedFileSystem for using snapshot-diff-based distcp");
 }
 final DistributedFileSystem sourceFs = (DistributedFileSystem) sfs;
 final DistributedFileSystem targetFs= (DistributedFileSystem) tfs;
 
 // make sure targetFS has no change between from and the current states
 if (!checkNoChange(inputOptions, targetFs, targetDir)) {
+      // set the source path using the snapshot path
+      inputOptions.setSourcePaths(Arrays.asList(getSourceSnapshotPath(sourceDir,
+          inputOptions.getToSnapshot())));
   return false;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e11ae68/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
index 75d1de5..0a9a11f 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
@@ -88,24 +88,37 @@ public class TestDistCpSync {
   public void testFallback() throws Exception {
 // the source/target dir are not snapshottable dir
 Assert.assertFalse(DistCpSync.sync(options, conf));
+// make sure the source path has been updated to the 

hadoop git commit: HDFS-8120. Erasure coding: created util class to analyze striped block groups. Contributed by Zhe Zhang and Li Bo.

2015-04-15 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 295ccbca7 -> 903bf3700


HDFS-8120. Erasure coding: created util class to analyze striped block groups. 
Contributed by Zhe Zhang and Li Bo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/903bf370
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/903bf370
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/903bf370

Branch: refs/heads/HDFS-7285
Commit: 903bf37006630185581c4b4e3d11283c36a61aab
Parents: 295ccbc
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 15 12:59:27 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 15 12:59:27 2015 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |   4 +-
 .../hadoop/hdfs/DFSStripedInputStream.java  |  77 +++
 .../hadoop/hdfs/DFSStripedOutputStream.java |  34 +++--
 .../apache/hadoop/hdfs/StripedDataStreamer.java |  58 ++--
 .../server/blockmanagement/BlockManager.java|  26 +++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 138 +++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  91 +++-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  83 +--
 .../apache/hadoop/hdfs/TestReadStripedFile.java |  92 +++--
 .../server/namenode/TestAddStripedBlocks.java   | 107 ++
 .../namenode/TestRecoverStripedBlocks.java  |   3 +-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  | 125 +
 12 files changed, 562 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/903bf370/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 6006693..9ef 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1169,9 +1169,9 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   int nread = reader.readAll(buf, offsets[i], lengths[i]);
   updateReadStatistics(readStatistics, nread, reader);
 
-  if (nread != len) {
+  if (nread != lengths[i]) {
 throw new IOException("truncated return from reader.read(): " +
-"excpected " + len + ", got " + nread);
+"excpected " + lengths[i] + ", got " + nread);
   }
 }
 DFSClientFaultInjector.get().readFromDatanodeDelay();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/903bf370/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 8a431b1..d597407 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
@@ -50,7 +51,7 @@ import java.util.concurrent.Future;
  *
 * | <- Striped Block Group -> |
 *  blk_0      blk_1       blk_2   <- A striped block group has
- *    |          |           |         {@link #groupSize} blocks
+ *    |          |           |         {@link #dataBlkNum} blocks
 *    v          v           v
 * +------+   +------+   +------+
 * |cell_0|   |cell_1|   |cell_2|  <- The logical read order should be
@@ -72,7 +73,7 @@ import java.util.concurrent.Future;
 public class DFSStripedInputStream extends DFSInputStream {
   /**
* This method plans the read portion from each block in the stripe
-   * @param groupSize The size / width of the striping group
+   * @param dataBlkNum The number of data blocks in the striping group
* @param cellSize The size of each striping cell
* @param startInBlk Starting offset in the striped block
* @param len Length of the read request
@@ -81,29 +82,29 @@ public class DFSStripedInputStream extends DFSInputStream {
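The hunk body is cut off above, but the javadoc describes a planner that splits a read of length len, starting at startInBlk, across dataBlkNum data blocks striped in cells of cellSize bytes. A rough, self-contained sketch of that round-robin arithmetic follows; it is illustrative only, and the class and method names below are not taken from the patch.

// Hedged sketch of round-robin striping arithmetic, NOT the patch's code.
// dataBlkNum and cellSize mirror the javadoc parameters above.
public class StripeMathSketch {

  /** Index of the internal data block that holds logical offset 'off'. */
  static int blockIndex(long off, int cellSize, int dataBlkNum) {
    long cellIdx = off / cellSize;          // which cell the byte falls into
    return (int) (cellIdx % dataBlkNum);    // cells are assigned round-robin
  }

  /** Offset inside that internal block for logical offset 'off'. */
  static long offsetInBlock(long off, int cellSize, int dataBlkNum) {
    long cellIdx = off / cellSize;
    long stripeIdx = cellIdx / dataBlkNum;  // number of full stripes before it
    return stripeIdx * cellSize + off % cellSize;
  }

  public static void main(String[] args) {
    int cellSize = 64 * 1024;               // illustrative cell size
    int dataBlkNum = 3;                     // matches the 3-block javadoc example
    long off = 2L * cellSize + 10;          // 10 bytes into cell_2
    System.out.println(blockIndex(off, cellSize, dataBlkNum));    // 2 -> blk_2
    System.out.println(offsetInBlock(off, cellSize, dataBlkNum)); // 10
  }
}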
 

hadoop git commit: HDFS-7934. Update RollingUpgrade rollback documentation: should use bootstrapstandby for standby NN. Contributed by J. Andreina.

2015-04-15 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d4dd97eab - 725621d95


HDFS-7934. Update RollingUpgrade rollback documentation: should use 
bootstrapstandby for standby NN. Contributed by J. Andreina.

(cherry picked from commit b172d03595d1591e7f542791224607d8c5fce3e2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/725621d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/725621d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/725621d9

Branch: refs/heads/branch-2
Commit: 725621d95a833f813e2164e32b33984e1c1ee29a
Parents: d4dd97e
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 15 13:10:38 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 15 13:11:15 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/725621d9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4fcfc06..5c8b2d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -207,6 +207,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8151. Always use snapshot path as source when invalid snapshot names
 are used for diff based distcp. (jing9)
 
+HDFS-7934. Update RollingUpgrade rollback documentation: should use
+bootstrapstandby for standby NN. (J. Andreina via jing9)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/725621d9/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
index a62198f..8fd4f1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
@@ -263,14 +263,16 @@
   <p>
     Rollback from a newer release to the pre-upgrade release is always supported.
     However, it cannot be done in a rolling fashion.  It requires cluster downtime.
+    Suppose <em>NN1</em> and <em>NN2</em> are respectively in active and standby states.
     Below are the steps for rollback:
   </p>
   <ul>
     <li>Rollback HDFS<ol>
       <li>Shutdown all <em>NNs</em> and <em>DNs</em>.</li>
       <li>Restore the pre-upgrade release in all machines.</li>
-      <li>Start <em>NNs</em> with the
+      <li>Start <em>NN1</em> as Active with the
         <a href="#namenode_-rollingUpgrade"><code>-rollingUpgrade rollback</code></a> option.</li>
+      <li>Run `-bootstrapStandby' on NN2 and start it normally as standby.</li>
       <li>Start <em>DNs</em> with the <code>-rollback</code> option.</li>
     </ol></li>
   </ul>



hadoop git commit: HDFS-7934. Update RollingUpgrade rollback documentation: should use bootstrapstandby for standby NN. Contributed by J. Andreina.

2015-04-15 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4c097e473 - b172d0359


HDFS-7934. Update RollingUpgrade rollback documentation: should use 
bootstrapstandby for standby NN. Contributed by J. Andreina.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b172d035
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b172d035
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b172d035

Branch: refs/heads/trunk
Commit: b172d03595d1591e7f542791224607d8c5fce3e2
Parents: 4c097e4
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 15 13:10:38 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 15 13:10:38 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b172d035/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 574faa2..60fff16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -525,6 +525,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8151. Always use snapshot path as source when invalid snapshot names
 are used for diff based distcp. (jing9)
 
+HDFS-7934. Update RollingUpgrade rollback documentation: should use
+bootstrapstandby for standby NN. (J. Andreina via jing9)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b172d035/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
index 2ad28e1..1c3dc60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
@@ -263,14 +263,16 @@
   <p>
     Rollback from a newer release to the pre-upgrade release is always supported.
     However, it cannot be done in a rolling fashion.  It requires cluster downtime.
+    Suppose <em>NN1</em> and <em>NN2</em> are respectively in active and standby states.
     Below are the steps for rollback:
   </p>
   <ul>
     <li>Rollback HDFS<ol>
       <li>Shutdown all <em>NNs</em> and <em>DNs</em>.</li>
       <li>Restore the pre-upgrade release in all machines.</li>
-      <li>Start <em>NNs</em> with the
+      <li>Start <em>NN1</em> as Active with the
         <a href="#namenode_-rollingUpgrade"><code>-rollingUpgrade rollback</code></a> option.</li>
+      <li>Run `-bootstrapStandby' on NN2 and start it normally as standby.</li>
       <li>Start <em>DNs</em> with the <code>-rollback</code> option.</li>
     </ol></li>
   </ul>



hadoop git commit: HDFS-7934. Update RollingUpgrade rollback documentation: should use bootstrapstandby for standby NN. Contributed by J. Andreina.

2015-04-15 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 9e11ae684 - e1ce0700e


HDFS-7934. Update RollingUpgrade rollback documentation: should use 
bootstrapstandby for standby NN. Contributed by J. Andreina.

(cherry picked from commit b172d03595d1591e7f542791224607d8c5fce3e2)
(cherry picked from commit 725621d95a833f813e2164e32b33984e1c1ee29a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1ce0700
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1ce0700
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1ce0700

Branch: refs/heads/branch-2.7
Commit: e1ce0700eb2c5bf2b765e6a93df8146207a6fa9e
Parents: 9e11ae6
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 15 13:10:38 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 15 13:11:45 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1ce0700/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fda744b..242a2c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -23,6 +23,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8151. Always use snapshot path as source when invalid snapshot names
 are used for diff based distcp. (jing9)
 
+HDFS-7934. Update RollingUpgrade rollback documentation: should use
+bootstrapstandby for standby NN. (J. Andreina via jing9)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1ce0700/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
index a62198f..8fd4f1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
@@ -263,14 +263,16 @@
   <p>
     Rollback from a newer release to the pre-upgrade release is always supported.
     However, it cannot be done in a rolling fashion.  It requires cluster downtime.
+    Suppose <em>NN1</em> and <em>NN2</em> are respectively in active and standby states.
     Below are the steps for rollback:
   </p>
   <ul>
     <li>Rollback HDFS<ol>
       <li>Shutdown all <em>NNs</em> and <em>DNs</em>.</li>
       <li>Restore the pre-upgrade release in all machines.</li>
-      <li>Start <em>NNs</em> with the
+      <li>Start <em>NN1</em> as Active with the
         <a href="#namenode_-rollingUpgrade"><code>-rollingUpgrade rollback</code></a> option.</li>
+      <li>Run `-bootstrapStandby' on NN2 and start it normally as standby.</li>
       <li>Start <em>DNs</em> with the <code>-rollback</code> option.</li>
     </ol></li>
   </ul>



hadoop git commit: YARN-3326. Support RESTful API for getLabelsToNodes. Contributed by Naganarasimha G R.

2015-04-15 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 96e12fa46 - e977247f3


YARN-3326. Support RESTful API for getLabelsToNodes. Contributed by 
Naganarasimha G R.

(cherry picked from commit e48cedc663b8a26fd62140c8e2907f9b4edd9785)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e977247f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e977247f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e977247f

Branch: refs/heads/branch-2
Commit: e977247f3eda48ac4cf2eac050887badb77e39d4
Parents: 96e12fa
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Apr 15 14:03:55 2015 -0500
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed Apr 15 14:04:24 2015 -0500

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../resourcemanager/webapp/NodeIDsInfo.java | 47 +++
 .../resourcemanager/webapp/RMWebServices.java   | 61 +++-
 .../webapp/dao/LabelsToNodesInfo.java   | 43 ++
 .../webapp/TestRMWebServicesNodeLabels.java | 55 ++
 5 files changed, 194 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e977247f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index bcbf445..ebeb7c2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -30,6 +30,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3318. Create Initial OrderingPolicy Framework and FifoOrderingPolicy.
 (Craig Welch via wangda)
 
+YARN-3326. Support RESTful API for getLabelsToNodes. (Naganarasimha G R
+via ozawa)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e977247f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
new file mode 100644
index 000..39d636d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name = "labelsToNodesInfo")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class NodeIDsInfo {
+
+  /**
+   * Set doesn't support default no arg constructor which is req by JAXB
+   */
+  protected ArrayList<String> nodeIDsList = new ArrayList<String>();
+
+  public NodeIDsInfo() {
+  } // JAXB needs this
+
+  public NodeIDsInfo(List<String> nodeIdsList) {
+    this.nodeIDsList.addAll(nodeIdsList);
+  }
+
+  public ArrayList<String> getNodeIDs() {
+    return nodeIDsList;
+  }
+}
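Since the new class is shown in full above, a short hedged usage sketch illustrates what the JAXB annotations provide: marshalling an instance yields the XML the REST endpoint can serve. This assumes the NodeIDsInfo class from the patch is on the classpath; everything else is standard javax.xml.bind, and the host names are made up.

// Hedged usage sketch only; not part of the patch.
import java.util.Arrays;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.NodeIDsInfo;

public class NodeIDsInfoMarshalSketch {
  public static void main(String[] args) throws Exception {
    NodeIDsInfo info = new NodeIDsInfo(Arrays.asList("host1:45454", "host2:45454"));
    Marshaller m = JAXBContext.newInstance(NodeIDsInfo.class).createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    m.marshal(info, System.out);
    // Expected shape (roughly): <labelsToNodesInfo><nodeIDsList>host1:45454</nodeIDsList>...
  }
}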

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e977247f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
 

hadoop git commit: HDFS-8151. Always use snapshot path as source when invalid snapshot names are used for diff based distcp. Contributed by Jing Zhao.

2015-04-15 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e977247f3 - d4dd97eab


HDFS-8151. Always use snapshot path as source when invalid snapshot names are 
used for diff based distcp. Contributed by Jing Zhao.

(cherry picked from commit 4c097e473bb1f18d1510deb61bae2bcb8c156f18)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4dd97ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4dd97ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4dd97ea

Branch: refs/heads/branch-2
Commit: d4dd97eabd6691eedeeb9fb7685060dfb192ff21
Parents: e977247
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 15 12:37:20 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 15 12:39:09 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../java/org/apache/hadoop/tools/DistCpSync.java | 12 +++-
 .../java/org/apache/hadoop/tools/TestDistCpSync.java | 15 ++-
 3 files changed, 24 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4dd97ea/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a8a44c8..4fcfc06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -204,6 +204,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8127. NameNode Failover during HA upgrade can cause DataNode to
 finalize upgrade. (jing9)
 
+HDFS-8151. Always use snapshot path as source when invalid snapshot names
+are used for diff based distcp. (jing9)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4dd97ea/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
index 8e71b6f..5bf638d 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
@@ -47,8 +47,8 @@ class DistCpSync {
     List<Path> sourcePaths = inputOptions.getSourcePaths();
     if (sourcePaths.size() != 1) {
       // we only support one source dir which must be a snapshottable directory
-      DistCp.LOG.warn(sourcePaths.size() + " source paths are provided");
-      return false;
+      throw new IllegalArgumentException(sourcePaths.size()
+          + " source paths are provided");
     }
     final Path sourceDir = sourcePaths.get(0);
     final Path targetDir = inputOptions.getTargetPath();
@@ -59,15 +59,17 @@
     // DistributedFileSystem.
     if (!(sfs instanceof DistributedFileSystem) ||
         !(tfs instanceof DistributedFileSystem)) {
-      DistCp.LOG.warn("To use diff-based distcp, the FileSystems needs to" +
-          " be DistributedFileSystem");
-      return false;
+      throw new IllegalArgumentException("The FileSystems needs to" +
+          " be DistributedFileSystem for using snapshot-diff-based distcp");
     }
     final DistributedFileSystem sourceFs = (DistributedFileSystem) sfs;
     final DistributedFileSystem targetFs= (DistributedFileSystem) tfs;
 
     // make sure targetFS has no change between from and the current states
     if (!checkNoChange(inputOptions, targetFs, targetDir)) {
+      // set the source path using the snapshot path
+      inputOptions.setSourcePaths(Arrays.asList(getSourceSnapshotPath(sourceDir,
+          inputOptions.getToSnapshot())));
       return false;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4dd97ea/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
index 75d1de5..0a9a11f 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
@@ -88,24 +88,37 @@ public class TestDistCpSync {
   public void testFallback() throws Exception {
 // the source/target dir are not snapshottable dir
 Assert.assertFalse(DistCpSync.sync(options, conf));
+// make sure the source path has been updated to the snapshot path
+final Path spath = new Path(source,
+
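The test body is truncated above, but the behavior it checks (rewriting the source to the snapshot path when the diff-based sync cannot proceed) rests on the standard HDFS snapshot layout. Below is a hedged, self-contained sketch of that path manipulation; the helper is illustrative, and the patch's getSourceSnapshotPath is only presumed to do something equivalent.

// Hedged sketch, not the patch's code: how a snapshot path can be derived from
// a snapshottable directory using the standard <dir>/.snapshot/<name> layout.
import org.apache.hadoop.fs.Path;

public class SnapshotPathSketch {
  static Path sourceSnapshotPath(Path sourceDir, String snapshotName) {
    // HDFS exposes snapshots under the read-only ".snapshot" subdirectory.
    return new Path(sourceDir, ".snapshot/" + snapshotName);
  }

  public static void main(String[] args) {
    Path source = new Path("/data/source");
    // After a failed diff-based sync, the patch resets the source to the
    // snapshot path so a plain distcp still copies a consistent image.
    System.out.println(sourceSnapshotPath(source, "s2")); // /data/source/.snapshot/s2
  }
}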

hadoop git commit: YARN-3326. Support RESTful API for getLabelsToNodes. Contributed by Naganarasimha G R.

2015-04-15 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9e8309a1b - e48cedc66


YARN-3326. Support RESTful API for getLabelsToNodes. Contributed by 
Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e48cedc6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e48cedc6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e48cedc6

Branch: refs/heads/trunk
Commit: e48cedc663b8a26fd62140c8e2907f9b4edd9785
Parents: 9e8309a
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Apr 15 14:03:55 2015 -0500
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed Apr 15 14:03:55 2015 -0500

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../resourcemanager/webapp/NodeIDsInfo.java | 47 +++
 .../resourcemanager/webapp/RMWebServices.java   | 61 +++-
 .../webapp/dao/LabelsToNodesInfo.java   | 43 ++
 .../webapp/TestRMWebServicesNodeLabels.java | 55 ++
 5 files changed, 194 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e48cedc6/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ecdda61..4a29b77 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -78,6 +78,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3318. Create Initial OrderingPolicy Framework and FifoOrderingPolicy.
 (Craig Welch via wangda)
 
+YARN-3326. Support RESTful API for getLabelsToNodes. (Naganarasimha G R
+via ozawa)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e48cedc6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
new file mode 100644
index 000..39d636d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name = labelsToNodesInfo)
+@XmlAccessorType(XmlAccessType.FIELD)
+public class NodeIDsInfo {
+
+  /**
+   * Set doesn't support default no arg constructor which is req by JAXB
+   */
+  protected ArrayListString nodeIDsList = new ArrayListString();
+
+  public NodeIDsInfo() {
+  } // JAXB needs this
+
+  public NodeIDsInfo(ListString nodeIdsList) {
+this.nodeIDsList.addAll(nodeIdsList);
+  }
+
+  public ArrayListString getNodeIDs() {
+return nodeIDsList;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e48cedc6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
 

hadoop git commit: YARN-3354. Add node label expression in ContainerTokenIdentifier to support RM recovery. Contributed by Wangda Tan (cherry picked from commit 1b89a3e173f8e905074ed6714a7be5c003c0e2c

2015-04-15 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 58f99b740 - 6fed2c2a7


YARN-3354. Add node label expression in ContainerTokenIdentifier to support RM 
recovery. Contributed by Wangda Tan
(cherry picked from commit 1b89a3e173f8e905074ed6714a7be5c003c0e2c4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fed2c2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fed2c2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fed2c2a

Branch: refs/heads/branch-2
Commit: 6fed2c2a791186d1140fe4f9d18966b058935b5b
Parents: 58f99b7
Author: Jian He jia...@apache.org
Authored: Wed Apr 15 13:57:06 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Wed Apr 15 14:03:29 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/security/ContainerTokenIdentifier.java |  21 +-
 .../main/proto/server/yarn_security_token.proto |   1 +
 .../api/protocolrecords/NMContainerStatus.java  |  22 +-
 .../impl/pb/NMContainerStatusPBImpl.java|  21 +-
 .../yarn_server_common_service_protos.proto |   1 +
 .../container/ContainerImpl.java|   7 +-
 .../containermanager/TestContainerManager.java  |   2 +-
 .../rmcontainer/RMContainer.java|   2 +
 .../rmcontainer/RMContainerImpl.java|  26 +-
 .../scheduler/AbstractYarnScheduler.java|   2 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  13 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |   7 +-
 .../security/RMContainerTokenSecretManager.java |   7 +-
 .../server/resourcemanager/TestRMRestart.java   |  13 +-
 .../capacity/TestContainerAllocation.java   |   5 +-
 ...TestWorkPreservingRMRestartForNodeLabel.java | 282 +++
 17 files changed, 408 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fed2c2a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 06b7d75..9c391a2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -33,6 +33,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3326. Support RESTful API for getLabelsToNodes. (Naganarasimha G R
 via ozawa)
 
+YARN-3354. Add node label expression in ContainerTokenIdentifier to support
+RM recovery. (Wangda Tan via jianhe)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fed2c2a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
index 593bfc3..9a60d01 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
@@ -40,6 +40,7 @@ import 
org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.LogAggregationContextPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import 
org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.ContainerTokenIdentifierProto;
 
 import com.google.protobuf.TextFormat;
@@ -64,13 +65,14 @@ public class ContainerTokenIdentifier extends 
TokenIdentifier {
   String hostName, String appSubmitter, Resource r, long expiryTimeStamp,
   int masterKeyId, long rmIdentifier, Priority priority, long 
creationTime) {
 this(containerID, hostName, appSubmitter, r, expiryTimeStamp, masterKeyId,
-rmIdentifier, priority, creationTime, null);
+rmIdentifier, priority, creationTime, null,
+CommonNodeLabelsManager.NO_LABEL);
   }
 
   public ContainerTokenIdentifier(ContainerId containerID, String hostName,
   String appSubmitter, Resource r, long expiryTimeStamp, int masterKeyId,
   long rmIdentifier, Priority priority, long creationTime,
-  LogAggregationContext logAggregationContext) {
+      LogAggregationContext logAggregationContext, String nodeLabelExpression) {
 ContainerTokenIdentifierProto.Builder builder = 
 ContainerTokenIdentifierProto.newBuilder();
 if (containerID != null) {
@@ -93,6 +95,11 

hadoop git commit: YARN-3354. Add node label expression in ContainerTokenIdentifier to support RM recovery. Contributed by Wangda Tan

2015-04-15 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk b2e6cf607 - 1b89a3e17


YARN-3354. Add node label expression in ContainerTokenIdentifier to support RM 
recovery. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b89a3e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b89a3e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b89a3e1

Branch: refs/heads/trunk
Commit: 1b89a3e173f8e905074ed6714a7be5c003c0e2c4
Parents: b2e6cf6
Author: Jian He jia...@apache.org
Authored: Wed Apr 15 13:57:06 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Wed Apr 15 13:57:06 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/security/ContainerTokenIdentifier.java |  21 +-
 .../main/proto/server/yarn_security_token.proto |   1 +
 .../api/protocolrecords/NMContainerStatus.java  |  22 +-
 .../impl/pb/NMContainerStatusPBImpl.java|  21 +-
 .../yarn_server_common_service_protos.proto |   1 +
 .../container/ContainerImpl.java|   7 +-
 .../containermanager/TestContainerManager.java  |   2 +-
 .../rmcontainer/RMContainer.java|   2 +
 .../rmcontainer/RMContainerImpl.java|  26 +-
 .../scheduler/AbstractYarnScheduler.java|   2 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  13 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |   7 +-
 .../security/RMContainerTokenSecretManager.java |   7 +-
 .../server/resourcemanager/TestRMRestart.java   |  13 +-
 .../capacity/TestContainerAllocation.java   |   5 +-
 ...TestWorkPreservingRMRestartForNodeLabel.java | 282 +++
 17 files changed, 408 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b89a3e1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7ec4b50..ecbdd3c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -81,6 +81,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3326. Support RESTful API for getLabelsToNodes. (Naganarasimha G R
 via ozawa)
 
+YARN-3354. Add node label expression in ContainerTokenIdentifier to support
+RM recovery. (Wangda Tan via jianhe)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b89a3e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
index 593bfc3..9a60d01 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
@@ -40,6 +40,7 @@ import 
org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.LogAggregationContextPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import 
org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.ContainerTokenIdentifierProto;
 
 import com.google.protobuf.TextFormat;
@@ -64,13 +65,14 @@ public class ContainerTokenIdentifier extends 
TokenIdentifier {
   String hostName, String appSubmitter, Resource r, long expiryTimeStamp,
   int masterKeyId, long rmIdentifier, Priority priority, long 
creationTime) {
 this(containerID, hostName, appSubmitter, r, expiryTimeStamp, masterKeyId,
-rmIdentifier, priority, creationTime, null);
+rmIdentifier, priority, creationTime, null,
+CommonNodeLabelsManager.NO_LABEL);
   }
 
   public ContainerTokenIdentifier(ContainerId containerID, String hostName,
   String appSubmitter, Resource r, long expiryTimeStamp, int masterKeyId,
   long rmIdentifier, Priority priority, long creationTime,
-  LogAggregationContext logAggregationContext) {
+      LogAggregationContext logAggregationContext, String nodeLabelExpression) {
 ContainerTokenIdentifierProto.Builder builder = 
 ContainerTokenIdentifierProto.newBuilder();
 if (containerID != null) {
@@ -93,6 +95,11 @@ public class ContainerTokenIdentifier extends 
TokenIdentifier {
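The visible part of the change shows the existing constructor delegating to a new overload, passing CommonNodeLabelsManager.NO_LABEL as the default label expression so that pre-node-label callers keep compiling. A minimal, hedged sketch of that compatibility pattern follows; the names are illustrative, and the empty string is only assumed to play the role of NO_LABEL.

// Hedged sketch of the "delegate to the extended constructor with a neutral
// default" pattern; not the actual ContainerTokenIdentifier code.
public class TokenIdentifierSketch {
  private final String nodeLabelExpression;

  public TokenIdentifierSketch() {
    this("");   // "" stands in for CommonNodeLabelsManager.NO_LABEL here
  }

  public TokenIdentifierSketch(String nodeLabelExpression) {
    this.nodeLabelExpression = nodeLabelExpression;
  }

  public String getNodeLabelExpression() {
    return nodeLabelExpression;
  }
}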
   

hadoop git commit: HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits (Vinayakumar B)

2015-04-15 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 a6daad049 - 91e7bf81d


HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits 
(Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91e7bf81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91e7bf81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91e7bf81

Branch: refs/heads/HDFS-7285
Commit: 91e7bf81dc966d37e3fbebd9ff8aed2ed546f76d
Parents: a6daad0
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 15 12:23:07 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed Apr 15 12:23:07 2015 +0530

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 15 +++
 1 file changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91e7bf81/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 07bbd4a..9fdac98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -51,11 +51,20 @@
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
 manage EC zones (Zhe Zhang)
 
+HDFS-7969. Erasure coding: NameNode support for lease recovery of striped
+block groups. (Zhe Zhang)
+
+HDFS-7782. Erasure coding: pread from files in striped layout.
+(Zhe Zhang and Jing Zhao via Zhe Zhang)
+
 HDFS-8023. Erasure Coding: retrieve eraure coding schema for a file from
 NameNode (vinayakumarb)
 
 HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
 
+HDFS-8077. Erasure coding: fix bugs in EC zone and symlinks.
+(Jing Zhao and Zhe Zhang via Jing Zhao)
+
 HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
 
 HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
@@ -63,5 +72,11 @@
 HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
 ECSchemas loaded in Namenode. (vinayakumarb)
 
+HDFS-8122. Erasure Coding: Support specifying ECSchema during creation of 
ECZone.
+(Vinayakumar B via Zhe Zhang)
+
+HDFS-8114. Erasure coding: Add auditlog 
FSNamesystem#createErasureCodingZone if this
+operation fails. (Rakesh R via Zhe Zhang)
+
 HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
 separate erasurecoding proto file (Rakesh R via vinayakumarb)
\ No newline at end of file



hadoop git commit: HDFS-8123. Erasure Coding: Better to move EC related proto messages to a separate erasurecoding proto file (Contributed by Rakesh R)

2015-04-15 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 6e202bac1 - a6daad049


HDFS-8123. Erasure Coding: Better to move EC related proto messages to a 
separate erasurecoding proto file (Contributed by Rakesh R)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a6daad04
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a6daad04
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a6daad04

Branch: refs/heads/HDFS-7285
Commit: a6daad049b2ff3ea2fb1cd85fcc3edd8857d5472
Parents: 6e202ba
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 15 12:09:16 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed Apr 15 12:09:16 2015 +0530

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  1 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 12 ++--
 .../ClientNamenodeProtocolTranslatorPB.java | 13 ++--
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  6 +-
 .../namenode/ErasureCodingZoneManager.java  |  2 +-
 .../src/main/proto/ClientNamenodeProtocol.proto | 24 +--
 .../src/main/proto/erasurecoding.proto  | 74 
 .../hadoop-hdfs/src/main/proto/hdfs.proto   | 27 ---
 9 files changed, 96 insertions(+), 68 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6daad04/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 5250dfa..07bbd4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -61,4 +61,7 @@
 HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
 
 HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
-ECSchemas loaded in Namenode. (vinayakumarb)
\ No newline at end of file
+ECSchemas loaded in Namenode. (vinayakumarb)
+
+HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
+separate erasurecoding proto file (Rakesh R via vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6daad04/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index c11b963..a13a2bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -343,6 +343,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                   <include>hdfs.proto</include>
                   <include>encryption.proto</include>
                   <include>inotify.proto</include>
+                  <include>erasurecoding.proto</include>
                 </includes>
               </source>
               <output>${project.build.directory}/generated-sources/java</output>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6daad04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index cc5ca55..e3073ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -106,12 +106,8 @@ import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDat
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto;
-import 

hadoop git commit: HDFS-7349. Support DFS command for the EC encoding (Contributed by Vinayakumar B)

2015-04-15 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 91e7bf81d - 295ccbca7


HDFS-7349. Support DFS command for the EC encoding (Contributed by Vinayakumar 
B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/295ccbca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/295ccbca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/295ccbca

Branch: refs/heads/HDFS-7285
Commit: 295ccbca74a66d2dab89dd704908deb7071298e9
Parents: 91e7bf8
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 15 16:38:22 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed Apr 15 16:38:22 2015 +0530

--
 .../main/java/org/apache/hadoop/fs/FsShell.java |   8 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   4 +-
 .../hadoop-hdfs/src/main/bin/hdfs   |   5 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  18 ++
 .../hadoop/hdfs/DistributedFileSystem.java  |  32 +++
 .../hadoop/hdfs/protocol/ClientProtocol.java|   9 +
 .../apache/hadoop/hdfs/protocol/ECZoneInfo.java |  56 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  18 ++
 .../ClientNamenodeProtocolTranslatorPB.java |  19 ++
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  12 ++
 .../namenode/ErasureCodingZoneManager.java  |  11 +-
 .../hdfs/server/namenode/FSDirectory.java   |  10 +
 .../hdfs/server/namenode/FSNamesystem.java  |  24 +++
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +
 .../hadoop/hdfs/tools/erasurecode/ECCli.java|  48 +
 .../hdfs/tools/erasurecode/ECCommand.java   | 209 +++
 .../src/main/proto/ClientNamenodeProtocol.proto |   2 +
 .../src/main/proto/erasurecoding.proto  |  15 ++
 18 files changed, 502 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/295ccbca/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index db73f6d..f873a01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -111,6 +111,10 @@ public class FsShell extends Configured implements Tool {
 return getTrash().getCurrentTrashDir();
   }
 
+  protected String getUsagePrefix() {
+return usagePrefix;
+  }
+
   // NOTE: Usage/Help are inner classes to allow access to outer methods
   // that access commandFactory
   
@@ -194,7 +198,7 @@ public class FsShell extends Configured implements Tool {
   }
 } else {
   // display help or usage for all commands 
-  out.println(usagePrefix);
+  out.println(getUsagePrefix());
   
   // display list of short usages
       ArrayList<Command> instances = new ArrayList<Command>();
@@ -218,7 +222,7 @@ public class FsShell extends Configured implements Tool {
   }
 
   private void printInstanceUsage(PrintStream out, Command instance) {
-    out.println(usagePrefix + " " + instance.getUsage());
+    out.println(getUsagePrefix() + " " + instance.getUsage());
   }
 
   private void printInstanceHelp(PrintStream out, Command instance) {
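Exposing usagePrefix through a protected getUsagePrefix() is what lets a subclass such as the new ECCli reuse FsShell's help and usage machinery with its own command prefix. Below is a hedged sketch of the kind of subclass this enables, assuming a build that includes this patch; the class name and prefix text are illustrative and not the actual ECCli.

// Hedged sketch only: a FsShell subclass that overrides the new hook so the
// shared help/usage printing code emits a different prefix.
import org.apache.hadoop.fs.FsShell;

public class ErasureCodingShellSketch extends FsShell {
  @Override
  protected String getUsagePrefix() {
    // Illustrative prefix; the real ECCli defines its own wording.
    return "Usage: hdfs erasurecode [generic options]";
  }
}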

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295ccbca/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 9fdac98..b9fc6fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -79,4 +79,6 @@
 operation fails. (Rakesh R via Zhe Zhang)
 
 HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
-separate erasurecoding proto file (Rakesh R via vinayakumarb)
\ No newline at end of file
+separate erasurecoding proto file (Rakesh R via vinayakumarb)
+
+HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295ccbca/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index f464261..84c79b8 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -134,6 +134,11 @@ case ${COMMAND} in
 hadoop_debug Appending HADOOP_CLIENT_OPTS onto