[13/50] [abbrv] hadoop git commit: HDFS-8117. More accurate verification in SimulatedFSDataset: replace DEFAULT_DATABYTE with patterned data. Contributed by Zhe Zhang.

2015-04-17 Thread zjshen
HDFS-8117. More accurate verification in SimulatedFSDataset: replace 
DEFAULT_DATABYTE with patterned data. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45fa3321
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45fa3321
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45fa3321

Branch: refs/heads/YARN-2928
Commit: 45fa3321cb0b389c781be11c30c3fb2a280f7a5b
Parents: ad69744
Author: Andrew Wang w...@apache.org
Authored: Mon Apr 13 13:01:10 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Fri Apr 17 15:29:41 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java | 25 
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 12 +-
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 21 +---
 .../org/apache/hadoop/hdfs/TestSmallBlock.java  | 16 ++---
 .../server/datanode/SimulatedFSDataset.java | 25 ++--
 .../server/datanode/TestSimulatedFSDataset.java |  3 ++-
 7 files changed, 70 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45fa3321/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2a26544..1aaf42c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -431,6 +431,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8083. Move dfs.client.write.* conf from DFSConfigKeys to 
 HdfsClientConfigKeys.Write.  (szetszwo)
 
+HDFS-8117. More accurate verification in SimulatedFSDataset: replace
+DEFAULT_DATABYTE with patterned data. (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45fa3321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 1b3b62d..ae2d403 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -117,6 +118,7 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
@@ -1769,4 +1771,27 @@ public class DFSTestUtil {
 dn.setLastUpdateMonotonic(Time.monotonicNow() + offset);
   }
 
+  /**
+   * This method takes a set of block locations and fills the provided buffer
+   * with expected bytes based on simulated content from
+   * {@link SimulatedFSDataset}.
+   *
+   * @param lbs The block locations of a file
+   * @param expected The buffer to be filled with expected bytes on the above
+   * locations.
+   */
+  public static void fillExpectedBuf(LocatedBlocks lbs, byte[] expected) {
+Block[] blks = new Block[lbs.getLocatedBlocks().size()];
+for (int i = 0; i < lbs.getLocatedBlocks().size(); i++) {
+  blks[i] = lbs.getLocatedBlocks().get(i).getBlock().getLocalBlock();
+}
+int bufPos = 0;
+for (Block b : blks) {
+  for (long blkPos = 0; blkPos < b.getNumBytes(); blkPos++) {
+assert bufPos < expected.length;
+expected[bufPos++] = SimulatedFSDataset.simulatedByte(b, blkPos);
+  }
+}
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45fa3321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
--
diff --git 

hadoop git commit: HDFS-8117. More accurate verification in SimulatedFSDataset: replace DEFAULT_DATABYTE with patterned data. Contributed by Zhe Zhang.

2015-04-15 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 01af29106 -> 871bf6a76


HDFS-8117. More accurate verification in SimulatedFSDataset: replace 
DEFAULT_DATABYTE with patterned data. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/871bf6a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/871bf6a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/871bf6a7

Branch: refs/heads/branch-2
Commit: 871bf6a765b56215fc88c3dcfb52be4c209b82c1
Parents: 01af291
Author: Andrew Wang w...@apache.org
Authored: Wed Apr 15 08:43:42 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Wed Apr 15 08:43:42 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java | 25 
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 12 +-
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 19 +--
 .../org/apache/hadoop/hdfs/TestSmallBlock.java  | 14 ++-
 .../server/datanode/SimulatedFSDataset.java | 25 ++--
 .../server/datanode/TestSimulatedFSDataset.java |  3 ++-
 7 files changed, 69 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/871bf6a7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bf9a634..32df2f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -113,6 +113,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8083. Move dfs.client.write.* conf from DFSConfigKeys to 
 HdfsClientConfigKeys.Write.  (szetszwo)
 
+HDFS-8117. More accurate verification in SimulatedFSDataset: replace
+DEFAULT_DATABYTE with patterned data. (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/871bf6a7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index c3cefdf..aa73499 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -117,6 +118,7 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
@@ -1776,4 +1778,27 @@ public class DFSTestUtil {
 dn.setLastUpdate(Time.now() + offset);
 dn.setLastUpdateMonotonic(Time.monotonicNow() + offset);
   }
+
+  /**
+   * This method takes a set of block locations and fills the provided buffer
+   * with expected bytes based on simulated content from
+   * {@link SimulatedFSDataset}.
+   *
+   * @param lbs The block locations of a file
+   * @param expected The buffer to be filled with expected bytes on the above
+   * locations.
+   */
+  public static void fillExpectedBuf(LocatedBlocks lbs, byte[] expected) {
+Block[] blks = new Block[lbs.getLocatedBlocks().size()];
+for (int i = 0; i < lbs.getLocatedBlocks().size(); i++) {
+  blks[i] = lbs.getLocatedBlocks().get(i).getBlock().getLocalBlock();
+}
+int bufPos = 0;
+for (Block b : blks) {
+  for (long blkPos = 0; blkPos < b.getNumBytes(); blkPos++) {
+assert bufPos < expected.length;
+expected[bufPos++] = SimulatedFSDataset.simulatedByte(b, blkPos);
+  }
+}
+  }
 }


hadoop git commit: HDFS-8117. More accurate verification in SimulatedFSDataset: replace DEFAULT_DATABYTE with patterned data. Contributed by Zhe Zhang.

2015-04-13 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7fc50e252 -> d60e22152


HDFS-8117. More accurate verification in SimulatedFSDataset: replace 
DEFAULT_DATABYTE with patterned data. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d60e2215
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d60e2215
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d60e2215

Branch: refs/heads/trunk
Commit: d60e22152ac098da103fd37fb81f8758e68d1efa
Parents: 7fc50e2
Author: Andrew Wang w...@apache.org
Authored: Mon Apr 13 13:01:10 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Mon Apr 13 13:01:10 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java | 25 
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 12 +-
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 21 +---
 .../org/apache/hadoop/hdfs/TestSmallBlock.java  | 16 ++---
 .../server/datanode/SimulatedFSDataset.java | 25 ++--
 .../server/datanode/TestSimulatedFSDataset.java |  3 ++-
 7 files changed, 70 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d60e2215/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2a26544..1aaf42c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -431,6 +431,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8083. Move dfs.client.write.* conf from DFSConfigKeys to 
 HdfsClientConfigKeys.Write.  (szetszwo)
 
+HDFS-8117. More accurate verification in SimulatedFSDataset: replace
+DEFAULT_DATABYTE with patterned data. (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d60e2215/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 1b3b62d..ae2d403 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -117,6 +118,7 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
@@ -1769,4 +1771,27 @@ public class DFSTestUtil {
 dn.setLastUpdateMonotonic(Time.monotonicNow() + offset);
   }
 
+  /**
+   * This method takes a set of block locations and fills the provided buffer
+   * with expected bytes based on simulated content from
+   * {@link SimulatedFSDataset}.
+   *
+   * @param lbs The block locations of a file
+   * @param expected The buffer to be filled with expected bytes on the above
+   * locations.
+   */
+  public static void fillExpectedBuf(LocatedBlocks lbs, byte[] expected) {
+Block[] blks = new Block[lbs.getLocatedBlocks().size()];
+for (int i = 0; i < lbs.getLocatedBlocks().size(); i++) {
+  blks[i] = lbs.getLocatedBlocks().get(i).getBlock().getLocalBlock();
+}
+int bufPos = 0;
+for (Block b : blks) {
+  for (long blkPos = 0; blkPos < b.getNumBytes(); blkPos++) {
+assert bufPos < expected.length;
+expected[bufPos++] = SimulatedFSDataset.simulatedByte(b, blkPos);
+  }
+}
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d60e2215/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java