Repository: hadoop
Updated Branches:
  refs/heads/trunk ac578c0e8 -> 80e59e787


HDFS-14101. Fixing underflow error in test. Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80e59e78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80e59e78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80e59e78

Branch: refs/heads/trunk
Commit: 80e59e7876fed1c9d56e696331e0c54e7cd3499b
Parents: ac578c0
Author: Sean Mackrory <mackror...@apache.org>
Authored: Fri Dec 7 17:18:20 2018 -0700
Committer: Sean Mackrory <mackror...@apache.org>
Committed: Mon Dec 10 14:03:08 2018 -0700

----------------------------------------------------------------------
 .../namenode/TestListCorruptFileBlocks.java     | 24 ++++++++++++--------
 1 file changed, 14 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
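For context on the underflow being fixed: DFSTestUtil generated these test files with no minimum size, so a file shorter than two bytes made `channel.size() - 2` negative, and FileChannel rejects a negative write position. A minimal standalone sketch of that failure mode (the temp-file setup and class name are illustrative, not part of the patch):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Random;

public class UnderflowSketch {
  public static void main(String[] args) throws IOException {
    final int corruptionLength = 2;
    // Illustrative stand-in for the block meta file the test corrupts.
    File metaFile = File.createTempFile("blk_", ".meta");
    metaFile.deleteOnExit();
    try (RandomAccessFile raf = new RandomAccessFile(metaFile, "rw")) {
      FileChannel channel = raf.getChannel();
      // The empty temp file has size 0, so this is -2: the underflow.
      long position = channel.size() - corruptionLength;
      if (position < 0) {
        // FileChannel.write(ByteBuffer, long) would throw
        // IllegalArgumentException for a negative position here.
        System.out.println("Underflow: file is " + channel.size()
            + " bytes, shorter than corruptionLength=" + corruptionLength);
        return;
      }
      byte[] buffer = new byte[corruptionLength];
      new Random(13L).nextBytes(buffer);
      channel.write(ByteBuffer.wrap(buffer), position);
    }
  }
}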


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80e59e78/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index db12146..6bfc64d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -80,10 +80,13 @@ public class TestListCorruptFileBlocks {
       cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fs = cluster.getFileSystem();
 
+      // Files are corrupted with 2 bytes before the end of the file,
+      // so that's the minimum length.
+      final int corruptionLength = 2;
       // create two files with one block each
       DFSTestUtil util = new DFSTestUtil.Builder().
           setName("testCorruptFilesCorruptedBlock").setNumFiles(2).
-          setMaxLevels(1).setMaxSize(512).build();
+          setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
       util.createFiles(fs, "/srcdat10");
 
       // fetch bad file list from namenode. There should be none.
@@ -104,14 +107,13 @@ public class TestListCorruptFileBlocks {
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
-      long position = channel.size() - 2;
-      int length = 2;
-      byte[] buffer = new byte[length];
+      long position = channel.size() - corruptionLength;
+      byte[] buffer = new byte[corruptionLength];
       new Random(13L).nextBytes(buffer);
       channel.write(ByteBuffer.wrap(buffer), position);
       file.close();
       LOG.info("Deliberately corrupting file " + metaFile.getName() +
-          " at offset " + position + " length " + length);
+          " at offset " + position + " length " + corruptionLength);
 
       // read all files to trigger detection of corrupted replica
       try {
@@ -160,10 +162,13 @@ public class TestListCorruptFileBlocks {
           HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
       FileSystem fs = cluster.getFileSystem();
 
+      // Files are corrupted with 2 bytes before the end of the file,
+      // so that's the minimum length.
+      final int corruptionLength = 2;
       // create two files with one block each
       DFSTestUtil util = new DFSTestUtil.Builder().
           setName("testListCorruptFileBlocksInSafeMode").setNumFiles(2).
-          setMaxLevels(1).setMaxSize(512).build();
+          setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
       util.createFiles(fs, "/srcdat10");
 
       // fetch bad file list from namenode. There should be none.
@@ -183,14 +188,13 @@ public class TestListCorruptFileBlocks {
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
-      long position = channel.size() - 2;
-      int length = 2;
-      byte[] buffer = new byte[length];
+      long position = channel.size() - corruptionLength;
+      byte[] buffer = new byte[corruptionLength];
       new Random(13L).nextBytes(buffer);
       channel.write(ByteBuffer.wrap(buffer), position);
       file.close();
       LOG.info("Deliberately corrupting file " + metaFile.getName() +
-          " at offset " + position + " length " + length);
+          " at offset " + position + " length " + corruptionLength);
 
       // read all files to trigger detection of corrupted replica
       try {
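The fix is the setMinSize(corruptionLength) call on the DFSTestUtil builder in both tests: every generated file is now at least two bytes long, so channel.size() - corruptionLength can never go negative.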

