HDFS-4681. TestBlocksWithNotEnoughRacks#testCorruptBlockRereplicatedAcrossRacks fails using IBM java (Ayappan via aw)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/133d04cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/133d04cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/133d04cb

Branch: refs/heads/HDFS-7285
Commit: 133d04cb8972fa3e1e7a7babdc41c21b7340e34c
Parents: 260883b
Author: Allen Wittenauer <[email protected]>
Authored: Sat Feb 28 23:32:09 2015 -0800
Committer: Zhe Zhang <[email protected]>
Committed: Mon Mar 2 09:13:55 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt            |  3 +++
 .../test/java/org/apache/hadoop/hdfs/DFSTestUtil.java  | 12 ++++++++++++
 .../java/org/apache/hadoop/hdfs/MiniDFSCluster.java    | 10 ++++++++++
 .../blockmanagement/TestBlocksWithNotEnoughRacks.java  |  7 ++++---
 4 files changed, 29 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/133d04cb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2a8da43..16fe394 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -306,6 +306,9 @@ Trunk (Unreleased)
     HDFS-7803. Wrong command mentioned in HDFSHighAvailabilityWithQJM
     documentation (Arshad Mohammad via aw)
 
+    HDFS-4681. TestBlocksWithNotEnoughRacks#testCorruptBlockRereplicatedAcrossRacks
+    fails using IBM java (Ayappan via aw)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/133d04cb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 5f05d94..c3dac35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -252,6 +252,12 @@ public class DFSTestUtil {
   public void createFiles(FileSystem fs, String topdir) throws IOException {
     createFiles(fs, topdir, (short)3);
   }
+
+  public static byte[] readFileAsBytes(FileSystem fs, Path fileName) throws IOException {
+    ByteArrayOutputStream os = new ByteArrayOutputStream();
+    IOUtils.copyBytes(fs.open(fileName), os, 1024, true);
+    return os.toByteArray();
+  }
 
   /** create nFiles with random names and directory hierarchies
    *  with random (but reproducible) data in them.
@@ -724,6 +730,12 @@ public class DFSTestUtil {
     return b.toString();
   }
 
+  public static byte[] readFileAsBytes(File f) throws IOException {
+    ByteArrayOutputStream os = new ByteArrayOutputStream();
+    IOUtils.copyBytes(new FileInputStream(f), os, 1024, true);
+    return os.toByteArray();
+  }
+
   /* Write the given string to the given file */
   public static void writeFile(FileSystem fs, Path p, String s)
       throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/133d04cb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 5297ba2..2c1d07e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1869,6 +1869,16 @@ public class MiniDFSCluster {
     return null;
   }
 
+  public byte[] readBlockOnDataNodeAsBytes(int i, ExtendedBlock block)
+      throws IOException {
+    assert (i >= 0 && i < dataNodes.size()) : "Invalid datanode "+i;
+    File blockFile = getBlockFile(i, block);
+    if (blockFile != null && blockFile.exists()) {
+      return DFSTestUtil.readFileAsBytes(blockFile);
+    }
+    return null;
+  }
+
   /**
    * Corrupt a block on a particular datanode.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/133d04cb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
index 1bc7cdc..54983a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertArrayEquals;
 
 import java.util.ArrayList;
 
@@ -202,7 +203,7 @@ public class TestBlocksWithNotEnoughRacks {
       final FileSystem fs = cluster.getFileSystem();
       DFSTestUtil.createFile(fs, filePath, fileLen, REPLICATION_FACTOR, 1L);
-      final String fileContent = DFSTestUtil.readFile(fs, filePath);
+      final byte[] fileContent = DFSTestUtil.readFileAsBytes(fs, filePath);
 
       ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
       DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
 
@@ -224,9 +225,9 @@ public class TestBlocksWithNotEnoughRacks {
       // Ensure all replicas are valid (the corrupt replica may not
       // have been cleaned up yet).
       for (int i = 0; i < racks.length; i++) {
-        String blockContent = cluster.readBlockOnDataNode(i, b);
+        byte[] blockContent = cluster.readBlockOnDataNodeAsBytes(i, b);
         if (blockContent != null && i != dnToCorrupt) {
-          assertEquals("Corrupt replica", fileContent, blockContent);
+          assertArrayEquals("Corrupt replica", fileContent, blockContent);
        }
       }
     } finally {
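
----------------------------------------------------------------------
Background on the fix: the test previously fetched the file and each block
replica as a String (DFSTestUtil.readFile / MiniDFSCluster.readBlockOnDataNode)
and compared them with assertEquals. Producing a String means decoding the raw
replica bytes with the JVM's default charset; for arbitrary binary data that
decoding can be lossy, and which charset is the default varies by JVM vendor
and platform (the failure reported here was under IBM Java). The patch reads
raw byte[] instead and compares with assertArrayEquals, taking the charset out
of the comparison entirely. Below is a minimal, self-contained sketch of the
failure mode; the class name and sample bytes are hypothetical, chosen only to
illustrate the lossy round-trip, and are not part of the commit:

import java.nio.charset.Charset;
import java.util.Arrays;

// Hypothetical demo: shows why comparing binary data via String equality
// is charset-dependent while byte[] comparison is not.
public class CharsetRoundTripDemo {
  public static void main(String[] args) {
    // Arbitrary binary data, standing in for the bytes of a block replica.
    byte[] raw = { (byte) 0x00, (byte) 0x9f, (byte) 0xfe, 'a', 'b' };

    // What the old String-based assertion implicitly relied on: decoding
    // with the platform default charset and getting the same bytes back.
    // Byte sequences that are invalid in that charset are replaced during
    // decoding, so the round-trip is not guaranteed to be lossless.
    String decoded = new String(raw, Charset.defaultCharset());
    byte[] roundTripped = decoded.getBytes(Charset.defaultCharset());
    System.out.println("default charset: " + Charset.defaultCharset());
    System.out.println("String round-trip lossless? "
        + Arrays.equals(raw, roundTripped)); // false under e.g. a UTF-8 default

    // What the patched test does (via assertArrayEquals): compare the raw
    // bytes directly. No charset is involved, so the result is identical
    // on every JVM.
    System.out.println("byte[] comparison: "
        + Arrays.equals(raw, raw.clone())); // always true
  }
}
----------------------------------------------------------------------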
