Repository: hadoop

Updated Branches:
  refs/heads/branch-2 88c749a90 -> 737dca195
HDFS-11399. Many tests fails in Windows due to injecting disk failures. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/737dca19
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/737dca19
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/737dca19

Branch: refs/heads/branch-2
Commit: 737dca1959643dcff3ae9c89325e842d49dff1f6
Parents: 88c749a
Author: Inigo Goiri <inigo...@apache.org>
Authored: Mon Mar 12 12:54:42 2018 -0700
Committer: Inigo Goiri <inigo...@apache.org>
Committed: Mon Mar 12 12:54:42 2018 -0700

----------------------------------------------------------------------
 .../blockmanagement/TestBlockStatsMXBean.java   |  8 ++++++++
 .../datanode/TestDataNodeVolumeFailure.java     | 20 ++++++++++++++++++++
 2 files changed, 28 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/737dca19/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
index eb4f6d7..096df66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -39,10 +39,12 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.util.Shell;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.internal.AssumptionViolatedException;
 import org.mortbay.util.ajax.JSON;
 import org.junit.rules.Timeout;
 
@@ -160,6 +162,12 @@ public class TestBlockStatsMXBean {
 
   @Test
   public void testStorageTypeStatsWhenStorageFailed() throws Exception {
+    if (Shell.WINDOWS) {
+      // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+      // volume failures which is currently not supported on Windows.
+      throw new AssumptionViolatedException("Expected Unix-like platform");
+    }
+
     DFSTestUtil.createFile(cluster.getFileSystem(),
         new Path("/blockStatsFile1"), 1024, (short) 1, 0L);
     Map<StorageType, StorageTypeStats> storageTypeStatsMap = cluster

http://git-wip-us.apache.org/repos/asf/hadoop/blob/737dca19/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 216ecaf..bafc7e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Shell;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.filefilter.TrueFileFilter;
@@ -83,6 +84,7 @@ import com.google.common.base.Supplier;
 
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.internal.AssumptionViolatedException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -307,6 +309,12 @@ public class TestDataNodeVolumeFailure {
   @Test(timeout=10000)
   public void testDataNodeShutdownAfterNumFailedVolumeExceedsTolerated()
       throws InterruptedException, IOException {
+    if (Shell.WINDOWS) {
+      // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+      // volume failures which is currently not supported on Windows.
+      throw new AssumptionViolatedException("Expected Unix-like platform");
+    }
+
     // make both data directories to fail on dn0
     final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
     final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
@@ -326,6 +334,12 @@ public class TestDataNodeVolumeFailure {
   @Test
   public void testVolumeFailureRecoveredByHotSwappingVolume()
       throws InterruptedException, ReconfigurationException, IOException {
+    if (Shell.WINDOWS) {
+      // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+      // volume failures which is currently not supported on Windows.
+      throw new AssumptionViolatedException("Expected Unix-like platform");
+    }
+
     final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
     final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
     final DataNode dn0 = cluster.getDataNodes().get(0);
@@ -364,6 +378,12 @@ public class TestDataNodeVolumeFailure {
   @Test
   public void testTolerateVolumeFailuresAfterAddingMoreVolumes()
       throws InterruptedException, ReconfigurationException, IOException {
+    if (Shell.WINDOWS) {
+      // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+      // volume failures which is currently not supported on Windows.
+      throw new AssumptionViolatedException("Expected Unix-like platform");
+    }
+
     final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
     final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
     final File dn0VolNew = new File(dataDir, "data_new");


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
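
Editor's note on the pattern the patch applies: throwing org.junit.internal.AssumptionViolatedException makes JUnit 4 report the test as skipped rather than failed, so these tests no longer fail on Windows, where the disk-failure injection they rely on (DataNodeTestUtils#injectDataDirFailure(), per the added comments) is not supported. The standalone sketch below shows the same skip behaviour using the more common org.junit.Assume idiom. It is a minimal illustration only: the class and test names are made up and are not part of this commit, and the Shell.WINDOWS check is inlined so the sketch compiles without hadoop-common on the classpath.

import org.junit.Assume;
import org.junit.Test;

/**
 * Minimal sketch (hypothetical class, not from this commit) of a
 * platform guard that skips a test on Windows instead of failing it.
 */
public class WindowsSkipSketch {

  // Equivalent of org.apache.hadoop.util.Shell.WINDOWS, inlined here so the
  // sketch has no Hadoop dependency.
  private static final boolean WINDOWS =
      System.getProperty("os.name").startsWith("Windows");

  @Test
  public void testThatNeedsUnixStylePermissions() {
    // Marks the test as skipped (not failed) when running on Windows; the
    // message shows up in the test report, mirroring the "Expected
    // Unix-like platform" text used in the patch.
    Assume.assumeFalse("Expected Unix-like platform", WINDOWS);

    // ... a test body that injects a data-directory failure on a
    // Unix-like host would go here ...
  }
}

Throwing AssumptionViolatedException directly, as the patch does, has the same effect at run time; the Assume form just expresses the skip condition as a one-liner. Either way the check is evaluated on each run, so the guarded tests still execute normally on Unix-like hosts.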