Repository: hadoop Updated Branches: refs/heads/branch-2 94eb8e5b3 -> e39c58fd9
HDFS-7961. Trigger full block report after hot swapping disk. Contributed by Eddy Xu. (cherry picked from commit 6413d34986f3399023426c89c9a0d401c9557716) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e39c58fd Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e39c58fd Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e39c58fd Branch: refs/heads/branch-2 Commit: e39c58fd9dfa03851301d105abcb2e62f757b3f5 Parents: 94eb8e5 Author: Andrew Wang <w...@apache.org> Authored: Tue Mar 24 09:07:02 2015 -0700 Committer: Andrew Wang <w...@apache.org> Committed: Tue Mar 24 09:07:24 2015 -0700 ---------------------------------------------------------------------- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../hadoop/hdfs/server/datanode/DataNode.java | 4 ++ .../datanode/TestDataNodeHotSwapVolumes.java | 42 ++++++++++++++++++++ 3 files changed, 48 insertions(+) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e39c58fd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 8977904..dda75f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -951,6 +951,8 @@ Release 2.7.0 - UNRELEASED HDFS-7960. The full block report should prune zombie storages even if they're not empty. (cmccabe and Eddy Xu via wang) + HDFS-7961. Trigger full block report after hot swapping disk. (Eddy Xu via wang) + BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS HDFS-7720. 
Quota by Storage Type API, tools and ClientNameNode http://git-wip-us.apache.org/repos/asf/hadoop/blob/e39c58fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index b32a0fc..c6641f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -637,6 +637,10 @@ public class DataNode extends ReconfigurableBase conf.set(DFS_DATANODE_DATA_DIR_KEY, Joiner.on(",").join(effectiveVolumes)); dataDirs = getStorageLocations(conf); + + // Send a full block report to let NN acknowledge the volume changes. + triggerBlockReport(new BlockReportOptions.Factory() + .setIncremental(false).build()); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/e39c58fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java index 2f51d45..f5772e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java @@ -34,12 +34,16 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import 
org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl; +import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; +import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Test; @@ -59,6 +63,7 @@ import java.util.concurrent.TimeoutException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.mockito.Mockito; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; import static org.hamcrest.CoreMatchers.anyOf; @@ -70,6 +75,9 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.timeout; public class TestDataNodeHotSwapVolumes { private static final Log LOG = LogFactory.getLog( @@ -702,4 +710,38 @@ public class TestDataNodeHotSwapVolumes { // More data has been written to this volume. 
    assertTrue(restoredVolume.getDfsUsed() > used);
  }
+
+  /** Test that a full block report is sent after hot swapping volumes */
+  @Test(timeout=100000)
+  public void testFullBlockReportAfterRemovingVolumes()
+      throws IOException, ReconfigurationException {
+
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+
+    // Similar to TestTriggerBlockReport, set a really long value for
+    // dfs.heartbeat.interval, so that incremental block reports and heartbeats
+    // won't be sent during this test unless they're triggered
+    // manually.
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10800000L);
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1080L);
+
+    // NOTE(review): `cluster` is a test-class field; it is presumably shut
+    // down in an @After method outside this hunk - confirm against the
+    // surrounding class before relying on cleanup.
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    cluster.waitActive();
+
+    final DataNode dn = cluster.getDataNodes().get(0);
+    // Spy on the DN -> NN protocol translator; presumably this lets us
+    // observe outgoing blockReport RPCs without altering them - TODO confirm
+    // against DataNodeTestUtils.spyOnBposToNN.
+    DatanodeProtocolClientSideTranslatorPB spy =
+        DataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode());
+
+    // Remove a data dir from datanode
+    // (reconfiguring DFS_DATANODE_DATA_DIR_KEY down to only "data1" drops
+    // every other configured volume - that drop is the "removal" under test)
+    File dataDirToKeep = new File(cluster.getDataDirectory(), "data1");
+    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, dataDirToKeep.toString());
+
+    // We should get 1 full report
+    // (Mockito's timeout(60000) polls until the verification passes or 60s
+    // elapse, so the asynchronous post-reconfigure report is awaited, not
+    // assumed to have already happened.)
+    Mockito.verify(spy, timeout(60000).times(1)).blockReport(
+        any(DatanodeRegistration.class),
+        anyString(),
+        any(StorageBlockReport[].class),
+        any(BlockReportContext.class));
+  }
 }