This is an automated email from the ASF dual-hosted git repository.
arp pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-2.9 by this push:
new 4a1d51d HDFS-13677. Dynamic refresh Disk configuration results in
overwriting VolumeMap. Contributed by xuzq and Stephen O'Donnell.
4a1d51d is described below
commit 4a1d51dea2149e8f458467467798e81b126b7cc5
Author: Arpit Agarwal <[email protected]>
AuthorDate: Mon May 6 13:34:38 2019 -0700
HDFS-13677. Dynamic refresh Disk configuration results in overwriting
VolumeMap. Contributed by xuzq and Stephen O'Donnell.
(cherry picked from commit 102c8fca10f3c626ab8bc47f818c8391a5c35289)
---
.../datanode/fsdataset/impl/FsDatasetImpl.java | 2 +-
.../server/datanode/fsdataset/impl/ReplicaMap.java | 14 ++++-
.../datanode/TestDataNodeHotSwapVolumes.java | 70 ++++++++++++++++++++++
.../datanode/fsdataset/impl/TestReplicaMap.java | 22 +++++++
4 files changed, 106 insertions(+), 2 deletions(-)
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 4486b73..9946a3f 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -432,7 +432,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
LOG.error(errorMsg);
throw new IOException(errorMsg);
}
- volumeMap.addAll(replicaMap);
+ volumeMap.mergeAll(replicaMap);
storageMap.put(sd.getStorageUuid(),
new DatanodeStorage(sd.getStorageUuid(),
DatanodeStorage.State.NORMAL,
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index 5705792..e94670a 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -126,7 +126,19 @@ class ReplicaMap {
void addAll(ReplicaMap other) {
map.putAll(other.map);
}
-
+
+
+ /**
+ * Merge all entries from the given replica map into the local replica map.
+ */
+ void mergeAll(ReplicaMap other) {
+ for(String bp : other.getBlockPoolList()) {
+ for(ReplicaInfo r : other.map.get(bp)) {
+ add(bp, r);
+ }
+ }
+ }
+
/**
* Remove the replica's meta information from the map that matches
* the input block's id and generation stamp
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index ea28ea4..125b431 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.junit.After;
import org.junit.Assert;
@@ -416,6 +417,75 @@ public class TestDataNodeHotSwapVolumes {
verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
}
+ /**
+ * Test re-adding one volume with some blocks on a running MiniDFSCluster
+ * with only one NameNode to reproduce HDFS-13677.
+ */
+ @Test(timeout=60000)
+ public void testReAddVolumeWithBlocks()
+ throws IOException, ReconfigurationException,
+ InterruptedException, TimeoutException {
+ startDFSCluster(1, 1);
+ String bpid = cluster.getNamesystem().getBlockPoolId();
+ final int numBlocks = 10;
+
+ Path testFile = new Path("/test");
+ createFile(testFile, numBlocks);
+
+ List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
+ cluster.getAllBlockReports(bpid);
+ assertEquals(1, blockReports.size()); // 1 DataNode
+ assertEquals(2, blockReports.get(0).size()); // 2 volumes
+
+ // Now remove the second volume
+ DataNode dn = cluster.getDataNodes().get(0);
+ Collection<String> oldDirs = getDataDirs(dn);
+ String newDirs = oldDirs.iterator().next(); // Keep the first volume.
+ assertThat(
+ "DN did not update its own config",
+ dn.reconfigurePropertyImpl(
+ DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs),
+ is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
+ assertFileLocksReleased(
+ new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));
+
+ // Now create another file - the first volume should have 15 blocks
+ // and 5 blocks on the previously removed volume
+ createFile(new Path("/test2"), numBlocks);
+ dn.scheduleAllBlockReport(0);
+ blockReports = cluster.getAllBlockReports(bpid);
+
+ assertEquals(1, blockReports.size()); // 1 DataNode
+ assertEquals(1, blockReports.get(0).size()); // 1 volume
+ for (BlockListAsLongs blockList : blockReports.get(0).values()) {
+ assertEquals(15, blockList.getNumberOfBlocks());
+ }
+
+ // Now add the original volume back again and ensure 15 blocks are reported
+ assertThat(
+ "DN did not update its own config",
+ dn.reconfigurePropertyImpl(
+ DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+ StringUtils.join(",", oldDirs)),
+ is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
+ dn.scheduleAllBlockReport(0);
+ blockReports = cluster.getAllBlockReports(bpid);
+
+ assertEquals(1, blockReports.size()); // 1 DataNode
+ assertEquals(2, blockReports.get(0).size()); // 2 volumes
+
+ // The order of the block reports is not guaranteed. As we expect 2, get the
+ // max block count and the min block count and then assert on that.
+ int minNumBlocks = Integer.MAX_VALUE;
+ int maxNumBlocks = Integer.MIN_VALUE;
+ for (BlockListAsLongs blockList : blockReports.get(0).values()) {
+ minNumBlocks = Math.min(minNumBlocks, blockList.getNumberOfBlocks());
+ maxNumBlocks = Math.max(maxNumBlocks, blockList.getNumberOfBlocks());
+ }
+ assertEquals(5, minNumBlocks);
+ assertEquals(15, maxNumBlocks);
+ }
+
@Test(timeout=60000)
public void testAddVolumesDuringWrite()
throws IOException, InterruptedException, TimeoutException,
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
index 4fa91b0..1059c08 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
@@ -108,4 +108,26 @@ public class TestReplicaMap {
map.add(bpid, new FinalizedReplica(block, null, null));
assertNotNull(map.remove(bpid, block.getBlockId()));
}
+
+ @Test
+ public void testMergeAll() {
+ ReplicaMap temReplicaMap = new ReplicaMap(new AutoCloseableLock());
+ Block tmpBlock = new Block(5678, 5678, 5678);
+ temReplicaMap.add(bpid, new FinalizedReplica(tmpBlock, null, null));
+
+ map.mergeAll(temReplicaMap);
+ assertNotNull(map.get(bpid, 1234));
+ assertNotNull(map.get(bpid, 5678));
+ }
+
+ @Test
+ public void testAddAll() {
+ ReplicaMap temReplicaMap = new ReplicaMap(new AutoCloseableLock());
+ Block tmpBlock = new Block(5678, 5678, 5678);
+ temReplicaMap.add(bpid, new FinalizedReplica(tmpBlock, null, null));
+
+ map.addAll(temReplicaMap);
+ assertNull(map.get(bpid, 1234));
+ assertNotNull(map.get(bpid, 5678));
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]