MingXiangLi commented on a change in pull request #4085:
URL: https://github.com/apache/hadoop/pull/4085#discussion_r835863275
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
##########
@@ -602,6 +606,54 @@ public void run() {}
+ "volumeMap.", 0, totalNumReplicas);
}
+ @Test(timeout = 30000)
+ public void testConcurrentWriteAndDeleteBlock() throws Exception {
+ // Feed FsDataset with block metadata.
+ final int numBlocks = 1000;
+ final int threadCount = 10;
+ // Generate data blocks.
+ ExecutorService pool = Executors.newFixedThreadPool(threadCount);
+ List<Future<?>> futureList = new ArrayList<>();
+ Random random = new Random();
+ // Randomly write blocks and delete half of them.
+ for (int i = 0; i < threadCount; i++) {
+ Thread thread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ String bpid =
BLOCK_POOL_IDS[random.nextInt(BLOCK_POOL_IDS.length)];
+ for (int blockId = 0; blockId < numBlocks; blockId++) {
+ ExtendedBlock eb = new ExtendedBlock(bpid, blockId);
+ ReplicaHandler replica = null;
+ try {
+ replica = dataset.createRbw(StorageType.DEFAULT, null, eb,
+ false);
+ if (blockId % 2 > 0) {
+ dataset.invalidate(bpid, new Block[]{eb.getLocalBlock()});
+ }
+ } finally {
+ if (replica != null) {
+ replica.close();
+ }
+ }
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
Review comment:
OK, it has already been updated.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]