ZanderXu commented on code in PR #4903:
URL: https://github.com/apache/hadoop/pull/4903#discussion_r981060797


##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java:
##########
@@ -1891,23 +1894,22 @@ public void delayDeleteReplica() {
       // If this replica is deleted from memory, the client would get a ReplicaNotFoundException.
       assertNotNull(ds.getStoredBlock(bpid, extendedBlock.getBlockId()));
 
-      // Make it resume the removeReplicaFromMem method
+      // Make it resume the removeReplicaFromMem method.
       semaphore.release(1);
 
       // Sleep for 1 second so that datanode can complete invalidate.

Review Comment:
   How about changing this comment to `Waiting for the async deletion task to finish`?
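   e.g., a sketch of how that spot would then read (comment wording only; the wait itself is unchanged):
   ```java
   // Waiting for the async deletion task to finish.
   GenericTestUtils.waitFor(() -> ds.asyncDiskService.countPendingDeletions() == 0,
       100, 1000);
   ```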



##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java:
##########
@@ -1891,23 +1894,22 @@ public void delayDeleteReplica() {
       // If this replica is deleted from memory, the client would get a ReplicaNotFoundException.
       assertNotNull(ds.getStoredBlock(bpid, extendedBlock.getBlockId()));
 
-      // Make it resume the removeReplicaFromMem method
+      // Make it resume the removeReplicaFromMem method.
       semaphore.release(1);
 
       // Sleep for 1 second so that datanode can complete invalidate.
-      GenericTestUtils.waitFor(new com.google.common.base.Supplier<Boolean>() {
-        @Override public Boolean get() {
-          return ds.asyncDiskService.countPendingDeletions() == 0;
-        }
-      }, 100, 1000);
+      GenericTestUtils.waitFor(() -> ds.asyncDiskService.countPendingDeletions() == 0,
+          100, 1000);

Review Comment:
   single line.
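   e.g. `GenericTestUtils.waitFor(() -> ds.asyncDiskService.countPendingDeletions() == 0, 100, 1000);` (assuming it still fits within the line-length limit).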



##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java:
##########
@@ -2400,6 +2400,93 @@ public void invalidate(String bpid, ReplicaInfo block) {
         block.getStorageUuid());
   }
 
+  /**
+   * Remove Replica from ReplicaMap
+   *
+   * @param block
+   * @param volume
+   * @return
+   */
+  public boolean removeReplicaFromMem(final ExtendedBlock block, final FsVolumeImpl volume) {
+    final String blockPoolId = block.getBlockPoolId();
+    final Block localBlock = block.getLocalBlock();
+    final long blockId = localBlock.getBlockId();
+    try (AutoCloseableLock lock = lockManager.writeLock(
+        LockLevel.BLOCK_POOl, blockPoolId)) {
+      final ReplicaInfo info = volumeMap.get(blockPoolId, localBlock);
+      if (info == null) {
+        ReplicaInfo infoByBlockId =
+            volumeMap.get(blockPoolId, blockId);
+        if (infoByBlockId == null) {
+          // It is okay if the block is not found -- it
+          // may be deleted earlier.
+          LOG.info("Failed to delete replica {}: ReplicaInfo not found " +
+                  "in removeReplicaFromMem.", localBlock);
+        } else {
+          LOG.error("Failed to delete replica {}: GenerationStamp not matched, 
" +
+              "existing replica is {} in removeReplicaFromMem.",
+              localBlock, Block.toString(infoByBlockId));
+        }
+        return false;
+      }
+
+      FsVolumeImpl v = (FsVolumeImpl)info.getVolume();

Review Comment:
   `FsVolumeImpl v = (FsVolumeImpl) info.getVolume();`



##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java:
##########
@@ -2400,6 +2400,93 @@ public void invalidate(String bpid, ReplicaInfo block) {
         block.getStorageUuid());
   }
 
+  /**
+   * Remove Replica from ReplicaMap
+   *
+   * @param block
+   * @param volume
+   * @return
+   */
+  public boolean removeReplicaFromMem(final ExtendedBlock block, final FsVolumeImpl volume) {
+    final String blockPoolId = block.getBlockPoolId();
+    final Block localBlock = block.getLocalBlock();
+    final long blockId = localBlock.getBlockId();
+    try (AutoCloseableLock lock = lockManager.writeLock(
+        LockLevel.BLOCK_POOl, blockPoolId)) {
+      final ReplicaInfo info = volumeMap.get(blockPoolId, localBlock);
+      if (info == null) {
+        ReplicaInfo infoByBlockId =
+            volumeMap.get(blockPoolId, blockId);
+        if (infoByBlockId == null) {
+          // It is okay if the block is not found -- it
+          // may be deleted earlier.
+          LOG.info("Failed to delete replica {}: ReplicaInfo not found " +
+                  "in removeReplicaFromMem.", localBlock);
+        } else {
+          LOG.error("Failed to delete replica {}: GenerationStamp not matched, 
" +
+              "existing replica is {} in removeReplicaFromMem.",
+              localBlock, Block.toString(infoByBlockId));
+        }
+        return false;
+      }
+
+      FsVolumeImpl v = (FsVolumeImpl)info.getVolume();
+      if (v == null) {
+        LOG.error("Failed to delete replica {}. No volume for this replica {} "
+            + "in removeReplicaFromMem.", localBlock, info);
+        return false;
+      }
+
+      try {
+        File blockFile = new File(info.getBlockURI());
+        if (blockFile != null && blockFile.getParentFile() == null) {

Review Comment:
   Can remove the redundant `blockFile != null` check.
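   (`new File(uri)` either returns a non-null object or throws, so `if (blockFile.getParentFile() == null) {` is enough.)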



##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java:
##########
@@ -2400,6 +2400,93 @@ public void invalidate(String bpid, ReplicaInfo block) {
         block.getStorageUuid());
   }
 
+  /**
+   * Remove Replica from ReplicaMap
+   *
+   * @param block
+   * @param volume
+   * @return
+   */
+  public boolean removeReplicaFromMem(final ExtendedBlock block, final FsVolumeImpl volume) {
+    final String blockPoolId = block.getBlockPoolId();
+    final Block localBlock = block.getLocalBlock();
+    final long blockId = localBlock.getBlockId();
+    try (AutoCloseableLock lock = lockManager.writeLock(
+        LockLevel.BLOCK_POOl, blockPoolId)) {

Review Comment:
   single line.
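   e.g. `try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, blockPoolId)) {` (should still be within the line-length limit).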



##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java:
##########
@@ -340,9 +340,10 @@ private boolean moveFiles() {
     @Override
     public void run() {
       try {
-        // For testing. Normally no-op.
+        // For testing: simulate the case where asynchronous replica
+        // deletion tasks are stacked pending.
         DataNodeFaultInjector.get().delayDeleteReplica();
-        if (!removeReplicaFromMem()){
+        if (!fsdatasetImpl.removeReplicaFromMem(block, volume)){

Review Comment:
   `if (!fsdatasetImpl.removeReplicaFromMem(block, volume)) {`



##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java:
##########
@@ -2400,6 +2400,93 @@ public void invalidate(String bpid, ReplicaInfo block) {
         block.getStorageUuid());
   }
 
+  /**
+   * Remove Replica from ReplicaMap
+   *
+   * @param block
+   * @param volume
+   * @return
+   */
+  public boolean removeReplicaFromMem(final ExtendedBlock block, final FsVolumeImpl volume) {
+    final String blockPoolId = block.getBlockPoolId();
+    final Block localBlock = block.getLocalBlock();
+    final long blockId = localBlock.getBlockId();
+    try (AutoCloseableLock lock = lockManager.writeLock(
+        LockLevel.BLOCK_POOl, blockPoolId)) {
+      final ReplicaInfo info = volumeMap.get(blockPoolId, localBlock);
+      if (info == null) {
+        ReplicaInfo infoByBlockId =
+            volumeMap.get(blockPoolId, blockId);
+        if (infoByBlockId == null) {
+          // It is okay if the block is not found -- it
+          // may be deleted earlier.
+          LOG.info("Failed to delete replica {}: ReplicaInfo not found " +
+                  "in removeReplicaFromMem.", localBlock);
+        } else {
+          LOG.error("Failed to delete replica {}: GenerationStamp not matched, 
" +
+              "existing replica is {} in removeReplicaFromMem.",
+              localBlock, Block.toString(infoByBlockId));
+        }
+        return false;
+      }
+
+      FsVolumeImpl v = (FsVolumeImpl)info.getVolume();
+      if (v == null) {
+        LOG.error("Failed to delete replica {}. No volume for this replica {} "
+            + "in removeReplicaFromMem.", localBlock, info);
+        return false;
+      }
+
+      try {
+        File blockFile = new File(info.getBlockURI());
+        if (blockFile != null && blockFile.getParentFile() == null) {
+          LOG.error("Failed to delete replica {}. Parent not found for block 
file: {} "
+              + "in removeReplicaFromMem.", localBlock, blockFile);
+          return false;
+        }
+      } catch(IllegalArgumentException e) {
+        LOG.warn("Parent directory check failed; replica {} is " +
+            "not backed by a local file in removeReplicaFromMem.", info);
+      }
+
+      if (!volume.getStorageID().equals(v.getStorageID())) {
+        LOG.error("Failed to delete replica {}. Appear different volumes, 
oldVolume={}" +
+            " and newVolume={} for this replica in removeReplicaFromMem.",
+            localBlock, volume, v);
+        return false;
+      }
+
+      ReplicaInfo removing = volumeMap.remove(blockPoolId, localBlock);
+      addDeletingBlock(blockPoolId, removing.getBlockId());
+      LOG.debug("Block file {} is to be deleted", removing.getBlockURI());
+      datanode.getMetrics().incrBlocksRemoved(1);
+      if (removing instanceof ReplicaInPipeline) {
+        ((ReplicaInPipeline) removing).releaseAllBytesReserved();
+      }
+    }
+
+    if (volume.isTransientStorage()) {
+      RamDiskReplicaTracker.RamDiskReplica replicaInfo = ramDiskReplicaTracker.
+          getReplica(blockPoolId, blockId);
+      if (replicaInfo != null) {
+        if (!replicaInfo.getIsPersisted()) {
+          datanode.getMetrics().incrRamDiskBlocksDeletedBeforeLazyPersisted();
+        }
+        ramDiskReplicaTracker.discardReplica(replicaInfo.getBlockPoolId(),
+            replicaInfo.getBlockId(), true);
+      }
+    }
+
+    // If a DFSClient has the replica in its cache of short-circuit file
+    // descriptors (and the client is using ShortCircuitShm), invalidate it.
+    datanode.getShortCircuitRegistry().processBlockInvalidation(
+        new ExtendedBlockId(blockId, blockPoolId));

Review Comment:
   How about using the parameter `block`?
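   e.g. something like `datanode.getShortCircuitRegistry().processBlockInvalidation(ExtendedBlockId.fromExtendedBlock(block));`, assuming `ExtendedBlockId#fromExtendedBlock` is usable here, instead of rebuilding the id from `blockId` and `blockPoolId`.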



##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java:
##########
@@ -2400,6 +2400,93 @@ public void invalidate(String bpid, ReplicaInfo block) {
         block.getStorageUuid());
   }
 
+  /**
+   * Remove Replica from ReplicaMap
+   *
+   * @param block
+   * @param volume
+   * @return
+   */
+  public boolean removeReplicaFromMem(final ExtendedBlock block, final FsVolumeImpl volume) {

Review Comment:
   Can remove the `public` modifier.
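   i.e. `boolean removeReplicaFromMem(final ExtendedBlock block, final FsVolumeImpl volume) {`, assuming its only caller is `FsDatasetAsyncDiskService`, which lives in the same package.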



##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java:
##########
@@ -2400,6 +2400,93 @@ public void invalidate(String bpid, ReplicaInfo block) {
         block.getStorageUuid());
   }
 
+  /**
+   * Remove Replica from ReplicaMap
+   *
+   * @param block
+   * @param volume
+   * @return
+   */
+  public boolean removeReplicaFromMem(final ExtendedBlock block, final FsVolumeImpl volume) {
+    final String blockPoolId = block.getBlockPoolId();
+    final Block localBlock = block.getLocalBlock();
+    final long blockId = localBlock.getBlockId();
+    try (AutoCloseableLock lock = lockManager.writeLock(
+        LockLevel.BLOCK_POOl, blockPoolId)) {
+      final ReplicaInfo info = volumeMap.get(blockPoolId, localBlock);
+      if (info == null) {
+        ReplicaInfo infoByBlockId =
+            volumeMap.get(blockPoolId, blockId);

Review Comment:
   single line.
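   i.e. `ReplicaInfo infoByBlockId = volumeMap.get(blockPoolId, blockId);`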



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.


