This is an automated email from the ASF dual-hosted git repository.
tasanuma pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new 1b11815befbe HDFS-17298. Fix NPE in DataNode.handleBadBlock and BlockSender (#6374)
1b11815befbe is described below
commit 1b11815befbe3b9165057c48ef95ff5ee30e64ec
Author: huhaiyang <[email protected]>
AuthorDate: Tue Dec 26 08:41:10 2023 +0800
HDFS-17298. Fix NPE in DataNode.handleBadBlock and BlockSender (#6374)
(cherry picked from commit 7e2ebfc17ef25aa4472e576517105538cdcf43cd)
---
.../java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java | 8 +++++++-
.../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 8 ++++++--
2 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index f2724d062cea..17b75a980159 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
@@ -295,7 +296,12 @@ class BlockSender implements java.io.Closeable {
(!is32Bit || length <= Integer.MAX_VALUE);
// Obtain a reference before reading data
- volumeRef = datanode.data.getVolume(block).obtainReference();
+ FsVolumeSpi volume = datanode.data.getVolume(block);
+ if (volume == null) {
+      LOG.warn("Cannot find FsVolumeSpi to obtain a reference for block: {}", block);
+ throw new ReplicaNotFoundException(block);
+ }
+ volumeRef = volume.obtainReference();
/*
* (corruptChecksumOK, meta_file_exist): operation
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index d047a4d31312..df04ce79eeaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3840,8 +3840,12 @@ public class DataNode extends ReconfigurableBase
return;
}
if (!fromScanner && blockScanner.isEnabled()) {
- blockScanner.markSuspectBlock(data.getVolume(block).getStorageID(),
- block);
+ FsVolumeSpi volume = data.getVolume(block);
+ if (volume == null) {
+ LOG.warn("Cannot find FsVolumeSpi to handle bad block: {}", block);
+ return;
+ }
+ blockScanner.markSuspectBlock(volume.getStorageID(), block);
} else {
try {
reportBadBlocks(block);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]