This is an automated email from the ASF dual-hosted git repository.
tasanuma pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.3 by this push:
new e60b8a0 HDFS-16377. Should CheckNotNull before access FsDatasetSpi (#3784)
e60b8a0 is described below
commit e60b8a035867b5ac29e65978452ae4d52ca1bec9
Author: litao <[email protected]>
AuthorDate: Thu Dec 16 12:49:50 2021 +0800
HDFS-16377. Should CheckNotNull before access FsDatasetSpi (#3784)
Reviewed-by: Viraj Jasani <[email protected]>
Signed-off-by: Takanobu Asanuma <[email protected]>
(cherry picked from commit 22f5e1885d21c9db9ceecf611128508542ec0f11)
---
.../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index f35534c..c1507a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -791,6 +791,7 @@ public class DataNode extends ReconfigurableBase
.newFixedThreadPool(changedVolumes.newLocations.size());
List<Future<IOException>> exceptions = Lists.newArrayList();
+ Preconditions.checkNotNull(data, "Storage not yet initialized");
for (final StorageLocation location : changedVolumes.newLocations) {
exceptions.add(service.submit(new Callable<IOException>() {
@Override
@@ -890,6 +891,7 @@ public class DataNode extends ReconfigurableBase
clearFailure, Joiner.on(",").join(storageLocations)));
IOException ioe = null;
+ Preconditions.checkNotNull(data, "Storage not yet initialized");
// Remove volumes and block infos from FsDataset.
data.removeVolumes(storageLocations, clearFailure);
@@ -1968,6 +1970,7 @@ public class DataNode extends ReconfigurableBase
FileInputStream fis[] = new FileInputStream[2];
try {
+ Preconditions.checkNotNull(data, "Storage not yet initialized");
fis[0] = (FileInputStream)data.getBlockInputStream(blk, 0);
fis[1] = DatanodeUtil.getMetaDataInputStream(blk, data);
} catch (ClassCastException e) {
@@ -2947,6 +2950,7 @@ public class DataNode extends ReconfigurableBase
@Override // InterDatanodeProtocol
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException {
+ Preconditions.checkNotNull(data, "Storage not yet initialized");
return data.initReplicaRecovery(rBlock);
}
@@ -2957,6 +2961,7 @@ public class DataNode extends ReconfigurableBase
public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock,
final long recoveryId, final long newBlockId, final long newLength)
throws IOException {
+ Preconditions.checkNotNull(data, "Storage not yet initialized");
final Replica r = data.updateReplicaUnderRecovery(oldBlock,
recoveryId, newBlockId, newLength);
// Notify the namenode of the updated block info. This is important
@@ -3238,7 +3243,7 @@ public class DataNode extends ReconfigurableBase
"The block pool is still running. First do a refreshNamenodes to " +
"shutdown the block pool service");
}
-
+ Preconditions.checkNotNull(data, "Storage not yet initialized");
data.deleteBlockPool(blockPoolId, force);
}
@@ -3682,6 +3687,7 @@ public class DataNode extends ReconfigurableBase
@Override
public List<DatanodeVolumeInfo> getVolumeReport() throws IOException {
checkSuperuserPrivilege();
+ Preconditions.checkNotNull(data, "Storage not yet initialized");
Map<String, Object> volumeInfoMap = data.getVolumeInfoMap();
if (volumeInfoMap == null) {
LOG.warn("DataNode volume info not available.");
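
For readers unfamiliar with the pattern, below is a minimal, standalone sketch (not part of the patch; the class, field, and method names are illustrative only) of the guard this commit adds: Guava's Preconditions.checkNotNull turns an access to a not-yet-initialized FsDatasetSpi reference into a fast failure with the message "Storage not yet initialized", instead of an unexplained NullPointerException deeper in the call. The sketch compiles against stock Guava; Hadoop's own build may resolve Preconditions through its shaded thirdparty package, with the same behaviour.

import com.google.common.base.Preconditions;

/**
 * Illustrative stand-in for the DataNode methods touched by this commit.
 * The real code lives in org.apache.hadoop.hdfs.server.datanode.DataNode
 * and guards its FsDatasetSpi field "data".
 */
public class StorageGuardSketch {

  // Stand-in for DataNode#data (FsDatasetSpi), which may still be null
  // if storage has not been initialized yet.
  private Object data;

  public void deleteBlockPool(String blockPoolId, boolean force) {
    // checkNotNull throws a NullPointerException carrying this message when
    // 'data' is null, so callers see "Storage not yet initialized" rather
    // than an opaque NPE from inside the dataset call.
    Preconditions.checkNotNull(data, "Storage not yet initialized");
    // ... safe to dereference 'data' from here on ...
  }

  public static void main(String[] args) {
    try {
      new StorageGuardSketch().deleteBlockPool("BP-1", false);
    } catch (NullPointerException e) {
      System.out.println("Guard fired: " + e.getMessage());
    }
  }
}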