This is an automated email from the ASF dual-hosted git repository.

hexiaoqiao pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

The following commit(s) were added to refs/heads/trunk by this push:
     new cc66683b1a9  HDFS-17184. Improve BlockReceiver to throws DiskOutOfSpaceException when initialize. (#6044). Contributed by Haiyang Hu.
cc66683b1a9 is described below

commit cc66683b1a96596c6e30d9b644471aee0537d683
Author: huhaiyang <huhaiyang...@126.com>
AuthorDate: Thu Sep 21 21:45:30 2023 +0800

    HDFS-17184. Improve BlockReceiver to throws DiskOutOfSpaceException when initialize. (#6044). Contributed by Haiyang Hu.

    Signed-off-by: He Xiaoqiao <hexiaoq...@apache.org>
---
 .../org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java     | 8 ++++----
 .../server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java | 7 +++----
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 1c077098a9d..4829e8c5786 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.tracing.Span;
 import org.apache.hadoop.tracing.Tracer;
 
@@ -274,10 +275,9 @@ class BlockReceiver implements Closeable {
       if (isCreate) {
         BlockMetadataHeader.writeHeader(checksumOut, diskChecksum);
       }
-    } catch (ReplicaAlreadyExistsException bae) {
-      throw bae;
-    } catch (ReplicaNotFoundException bne) {
-      throw bne;
+    } catch (ReplicaAlreadyExistsException | ReplicaNotFoundException
+        | DiskOutOfSpaceException e) {
+      throw e;
     } catch(IOException ioe) {
       if (replicaInfo != null) {
         replicaInfo.releaseAllBytesReserved();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java
index fe010b35a4c..fcf07a820da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java
@@ -117,14 +117,13 @@ public class RoundRobinVolumeChoosingPolicy<V extends FsVolumeSpi>
         maxAvailable = availableVolumeSize;
       }
 
+      LOG.warn("The volume[{}] with the available space (={} B) is "
+          + "less than the block size (={} B).", volume.getBaseURI(),
+          availableVolumeSize, blockSize);
       if (curVolume == startVolume) {
         throw new DiskOutOfSpaceException("Out of space: "
             + "The volume with the most available space (=" + maxAvailable
             + " B) is less than the block size (=" + blockSize + " B).");
-      } else {
-        LOG.warn("The volume[{}] with the available space (={} B) is "
-            + "less than the block size (={} B).", volume.getBaseURI(),
-            availableVolumeSize, blockSize);
       }
     }
   }
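
For readers skimming the diff, here is a minimal, self-contained sketch (not the actual Hadoop classes) of the selection behavior this patch produces in RoundRobinVolumeChoosingPolicy.chooseVolume: every volume that cannot fit the block is now logged, and once the round-robin scan wraps back to the volume it started from, a disk-out-of-space exception is thrown, which BlockReceiver now rethrows as-is instead of folding it into the generic IOException handling. The Volume and OutOfSpaceException types and the stderr logging below are hypothetical stand-ins for FsVolumeSpi, DiskChecker.DiskOutOfSpaceException, and the SLF4J logger.

import java.io.IOException;
import java.util.List;

class RoundRobinSketch {

  // Hypothetical stand-in for FsVolumeSpi.
  interface Volume {
    long getAvailable();
    String getBaseURI();
  }

  // Hypothetical stand-in for DiskChecker.DiskOutOfSpaceException.
  static class OutOfSpaceException extends IOException {
    OutOfSpaceException(String msg) {
      super(msg);
    }
  }

  private int curVolume = 0;

  Volume chooseVolume(List<Volume> volumes, long blockSize) throws IOException {
    final int startVolume = curVolume;
    long maxAvailable = 0;
    while (true) {
      final Volume volume = volumes.get(curVolume);
      curVolume = (curVolume + 1) % volumes.size();
      final long available = volume.getAvailable();
      if (available > blockSize) {
        return volume; // this volume has room for the block
      }
      maxAvailable = Math.max(maxAvailable, available);
      // After the patch, every undersized volume is logged, not only the
      // ones visited before the scan wraps around.
      System.err.printf("WARN volume[%s] available=%d B < block size=%d B%n",
          volume.getBaseURI(), available, blockSize);
      if (curVolume == startVolume) {
        // A full pass found no volume with enough space; callers such as
        // BlockReceiver now let this exception type propagate directly.
        throw new OutOfSpaceException("Out of space: the volume with the most"
            + " available space (=" + maxAvailable + " B) is less than the"
            + " block size (=" + blockSize + " B).");
      }
    }
  }
}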