[ 
https://issues.apache.org/jira/browse/HDFS-11182?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15763238#comment-15763238
 ] 

ASF GitHub Bot commented on HDFS-11182:
---------------------------------------

Github user xiaoyuyao commented on a diff in the pull request:

    https://github.com/apache/hadoop/pull/168#discussion_r93171165
  
    --- Diff: 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 ---
    @@ -3208,68 +3200,49 @@ public ShortCircuitRegistry 
getShortCircuitRegistry() {
       }
     
       /**
    -   * Check the disk error
    +   * Check the disk error synchronously.
        */
    -  private void checkDiskError() {
    -    Set<StorageLocation> unhealthyLocations = data.checkDataDir();
    -    if (unhealthyLocations != null && !unhealthyLocations.isEmpty()) {
    +  @VisibleForTesting
    +  public void checkDiskError() throws IOException {
    +    Set<FsVolumeSpi> unhealthyVolumes;
    +    try {
    +      unhealthyVolumes = volumeChecker.checkAllVolumes(data);
    +      LOG.info("checkDiskError got {} failed volumes - {}",
    +          unhealthyVolumes.size(), unhealthyVolumes);
    +      lastDiskErrorCheck = Time.monotonicNow();
    +    } catch (InterruptedException e) {
    +      LOG.error("Interrupted while running disk check", e);
    +      throw new IOException("Interrupted while running disk check", e);
    +    }
    +    handleVolumeFailures(unhealthyVolumes);
    +  }
    +
    +  private void handleVolumeFailures(Set<FsVolumeSpi> unhealthyVolumes) {
    +    data.handleVolumeFailures(unhealthyVolumes);
    +    Set<StorageLocation> unhealthyLocations = new HashSet<>(
    +        unhealthyVolumes.size());
    +
    +    if (!unhealthyVolumes.isEmpty()) {
    +      StringBuilder sb = new StringBuilder("DataNode failed volumes:");
    +      for (FsVolumeSpi vol : unhealthyVolumes) {
    +        unhealthyLocations.add(vol.getStorageLocation());
    +        sb.append(vol.getStorageLocation()).append(";");
    +      }
    +
           try {
             // Remove all unhealthy volumes from DataNode.
             removeVolumes(unhealthyLocations, false);
           } catch (IOException e) {
             LOG.warn("Error occurred when removing unhealthy storage dirs: "
                 + e.getMessage(), e);
           }
    -      StringBuilder sb = new StringBuilder("DataNode failed volumes:");
    -      for (StorageLocation location : unhealthyLocations) {
    -        sb.append(location + ";");
    -      }
    +      LOG.info(sb.toString());
    --- End diff --
    
    Thanks for fixing that.


> Update DataNode to use DatasetVolumeChecker
> -------------------------------------------
>
>                 Key: HDFS-11182
>                 URL: https://issues.apache.org/jira/browse/HDFS-11182
>             Project: Hadoop HDFS
>          Issue Type: Sub-task
>          Components: datanode
>            Reporter: Arpit Agarwal
>            Assignee: Arpit Agarwal
>
> Update DataNode to use the DatasetVolumeChecker class introduced in 
> HDFS-11149 to parallelize disk checks.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)

---------------------------------------------------------------------
To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org
For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org

Reply via email to